/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * Copyright (c) 2018 Matthew Macy
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/pmckern.h>
#include <sys/pmclog.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>

#include <sys/linker.h>		/* needs to be after <sys/malloc.h> */

#include <machine/atomic.h>
#include <machine/md_var.h>

#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include "hwpmc_soft.h"
#define PMC_EPOCH_ENTER() struct epoch_tracker pmc_et; epoch_enter_preempt(global_epoch_preempt, &pmc_et)
#define PMC_EPOCH_EXIT() epoch_exit_preempt(global_epoch_preempt, &pmc_et)
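/*
 * Editorial note (illustrative sketch, not part of the original source):
 * PMC_EPOCH_ENTER() declares the epoch tracker as a local variable, so the
 * two macros must appear in the same lexical scope and cannot nest.  A
 * typical use, mirroring pmc_process_mmap() below, looks like:
 *
 *	PMC_EPOCH_ENTER();
 *	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
 *		pmclog_process_map_in(po, pid, pkm->pm_address, fullpath);
 *	PMC_EPOCH_EXIT();
 */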
enum pmc_flags {
	PMC_FLAG_NONE	  = 0x00, /* do nothing */
	PMC_FLAG_REMOVE   = 0x01, /* atomically remove entry from hash */
	PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
	PMC_FLAG_NOWAIT   = 0x04, /* do not wait for mallocs */
};
/*
 * The offset in sysent where the syscall is allocated.
 */
static int pmc_syscall_num = NO_SYSCALL;
struct pmc_cpu		**pmc_pcpu;	 /* per-cpu state */
pmc_value_t		*pmc_pcpu_saved; /* saved PMC values: CSW handling */

#define	PMC_PCPU_SAVED(C,R)	pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
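/*
 * Editorial note (worked example, not in the original source): the saved
 * values form a flattened [cpu][rowindex] array.  With md->pmd_npmc == 4,
 * PMC_PCPU_SAVED(2,1) selects pmc_pcpu_saved[1 + 4*2], i.e. element 9.
 */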
struct mtx_pool		*pmc_mtxpool;
static int		*pmc_pmcdisp;	 /* PMC row dispositions */
#define	PMC_ROW_DISP_IS_FREE(R)		(pmc_pmcdisp[(R)] == 0)
#define	PMC_ROW_DISP_IS_THREAD(R)	(pmc_pmcdisp[(R)] > 0)
#define	PMC_ROW_DISP_IS_STANDALONE(R)	(pmc_pmcdisp[(R)] < 0)

#define	PMC_MARK_ROW_FREE(R) do {					\
	pmc_pmcdisp[(R)] = 0;						\
} while (0)

#define	PMC_MARK_ROW_STANDALONE(R) do {					\
	KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
		    __LINE__));						\
	atomic_add_int(&pmc_pmcdisp[(R)], -1);				\
	KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()),		\
		("[pmc,%d] row disposition error", __LINE__));		\
} while (0)

#define	PMC_UNMARK_ROW_STANDALONE(R) do {				\
	atomic_add_int(&pmc_pmcdisp[(R)], 1);				\
	KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
		    __LINE__));						\
} while (0)

#define	PMC_MARK_ROW_THREAD(R) do {					\
	KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
		    __LINE__));						\
	atomic_add_int(&pmc_pmcdisp[(R)], 1);				\
} while (0)

#define	PMC_UNMARK_ROW_THREAD(R) do {					\
	atomic_add_int(&pmc_pmcdisp[(R)], -1);				\
	KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
		    __LINE__));						\
} while (0)
/* various event handlers */
static eventhandler_tag pmc_exit_tag, pmc_fork_tag, pmc_kld_load_tag,
    pmc_kld_unload_tag;

/* Module statistics */
struct pmc_driverstats pmc_stats;

/* Machine/processor dependent operations */
static struct pmc_mdep  *md;
/*
 * Hash tables mapping owner processes and target threads to PMCs.
 */
struct mtx pmc_processhash_mtx;		/* spin mutex */
static u_long pmc_processhashmask;
static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;

/*
 * Hash table of PMC owner descriptors.  This table is protected by
 * the shared PMC "sx" lock.
 */
static u_long pmc_ownerhashmask;
static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;

/*
 * List of PMC owners with system-wide sampling PMCs.
 */
static CK_LIST_HEAD(, pmc_owner) pmc_ss_owners;
/*
 * List of free thread entries.  This is protected by the spin
 * mutex below.
 */
static struct mtx pmc_threadfreelist_mtx;	/* spin mutex */
static LIST_HEAD(, pmc_thread) pmc_threadfreelist;
static int pmc_threadfreelist_entries = 0;
#define	THREADENTRY_SIZE						\
	(sizeof(struct pmc_thread) + (md->pmd_npmc * sizeof(struct pmc_threadpmcstate)))
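/*
 * Editorial note (worked example, not in the original source): each thread
 * descriptor carries one pmc_threadpmcstate per hardware row, so with
 * md->pmd_npmc == 8 a descriptor occupies sizeof(struct pmc_thread) +
 * 8 * sizeof(struct pmc_threadpmcstate) bytes.
 */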
/*
 * Task to free thread descriptors
 */
static struct task free_task;

/*
 * A map of row indices to classdep structures.
 */
static struct pmc_classdep **pmc_rowindex_to_classdep;
static int	pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
static int	pmc_debugflags_parse(char *newstr, char *fence);

static int	load(struct module *module, int cmd, void *arg);
static int	pmc_add_sample(ring_type_t ring, struct pmc *pm,
    struct trapframe *tf);
static void	pmc_add_thread_descriptors_from_proc(struct proc *p,
    struct pmc_process *pp);
static int	pmc_attach_process(struct proc *p, struct pmc *pm);
static struct pmc *pmc_allocate_pmc_descriptor(void);
static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p);
static int	pmc_attach_one_process(struct proc *p, struct pmc *pm);
static int	pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
    int cpu);
static int	pmc_can_attach(struct pmc *pm, struct proc *p);
static void	pmc_capture_user_callchain(int cpu, int soft,
    struct trapframe *tf);
static void	pmc_cleanup(void);
static int	pmc_detach_process(struct proc *p, struct pmc *pm);
static int	pmc_detach_one_process(struct proc *p, struct pmc *pm,
    int flags);
static void	pmc_destroy_owner_descriptor(struct pmc_owner *po);
static void	pmc_destroy_pmc_descriptor(struct pmc *pm);
static void	pmc_destroy_process_descriptor(struct pmc_process *pp);
static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
static int	pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
    pmc_id_t pmcid);
static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
    uint32_t mode);
static struct pmc_thread *pmc_find_thread_descriptor(struct pmc_process *pp,
    struct thread *td, uint32_t mode);
static void	pmc_force_context_switch(void);
static void	pmc_link_target_process(struct pmc *pm,
    struct pmc_process *pp);
static void	pmc_log_all_process_mappings(struct pmc_owner *po);
static void	pmc_log_kernel_mappings(struct pmc *pm);
static void	pmc_log_process_mappings(struct pmc_owner *po, struct proc *p);
static void	pmc_maybe_remove_owner(struct pmc_owner *po);
static void	pmc_process_csw_in(struct thread *td);
static void	pmc_process_csw_out(struct thread *td);
static void	pmc_process_exit(void *arg, struct proc *p);
static void	pmc_process_fork(void *arg, struct proc *p1,
    struct proc *p2, int n);
static void	pmc_process_samples(int cpu, ring_type_t soft);
static void	pmc_release_pmc_descriptor(struct pmc *pmc);
static void	pmc_process_thread_add(struct thread *td);
static void	pmc_process_thread_delete(struct thread *td);
static void	pmc_process_thread_userret(struct thread *td);
static void	pmc_remove_owner(struct pmc_owner *po);
static void	pmc_remove_process_descriptor(struct pmc_process *pp);
static void	pmc_restore_cpu_binding(struct pmc_binding *pb);
static void	pmc_save_cpu_binding(struct pmc_binding *pb);
static void	pmc_select_cpu(int cpu);
static int	pmc_start(struct pmc *pm);
static int	pmc_stop(struct pmc *pm);
static int	pmc_syscall_handler(struct thread *td, void *syscall_args);
static struct pmc_thread *pmc_thread_descriptor_pool_alloc(void);
static void	pmc_thread_descriptor_pool_drain(void);
static void	pmc_thread_descriptor_pool_free(struct pmc_thread *pt);
static void	pmc_unlink_target_process(struct pmc *pmc,
    struct pmc_process *pp);
static int	generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp);
static int	generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp);
static struct pmc_mdep *pmc_generic_cpu_initialize(void);
static void	pmc_generic_cpu_finalize(struct pmc_mdep *md);
static void	pmc_post_callchain_callback(void);
static void	pmc_process_threadcreate(struct thread *td);
static void	pmc_process_threadexit(struct thread *td);
static void	pmc_process_proccreate(struct proc *p);
static void	pmc_process_allproc(struct pmc *pm);
/*
 * Kernel tunables and sysctl(8) interface.
 */

SYSCTL_DECL(_kern_hwpmc);
SYSCTL_NODE(_kern_hwpmc, OID_AUTO, stats, CTLFLAG_RW, 0, "HWPMC stats");

SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_ignored, CTLFLAG_RW,
    &pmc_stats.pm_intr_ignored, "# of interrupts ignored");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_processed, CTLFLAG_RW,
    &pmc_stats.pm_intr_processed, "# of interrupts processed");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_bufferfull, CTLFLAG_RW,
    &pmc_stats.pm_intr_bufferfull, "# of interrupts where buffer was full");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscalls, CTLFLAG_RW,
    &pmc_stats.pm_syscalls, "# of syscalls");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscall_errors, CTLFLAG_RW,
    &pmc_stats.pm_syscall_errors, "# of syscall errors");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests, CTLFLAG_RW,
    &pmc_stats.pm_buffer_requests, "# of buffer requests");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests_failed, CTLFLAG_RW,
    &pmc_stats.pm_buffer_requests_failed, "# of buffer requests which failed");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, log_sweeps, CTLFLAG_RW,
    &pmc_stats.pm_log_sweeps, "# of sample log sweeps");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, merges, CTLFLAG_RW,
    &pmc_stats.pm_merges, "# of times kernel stack was found for user trace");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, overwrites, CTLFLAG_RW,
    &pmc_stats.pm_overwrites,
    "# of times a sample was overwritten before being logged");
static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_RDTUN,
    &pmc_callchaindepth, 0, "depth of call chain records");

char pmc_cpuid[PMC_CPUID_LEN];
SYSCTL_STRING(_kern_hwpmc, OID_AUTO, cpuid, CTLFLAG_RD,
    pmc_cpuid, 0, "cpu version string");

struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
char	pmc_debugstr[PMC_DEBUG_STRSIZE];
TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
    sizeof(pmc_debugstr));
SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
    0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
/*
 * kern.hwpmc.hashsize -- determines the number of rows in the
 * hash tables used to look up threads.
 */
static int pmc_hashsize = PMC_HASH_SIZE;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_RDTUN,
    &pmc_hashsize, 0, "rows in hash tables");
/*
 * kern.hwpmc.nsamples --- number of PC samples/callchain stacks per CPU
 */
static int pmc_nsamples = PMC_NSAMPLES;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_RDTUN,
    &pmc_nsamples, 0, "number of PC samples per CPU");

static uint64_t pmc_sample_mask = PMC_NSAMPLES - 1;
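/*
 * Editorial note (not in the original source): the mask form assumes
 * PMC_NSAMPLES is a power of two, so that, e.g. with PMC_NSAMPLES == 1024,
 * (i & pmc_sample_mask) reduces a free-running producer index i to a
 * ring-buffer slot in [0, 1023] without a modulo operation.
 */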
/*
 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
 */
static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_RDTUN,
    &pmc_mtxpool_size, 0, "size of spin mutex pool");
/*
 * kern.hwpmc.threadfreelist_entries -- number of free entries
 */
SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_entries, CTLFLAG_RD,
    &pmc_threadfreelist_entries, 0, "number of available thread entries");

/*
 * kern.hwpmc.threadfreelist_max -- maximum number of free entries
 */
static int pmc_threadfreelist_max = PMC_THREADLIST_MAX;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_max, CTLFLAG_RW,
    &pmc_threadfreelist_max, 0,
    "maximum number of available thread entries before freeing some");
/*
 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
 * allocate system-wide PMCs.
 *
 * Allowing unprivileged processes to allocate system PMCs is convenient
 * if system-wide measurements need to be taken concurrently with other
 * per-process measurements.  This feature is turned off by default.
 */
static int pmc_unprivileged_syspmcs = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RWTUN,
    &pmc_unprivileged_syspmcs, 0,
    "allow unprivileged process to allocate system PMCs");
/*
 * Hash function.  Discard the lower 2 bits of the pointer since
 * these are always zero for our uses.  The hash multiplier is
 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
 */
#if LONG_BIT == 64
#define	_PMC_HM		11400714819323198486u
#elif LONG_BIT == 32
#define	_PMC_HM		2654435769u
#else
#error	Must know the size of 'long' to compile
#endif

#define	PMC_HASH_PTR(P,M)	((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
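/*
 * Editorial note (worked example, not in the original source): this is
 * Fibonacci hashing.  For LONG_BIT == 64, (2^64) * ((sqrt(5)-1)/2) =
 * 18446744073709551616 * 0.6180339887... = 11400714819323198485.95...,
 * which rounds to the 11400714819323198486 used above; the 32-bit
 * constant 2654435769 = round((2^32) * 0.6180339887...) is derived the
 * same way.  A typical lookup then computes, e.g.:
 *
 *	hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
 *	poh = &pmc_ownerhash[hindex];
 */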
/* The `sysent' for the new syscall */
static struct sysent pmc_sysent = {
	.sy_call =	pmc_syscall_handler,
};

static struct syscall_module_data pmc_syscall_mod = {
	.chainevh =	load,
	.chainarg =	NULL,
	.offset =	&pmc_syscall_num,
	.new_sysent =	&pmc_sysent,
	.old_sysent =	{ .sy_narg = 0, .sy_call = NULL },
	.flags =	SY_THR_STATIC_KLD,
};

static moduledata_t pmc_mod = {
	.name =		PMC_MODULE_NAME,
	.evhand =	syscall_module_handler,
	.priv =		&pmc_syscall_mod,
};

#ifdef EARLY_AP_STARTUP
DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SYSCALLS, SI_ORDER_ANY);
#else
DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
#endif
MODULE_VERSION(pmc, PMC_VERSION);
enum pmc_dbgparse_state {
	PMCDS_WS,		/* in whitespace */
	PMCDS_MAJOR,		/* seen a major keyword */
	PMCDS_MINOR
};
static int
pmc_debugflags_parse(char *newstr, char *fence)
{
	char c, *p, *q;
	struct pmc_debugflags *tmpflags;
	int error, found, *newbits, tmp;
	size_t kwlen;

	tmpflags = malloc(sizeof(*tmpflags), M_PMC, M_WAITOK|M_ZERO);

	p = newstr;
	error = 0;

	for (; p < fence && (c = *p); p++) {

		/* skip white space */
		if (c == ' ' || c == '\t')
			continue;

		/* look for a keyword followed by "=" */
		for (q = p; p < fence && (c = *p) && c != '='; p++)
			;
		if (c != '=') {
			error = EINVAL;
			goto done;
		}

		kwlen = p - q;
		newbits = NULL;

		/* lookup flag group name */
#define	DBG_SET_FLAG_MAJ(S,F)						\
		if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0)	\
			newbits = &tmpflags->pdb_ ## F;

		DBG_SET_FLAG_MAJ("cpu",		CPU);
		DBG_SET_FLAG_MAJ("csw",		CSW);
		DBG_SET_FLAG_MAJ("logging",	LOG);
		DBG_SET_FLAG_MAJ("module",	MOD);
		DBG_SET_FLAG_MAJ("md", 	MDP);
		DBG_SET_FLAG_MAJ("owner",	OWN);
		DBG_SET_FLAG_MAJ("pmc",		PMC);
		DBG_SET_FLAG_MAJ("process",	PRC);
		DBG_SET_FLAG_MAJ("sampling",	SAM);

		if (newbits == NULL) {
			error = EINVAL;
			goto done;
		}

		p++;		/* skip the '=' */

		/* Now parse the individual flags */
		tmp = 0;
	newflag:
		for (q = p; p < fence && (c = *p); p++)
			if (c == ' ' || c == '\t' || c == ',')
				break;

		/* p == fence or c == ws or c == "," or c == 0 */

		if ((kwlen = p - q) == 0) {
			*newbits = tmp;
			continue;
		}

		found = 0;
#define	DBG_SET_FLAG_MIN(S,F)						\
		if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0)	\
			tmp |= found = (1 << PMC_DEBUG_MIN_ ## F)

		/* a '*' denotes all possible flags in the group */
		if (kwlen == 1 && *q == '*')
			tmp = found = ~0;
		/* look for individual flag names */
		DBG_SET_FLAG_MIN("allocaterow", ALR);
		DBG_SET_FLAG_MIN("allocate",	ALL);
		DBG_SET_FLAG_MIN("attach",	ATT);
		DBG_SET_FLAG_MIN("bind",	BND);
		DBG_SET_FLAG_MIN("config",	CFG);
		DBG_SET_FLAG_MIN("exec",	EXC);
		DBG_SET_FLAG_MIN("exit",	EXT);
		DBG_SET_FLAG_MIN("find",	FND);
		DBG_SET_FLAG_MIN("flush",	FLS);
		DBG_SET_FLAG_MIN("fork",	FRK);
		DBG_SET_FLAG_MIN("getbuf",	GTB);
		DBG_SET_FLAG_MIN("hook",	PMH);
		DBG_SET_FLAG_MIN("init",	INI);
		DBG_SET_FLAG_MIN("intr",	INT);
		DBG_SET_FLAG_MIN("linktarget",	TLK);
		DBG_SET_FLAG_MIN("mayberemove", OMR);
		DBG_SET_FLAG_MIN("ops",		OPS);
		DBG_SET_FLAG_MIN("read",	REA);
		DBG_SET_FLAG_MIN("register",	REG);
		DBG_SET_FLAG_MIN("release",	REL);
		DBG_SET_FLAG_MIN("remove",	ORM);
		DBG_SET_FLAG_MIN("sample",	SAM);
		DBG_SET_FLAG_MIN("scheduleio",	SIO);
		DBG_SET_FLAG_MIN("select",	SEL);
		DBG_SET_FLAG_MIN("signal",	SIG);
		DBG_SET_FLAG_MIN("swi",		SWI);
		DBG_SET_FLAG_MIN("swo",		SWO);
		DBG_SET_FLAG_MIN("start",	STA);
		DBG_SET_FLAG_MIN("stop",	STO);
		DBG_SET_FLAG_MIN("syscall",	PMS);
		DBG_SET_FLAG_MIN("unlinktarget", TUL);
		DBG_SET_FLAG_MIN("write",	WRI);

		if (found == 0) {
			/* unrecognized flag name */
			error = EINVAL;
			goto done;
		}

		if (c == 0 || c == ' ' || c == '\t') {	/* end of flag group */
			*newbits = tmp;
			continue;
		}

		p++;
		goto newflag;
	}

	/* save the new flag set */
	bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags));

 done:
	free(tmpflags, M_PMC);
	return error;
}
static int
pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	char *fence, *newstr;
	int error;
	unsigned int n;

	(void) arg1; (void) arg2; /* unused parameters */

	n = sizeof(pmc_debugstr);
	newstr = malloc(n, M_PMC, M_WAITOK|M_ZERO);
	(void) strlcpy(newstr, pmc_debugstr, n);

	error = sysctl_handle_string(oidp, newstr, n, req);

	/* if there is a new string, parse and copy it */
	if (error == 0 && req->newptr != NULL) {
		fence = newstr + (n < req->newlen ? n : req->newlen + 1);
		if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
			(void) strlcpy(pmc_debugstr, newstr,
			    sizeof(pmc_debugstr));
	}

	free(newstr, M_PMC);

	return error;
}
/*
 * Map a row index to a classdep structure and return the adjusted row
 * index for the PMC class index.
 */
static struct pmc_classdep *
pmc_ri_to_classdep(struct pmc_mdep *md, int ri, int *adjri)
{
	struct pmc_classdep *pcd;

	KASSERT(ri >= 0 && ri < md->pmd_npmc,
	    ("[pmc,%d] illegal row-index %d", __LINE__, ri));

	pcd = pmc_rowindex_to_classdep[ri];

	KASSERT(pcd != NULL,
	    ("[pmc,%d] ri %d null pcd", __LINE__, ri));

	*adjri = ri - pcd->pcd_ri;

	KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num,
	    ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri));

	return (pcd);
}
/*
 * Concurrency Control
 *
 * The driver manages the following data structures:
 *
 *   - target process descriptors, one per target process
 *   - owner process descriptors (and attached lists), one per owner process
 *   - lookup hash tables for owner and target processes
 *   - PMC descriptors (and attached lists)
 *   - per-cpu hardware state
 *   - the 'hook' variable through which the kernel calls into
 *     this module
 *   - the machine hardware state (managed by the MD layer)
 *
 * These data structures are accessed from:
 *
 *   - thread context-switch code
 *   - interrupt handlers (possibly on multiple cpus)
 *   - kernel threads on multiple cpus running on behalf of user
 *     processes doing system calls
 *   - this driver's private kernel threads
 *
 * = Locks and Locking strategy =
 *
 * The driver uses four locking strategies for its operation:
 *
 * - The global SX lock "pmc_sx" is used to protect internal
 *   data structures.
 *
 *   Calls into the module by syscall() start with this lock being
 *   held in exclusive mode.  Depending on the requested operation,
 *   the lock may be downgraded to 'shared' mode to allow more
 *   concurrent readers into the module.  Calls into the module from
 *   other parts of the kernel acquire the lock in shared mode.
 *
 *   This SX lock is held in exclusive mode for any operations that
 *   modify the linkages between the driver's internal data structures.
 *
 *   The 'pmc_hook' function pointer is also protected by this lock.
 *   It is only examined with the sx lock held in exclusive mode.  The
 *   kernel module is allowed to be unloaded only with the sx lock held
 *   in exclusive mode.  In normal syscall handling, after acquiring the
 *   pmc_sx lock we first check that 'pmc_hook' is non-null before
 *   proceeding.  This prevents races between the thread unloading the
 *   module and other threads seeking to use the module.
 *
 * - Lookups of target process structures and owner process structures
 *   cannot use the global "pmc_sx" SX lock because these lookups need
 *   to happen during context switches and in other critical sections
 *   where sleeping is not allowed.  We protect these lookup tables
 *   with their own private spin-mutexes, "pmc_processhash_mtx" and
 *   "pmc_ownerhash_mtx".
 *
 * - Interrupt handlers work in a lock free manner.  At interrupt
 *   time, handlers look at the PMC pointer (phw->phw_pmc) configured
 *   when the PMC was started.  If this pointer is NULL, the interrupt
 *   is ignored after updating driver statistics.  We ensure that this
 *   pointer is set (using an atomic operation if necessary) before the
 *   PMC hardware is started.  Conversely, this pointer is unset atomically
 *   only after the PMC hardware is stopped.
 *
 *   We ensure that everything needed for the operation of an
 *   interrupt handler is available without it needing to acquire any
 *   locks.  We also ensure that a PMC's software state is destroyed only
 *   after the PMC is taken off hardware (on all CPUs).
 *
 * - Context-switch handling with process-private PMCs needs more
 *   care.
 *
 *   A given process may be the target of multiple PMCs.  For example,
 *   PMCATTACH and PMCDETACH may be requested by a process on one CPU
 *   while the target process is running on another.  A PMC could also
 *   be getting released because its owner is exiting.  We tackle
 *   these situations in the following manner:
 *
 *   - each target process structure 'pmc_process' has an array
 *     of 'struct pmc *' pointers, one for each hardware PMC.
 *
 *   - At context switch IN time, each "target" PMC in RUNNING state
 *     gets started on hardware and a pointer to each PMC is copied into
 *     the per-cpu phw array.  The 'runcount' for the PMC is
 *     incremented.
 *
 *   - At context switch OUT time, all process-virtual PMCs are stopped
 *     on hardware.  The saved value is added to the PMCs value field
 *     only if the PMC is in a non-deleted state (the PMCs state could
 *     have changed during the current time slice).
 *
 *     Note that since in-between a switch IN on a processor and a switch
 *     OUT, the PMC could have been released on another CPU.  Therefore
 *     context switch OUT always looks at the hardware state to turn
 *     OFF PMCs and will update a PMC's saved value only if reachable
 *     from the target process record.
 *
 *   - OP PMCRELEASE could be called on a PMC at any time (the PMC could
 *     be attached to many processes at the time of the call and could
 *     be active on multiple CPUs).
 *
 *     We prevent further scheduling of the PMC by marking it as in
 *     state 'DELETED'.  If the runcount of the PMC is non-zero then
 *     this PMC is currently running on a CPU somewhere.  The thread
 *     doing the PMCRELEASE operation waits by repeatedly doing a
 *     pause() till the runcount comes to zero.
 *
 * The contents of a PMC descriptor (struct pmc) are protected using
 * a spin-mutex.  In order to save space, we use a mutex pool.
 *
 * In terms of lock types used by witness(4), we use:
 * - Type "pmc-sx", used by the global SX lock.
 * - Type "pmc-sleep", for sleep mutexes used by logger threads.
 * - Type "pmc-per-proc", for protecting PMC owner descriptors.
 * - Type "pmc-leaf", used for all other spin mutexes.
 */
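/*
 * Editorial sketch (not part of the original driver): the PMCRELEASE wait
 * described above amounts to the following loop, where marking the PMC
 * DELETED blocks further scheduling and pause(9) yields the CPU for one
 * tick between polls of the runcount:
 *
 *	pm->pm_state = PMC_STATE_DELETED;
 *	while (counter_u64_fetch(pm->pm_runcount) > 0)
 *		pause("pmcrel", 1);
 *	... software state may now be torn down safely ...
 */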
/*
 * Save the cpu binding of the current kthread.
 */
static void
pmc_save_cpu_binding(struct pmc_binding *pb)
{
	PMCDBG0(CPU,BND,2, "save-cpu");
	thread_lock(curthread);
	pb->pb_bound = sched_is_bound(curthread);
	pb->pb_cpu = curthread->td_oncpu;
	thread_unlock(curthread);
	PMCDBG1(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
}
/*
 * Restore the cpu binding of the current thread.
 */
static void
pmc_restore_cpu_binding(struct pmc_binding *pb)
{
	PMCDBG2(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
	    curthread->td_oncpu, pb->pb_cpu);
	thread_lock(curthread);
	if (pb->pb_bound)
		sched_bind(curthread, pb->pb_cpu);
	else
		sched_unbind(curthread);
	thread_unlock(curthread);
	PMCDBG0(CPU,BND,2, "restore-cpu done");
}
/*
 * Move execution over to the specified cpu and bind it there.
 */
static void
pmc_select_cpu(int cpu)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[pmc,%d] bad cpu number %d", __LINE__, cpu));

	/* Never move to an inactive CPU. */
	KASSERT(pmc_cpu_is_active(cpu), ("[pmc,%d] selecting inactive "
	    "CPU %d", __LINE__, cpu));

	PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d", cpu);
	thread_lock(curthread);
	sched_bind(curthread, cpu);
	thread_unlock(curthread);

	KASSERT(curthread->td_oncpu == cpu,
	    ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
	    cpu, curthread->td_oncpu));

	PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
}
/*
 * Force a context switch.
 *
 * We do this by pause'ing for 1 tick -- invoking mi_switch() is not
 * guaranteed to force a context switch.
 */
static void
pmc_force_context_switch(void)
{
	pause("pmcctx", 1);
}

uint64_t
pmc_rdtsc(void)
{
#if defined(__i386__) || defined(__amd64__)
	if (__predict_true(amd_feature & AMDID_RDTSCP))
		return (rdtscp());
	else
		return (rdtsc());
#else
	return get_cyclecount();
#endif
}
/*
 * Get the file name for an executable.  This is a simple wrapper
 * around vn_fullpath(9).
 */
static void
pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
{
	*fullpath = "unknown";
	*freepath = NULL;
	vn_fullpath(curthread, v, fullpath, freepath);
}
/*
 * Remove a process owning PMCs.
 */
static void
pmc_remove_owner(struct pmc_owner *po)
{
	struct pmc *pm, *tmp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG1(OWN,ORM,1, "remove-owner po=%p", po);

	/* Remove descriptor from the owner hash table */
	LIST_REMOVE(po, po_next);

	/* release all owned PMC descriptors */
	LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
		PMCDBG1(OWN,ORM,2, "pmc=%p", pm);
		KASSERT(pm->pm_owner == po,
		    ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po));

		pmc_release_pmc_descriptor(pm);	/* will unlink from the list */
		pmc_destroy_pmc_descriptor(pm);
	}

	KASSERT(po->po_sscount == 0,
	    ("[pmc,%d] SS count not zero", __LINE__));
	KASSERT(LIST_EMPTY(&po->po_pmcs),
	    ("[pmc,%d] PMC list not empty", __LINE__));

	/* de-configure the log file if present */
	if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		pmclog_deconfigure_log(po);
}
/*
 * Remove an owner process record if all conditions are met.
 */
static void
pmc_maybe_remove_owner(struct pmc_owner *po)
{
	PMCDBG1(OWN,OMR,1, "maybe-remove-owner po=%p", po);

	/*
	 * Remove owner record if
	 * - this process does not own any PMCs
	 * - this process has not allocated a system-wide sampling buffer
	 */
	if (LIST_EMPTY(&po->po_pmcs) &&
	    ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) {
		pmc_remove_owner(po);
		pmc_destroy_owner_descriptor(po);
	}
}
/*
 * Add an association between a target process and a PMC.
 */
static void
pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
{
	int ri;
	struct pmc_target *pt;
#ifdef INVARIANTS
	struct pmc_thread *pt_td;
#endif

	sx_assert(&pmc_sx, SX_XLOCKED);

	KASSERT(pm != NULL && pp != NULL,
	    ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
	KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
	    ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d",
	    __LINE__, pm, pp->pp_proc->p_pid));
	KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= ((int) md->pmd_npmc - 1),
	    ("[pmc,%d] Illegal reference count %d for process record %p",
	    __LINE__, pp->pp_refcnt, (void *) pp));

	ri = PMC_TO_ROWINDEX(pm);

	PMCDBG3(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
	    pm, ri, pp);

#ifdef HWPMC_DEBUG
	LIST_FOREACH(pt, &pm->pm_targets, pt_next)
		if (pt->pt_process == pp)
			KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
			    __LINE__, pp, pm));
#endif

	pt = malloc(sizeof(struct pmc_target), M_PMC, M_WAITOK|M_ZERO);
	pt->pt_process = pp;

	LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);

	atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc,
	    (uintptr_t)pm);

	if (pm->pm_owner->po_owner == pp->pp_proc)
		pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;

	/*
	 * Initialize the per-process values at this row index.
	 */
	pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ?
	    pm->pm_sc.pm_reloadcount : 0;

	pp->pp_refcnt++;

#ifdef INVARIANTS
	/* Confirm that the per-thread values at this row index are cleared. */
	if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
		mtx_lock_spin(pp->pp_tdslock);
		LIST_FOREACH(pt_td, &pp->pp_tds, pt_next) {
			KASSERT(pt_td->pt_pmcs[ri].pt_pmcval == (pmc_value_t) 0,
			    ("[pmc,%d] pt_pmcval not cleared for pid=%d at "
			    "ri=%d", __LINE__, pp->pp_proc->p_pid, ri));
		}
		mtx_unlock_spin(pp->pp_tdslock);
	}
#endif
}
/*
 * Removes the association between a target process and a PMC.
 */
static void
pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
{
	int ri;
	struct proc *p;
	struct pmc_target *ptgt;
	struct pmc_thread *pt;

	sx_assert(&pmc_sx, SX_XLOCKED);

	KASSERT(pm != NULL && pp != NULL,
	    ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));

	KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc,
	    ("[pmc,%d] Illegal ref count %d on process record %p",
	    __LINE__, pp->pp_refcnt, (void *) pp));

	ri = PMC_TO_ROWINDEX(pm);

	PMCDBG3(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
	    pm, ri, pp);

	KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
	    ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
	    ri, pm, pp->pp_pmcs[ri].pp_pmc));

	pp->pp_pmcs[ri].pp_pmc = NULL;
	pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;

	/* Clear the per-thread values at this row index. */
	if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
		mtx_lock_spin(pp->pp_tdslock);
		LIST_FOREACH(pt, &pp->pp_tds, pt_next)
			pt->pt_pmcs[ri].pt_pmcval = (pmc_value_t) 0;
		mtx_unlock_spin(pp->pp_tdslock);
	}

	/* Remove owner-specific flags */
	if (pm->pm_owner->po_owner == pp->pp_proc) {
		pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
		pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
	}

	pp->pp_refcnt--;

	/* Remove the target process from the PMC structure */
	LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
		if (ptgt->pt_process == pp)
			break;

	KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
	    "in pmc %p", __LINE__, pp->pp_proc, pp, pm));

	LIST_REMOVE(ptgt, pt_next);
	free(ptgt, M_PMC);

	/* if the PMC now lacks targets, send the owner a SIGIO */
	if (LIST_EMPTY(&pm->pm_targets)) {
		p = pm->pm_owner->po_owner;
		PROC_LOCK(p);
		kern_psignal(p, SIGIO);
		PROC_UNLOCK(p);

		PMCDBG2(PRC,SIG,2, "signalling proc=%p signal=%d", p,
		    SIGIO);
	}
}
/*
 * Check if PMC 'pm' may be attached to target process 't'.
 */
static int
pmc_can_attach(struct pmc *pm, struct proc *t)
{
	struct proc *o;		/* pmc owner */
	struct ucred *oc, *tc;	/* owner, target credentials */
	int decline_attach, i;

	/*
	 * A PMC's owner can always attach that PMC to itself.
	 */
	if ((o = pm->pm_owner->po_owner) == t)
		return 0;

	PROC_LOCK(o);
	oc = o->p_ucred;
	crhold(oc);
	PROC_UNLOCK(o);

	PROC_LOCK(t);
	tc = t->p_ucred;
	crhold(tc);
	PROC_UNLOCK(t);

	/*
	 * The effective uid of the PMC owner should match at least one
	 * of the {effective,real,saved} uids of the target process.
	 */
	decline_attach = oc->cr_uid != tc->cr_uid &&
	    oc->cr_uid != tc->cr_svuid &&
	    oc->cr_uid != tc->cr_ruid;

	/*
	 * Every one of the target's group ids must be in the owner's
	 * group list.
	 */
	for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
		decline_attach = !groupmember(tc->cr_groups[i], oc);

	/* check the read and saved gids too */
	if (decline_attach == 0)
		decline_attach = !groupmember(tc->cr_rgid, oc) ||
		    !groupmember(tc->cr_svgid, oc);

	crfree(tc);
	crfree(oc);

	return !decline_attach;
}
/*
 * Attach a process to a PMC.
 */
static int
pmc_attach_one_process(struct proc *p, struct pmc *pm)
{
	int ri, error;
	char *fullpath, *freepath;
	struct pmc_process	*pp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG5(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
	    PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

	/*
	 * Locate the process descriptor corresponding to process 'p',
	 * allocating space as needed.
	 *
	 * Verify that rowindex 'pm_rowindex' is free in the process
	 * descriptor.
	 *
	 * If not, allocate space for a descriptor and link the
	 * process descriptor and PMC.
	 */
	ri = PMC_TO_ROWINDEX(pm);

	/* mark process as using HWPMCs */
	PROC_LOCK(p);
	p->p_flag |= P_HWPMC;
	PROC_UNLOCK(p);

	if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL) {
		error = ENOMEM;
		goto fail;
	}

	if (pp->pp_pmcs[ri].pp_pmc == pm) {/* already present at slot [ri] */
		error = EEXIST;
		goto fail;
	}

	if (pp->pp_pmcs[ri].pp_pmc != NULL) {
		error = EBUSY;
		goto fail;
	}

	pmc_link_target_process(pm, pp);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) &&
	    (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0)
		pm->pm_flags |= PMC_F_NEEDS_LOGFILE;

	pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */

	/* issue an attach event to a configured log file */
	if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) {
		if (p->p_flag & P_KPROC) {
			fullpath = kernelname;
			freepath = NULL;
		} else {
			pmc_getfilename(p->p_textvp, &fullpath, &freepath);
			pmclog_process_pmcattach(pm, p->p_pid, fullpath);
		}
		free(freepath, M_TEMP);
		if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			pmc_log_process_mappings(pm->pm_owner, p);
	}

	return 0;
 fail:
	PROC_LOCK(p);
	p->p_flag &= ~P_HWPMC;
	PROC_UNLOCK(p);
	return error;
}
/*
 * Attach a process and optionally its children.
 */
static int
pmc_attach_process(struct proc *p, struct pmc *pm)
{
	int error;
	struct proc *top;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG5(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
	    PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

	/*
	 * If this PMC successfully allowed a GETMSR operation
	 * in the past, disallow further ATTACHes.
	 */
	if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0)
		return EPERM;

	if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
		return pmc_attach_one_process(p, pm);

	/*
	 * Traverse all child processes, attaching them to
	 * this PMC.
	 */
	sx_slock(&proctree_lock);

	top = p;

	for (;;) {
		if ((error = pmc_attach_one_process(p, pm)) != 0)
			break;
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				goto done;
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}

	if (error)
		(void) pmc_detach_process(top, pm);

 done:
	sx_sunlock(&proctree_lock);
	return error;
}
/*
 * Detach a process from a PMC.  If there are no other PMCs tracking
 * this process, remove the process structure from its hash table.  If
 * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
 */
static int
pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
{
	int ri;
	struct pmc_process *pp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	KASSERT(pm != NULL,
	    ("[pmc,%d] null pm pointer", __LINE__));

	ri = PMC_TO_ROWINDEX(pm);

	PMCDBG6(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
	    pm, ri, p, p->p_pid, p->p_comm, flags);

	if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
		return ESRCH;

	if (pp->pp_pmcs[ri].pp_pmc != pm)
		return EINVAL;

	pmc_unlink_target_process(pm, pp);

	/* Issue a detach entry if a log file is configured */
	if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE)
		pmclog_process_pmcdetach(pm, p->p_pid);

	/*
	 * If there are no PMCs targeting this process, we remove its
	 * descriptor from the target hash table and unset the P_HWPMC
	 * flag in the struct proc.
	 */
	KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc,
	    ("[pmc,%d] Illegal refcnt %d for process struct %p",
	    __LINE__, pp->pp_refcnt, pp));

	if (pp->pp_refcnt != 0)	/* still a target of some PMC */
		return 0;

	pmc_remove_process_descriptor(pp);

	if (flags & PMC_FLAG_REMOVE)
		pmc_destroy_process_descriptor(pp);

	PROC_LOCK(p);
	p->p_flag &= ~P_HWPMC;
	PROC_UNLOCK(p);

	return 0;
}
/*
 * Detach a process and optionally its descendants from a PMC.
 */
static int
pmc_detach_process(struct proc *p, struct pmc *pm)
{
	struct proc *top;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG5(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
	    PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

	if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
		return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);

	/*
	 * Traverse all children, detaching them from this PMC.  We
	 * ignore errors since we could be detaching a PMC from a
	 * partially attached proc tree.
	 */
	sx_slock(&proctree_lock);

	top = p;

	for (;;) {
		(void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);

		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				goto done;
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}

 done:
	sx_sunlock(&proctree_lock);

	if (LIST_EMPTY(&pm->pm_targets))
		pm->pm_flags &= ~PMC_F_ATTACH_DONE;

	return 0;
}
/*
 * Thread context switch IN.
 */
static void
pmc_process_csw_in(struct thread *td)
{
	int cpu;
	unsigned int adjri, ri;
	struct pmc *pm;
	struct proc *p;
	struct pmc_cpu *pc;
	struct pmc_hw *phw;
	pmc_value_t newvalue;
	struct pmc_process *pp;
	struct pmc_thread *pt;
	struct pmc_classdep *pcd;

	p = td->td_proc;
	pt = NULL;
	if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
		return;

	KASSERT(pp->pp_proc == td->td_proc,
	    ("[pmc,%d] not my thread state", __LINE__));

	critical_enter(); /* no preemption from this point */

	cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */

	PMCDBG5(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
	    p->p_pid, p->p_comm, pp);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));

	pc = pmc_pcpu[cpu];

	for (ri = 0; ri < md->pmd_npmc; ri++) {

		if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
			continue;

		KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
		    ("[pmc,%d] Target PMC in non-virtual mode (%d)",
		    __LINE__, PMC_TO_MODE(pm)));

		KASSERT(PMC_TO_ROWINDEX(pm) == ri,
		    ("[pmc,%d] Row index mismatch pmc %d != ri %d",
		    __LINE__, PMC_TO_ROWINDEX(pm), ri));

		/*
		 * Only PMCs that are marked as 'RUNNING' need
		 * be placed on hardware.
		 */
		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0,
		    ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
		    (unsigned long)counter_u64_fetch(pm->pm_runcount)));

		/* increment PMC runcount */
		counter_u64_add(pm->pm_runcount, 1);

		/* configure the HWPMC we are going to use. */
		pcd = pmc_ri_to_classdep(md, ri, &adjri);
		pcd->pcd_config_pmc(cpu, adjri, pm);

		phw = pc->pc_hwpmcs[ri];

		KASSERT(phw != NULL,
		    ("[pmc,%d] null hw pointer", __LINE__));

		KASSERT(phw->phw_pmc == pm,
		    ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
		    phw->phw_pmc, pm));

		/*
		 * Write out saved value and start the PMC.
		 *
		 * Sampling PMCs use a per-thread value, while
		 * counting mode PMCs use a per-pmc value that is
		 * inherited across descendants.
		 */
		if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
			if (pt == NULL)
				pt = pmc_find_thread_descriptor(pp, td,
				    PMC_FLAG_NONE);

			KASSERT(pt != NULL,
			    ("[pmc,%d] No thread found for td=%p", __LINE__,
			    td));

			mtx_pool_lock_spin(pmc_mtxpool, pm);

			/*
			 * If we have a thread descriptor, use the per-thread
			 * counter in the descriptor. If not, we will use
			 * a per-process counter.
			 *
			 * TODO: Remove the per-process "safety net" once
			 * we have thoroughly tested that we don't hit the
			 * above assert.
			 */
			if (pt != NULL) {
				if (pt->pt_pmcs[ri].pt_pmcval > 0)
					newvalue = pt->pt_pmcs[ri].pt_pmcval;
				else
					newvalue = pm->pm_sc.pm_reloadcount;
			} else {
				/*
				 * Use the saved value calculated after the most
				 * recent time a thread using the shared counter
				 * switched out. Reset the saved count in case
				 * another thread from this process switches in
				 * before any threads switch out.
				 */
				newvalue = pp->pp_pmcs[ri].pp_pmcval;
				pp->pp_pmcs[ri].pp_pmcval =
				    pm->pm_sc.pm_reloadcount;
			}
			mtx_pool_unlock_spin(pmc_mtxpool, pm);
			KASSERT(newvalue > 0 && newvalue <=
			    pm->pm_sc.pm_reloadcount,
			    ("[pmc,%d] pmcval outside of expected range cpu=%d "
			    "ri=%d pmcval=%jx pm_reloadcount=%jx", __LINE__,
			    cpu, ri, newvalue, pm->pm_sc.pm_reloadcount));
		} else {
			KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
			    ("[pmc,%d] illegal mode=%d", __LINE__,
			    PMC_TO_MODE(pm)));
			mtx_pool_lock_spin(pmc_mtxpool, pm);
			newvalue = PMC_PCPU_SAVED(cpu, ri) =
			    pm->pm_gv.pm_savedvalue;
			mtx_pool_unlock_spin(pmc_mtxpool, pm);
		}

		PMCDBG3(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);

		pcd->pcd_write_pmc(cpu, adjri, newvalue);

		/* If a sampling mode PMC, reset stalled state. */
		if (PMC_TO_MODE(pm) == PMC_MODE_TS)
			pm->pm_pcpu_state[cpu].pps_stalled = 0;

		/* Indicate that we desire this to run. */
		pm->pm_pcpu_state[cpu].pps_cpustate = 1;

		/* Start the PMC. */
		pcd->pcd_start_pmc(cpu, adjri);
	}

	/*
	 * Perform any other architecture/cpu dependent thread
	 * switch-in actions.
	 */
	(void) (*md->pmd_switch_in)(pc, pp);

	critical_exit();
}
/*
 * Thread context switch OUT.
 */
static void
pmc_process_csw_out(struct thread *td)
{
	int cpu;
	int64_t tmp;
	struct pmc *pm;
	struct proc *p;
	enum pmc_mode mode;
	struct pmc_cpu *pc;
	pmc_value_t newvalue;
	unsigned int adjri, ri;
	struct pmc_process *pp;
	struct pmc_thread *pt = NULL;
	struct pmc_classdep *pcd;

	/*
	 * Locate our process descriptor; this may be NULL if
	 * this process is exiting and we have already removed
	 * the process from the target process table.
	 *
	 * Note that due to kernel preemption, multiple
	 * context switches may happen while the process is
	 * exiting.
	 *
	 * Note also that if the target process cannot be
	 * found we still need to deconfigure any PMCs that
	 * are currently running on hardware.
	 */
	p = td->td_proc;
	pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);

	critical_enter();

	cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */

	PMCDBG5(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
	    p->p_pid, p->p_comm, pp);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));

	pc = pmc_pcpu[cpu];

	/*
	 * When a PMC gets unlinked from a target PMC, it will
	 * be removed from the target's pp_pmc[] array.
	 *
	 * However, on a MP system, the target could have been
	 * executing on another CPU at the time of the unlink.
	 * So, at context switch OUT time, we need to look at
	 * the hardware to determine if a PMC is scheduled on
	 * the current CPU.
	 */
	for (ri = 0; ri < md->pmd_npmc; ri++) {

		pcd = pmc_ri_to_classdep(md, ri, &adjri);
		pm  = NULL;
		(void) (*pcd->pcd_get_config)(cpu, adjri, &pm);

		if (pm == NULL)	/* nothing at this row index */
			continue;

		mode = PMC_TO_MODE(pm);
		if (!PMC_IS_VIRTUAL_MODE(mode))
			continue; /* not a process virtual PMC */

		KASSERT(PMC_TO_ROWINDEX(pm) == ri,
		    ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
		    __LINE__, PMC_TO_ROWINDEX(pm), ri));

		/*
		 * Change desired state, and then stop if not stalled.
		 * This two-step dance should avoid race conditions where
		 * an interrupt re-enables the PMC after this code has
		 * already checked the pm_stalled flag.
		 */
		pm->pm_pcpu_state[cpu].pps_cpustate = 0;
		if (pm->pm_pcpu_state[cpu].pps_stalled == 0)
			pcd->pcd_stop_pmc(cpu, adjri);

		KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
		    ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
		    (unsigned long)counter_u64_fetch(pm->pm_runcount)));

		/* reduce this PMC's runcount */
		counter_u64_add(pm->pm_runcount, -1);

		/*
		 * If this PMC is associated with this process,
		 * save the reading.
		 */
		if (pm->pm_state != PMC_STATE_DELETED && pp != NULL &&
		    pp->pp_pmcs[ri].pp_pmc != NULL) {
			KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
			    ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__,
			    pm, ri, pp->pp_pmcs[ri].pp_pmc));

			KASSERT(pp->pp_refcnt > 0,
			    ("[pmc,%d] pp refcnt = %d", __LINE__,
			    pp->pp_refcnt));

			pcd->pcd_read_pmc(cpu, adjri, &newvalue);

			if (mode == PMC_MODE_TS) {
				PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d val=%jd (samp)",
				    cpu, ri, newvalue);

				if (pt == NULL)
					pt = pmc_find_thread_descriptor(pp, td,
					    PMC_FLAG_NONE);

				KASSERT(pt != NULL,
				    ("[pmc,%d] No thread found for td=%p",
				    __LINE__, td));

				mtx_pool_lock_spin(pmc_mtxpool, pm);

				/*
				 * If we have a thread descriptor, save the
				 * per-thread counter in the descriptor. If not,
				 * we will update the per-process counter.
				 *
				 * TODO: Remove the per-process "safety net"
				 * once we have thoroughly tested that we
				 * don't hit the above assert.
				 */
				if (pt != NULL)
					pt->pt_pmcs[ri].pt_pmcval = newvalue;
				else {
					/*
					 * For sampling process-virtual PMCs,
					 * newvalue is the number of events to
					 * be seen until the next sampling
					 * interrupt. We can just add the events
					 * left from this invocation to the
					 * counter, then adjust in case we
					 * overflow our range.
					 *
					 * (Recall that we reload the counter
					 * every time we use it.)
					 */
					pp->pp_pmcs[ri].pp_pmcval += newvalue;
					if (pp->pp_pmcs[ri].pp_pmcval >
					    pm->pm_sc.pm_reloadcount)
						pp->pp_pmcs[ri].pp_pmcval -=
						    pm->pm_sc.pm_reloadcount;
				}
				mtx_pool_unlock_spin(pmc_mtxpool, pm);
			} else {
				tmp = newvalue - PMC_PCPU_SAVED(cpu,ri);

				PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd (count)",
				    cpu, ri, tmp);

				/*
				 * For counting process-virtual PMCs,
				 * we expect the count to be
				 * increasing monotonically, modulo a 64
				 * bit wraparound.
				 */
				KASSERT(tmp >= 0,
				    ("[pmc,%d] negative increment cpu=%d "
				    "ri=%d newvalue=%jx saved=%jx "
				    "incr=%jx", __LINE__, cpu, ri,
				    newvalue, PMC_PCPU_SAVED(cpu,ri), tmp));

				mtx_pool_lock_spin(pmc_mtxpool, pm);
				pm->pm_gv.pm_savedvalue += tmp;
				pp->pp_pmcs[ri].pp_pmcval += tmp;
				mtx_pool_unlock_spin(pmc_mtxpool, pm);

				if (pm->pm_flags & PMC_F_LOG_PROCCSW)
					pmclog_process_proccsw(pm, pp, tmp, td);
			}
		}

		/* mark hardware as free */
		pcd->pcd_config_pmc(cpu, adjri, NULL);
	}

	/*
	 * Perform any other architecture/cpu dependent thread
	 * switch out functions.
	 */
	(void) (*md->pmd_switch_out)(pc, pp);

	critical_exit();
}
/*
 * A new thread for a process.
 */
static void
pmc_process_thread_add(struct thread *td)
{
	struct pmc_process *pmc;

	pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE);
	if (pmc != NULL)
		pmc_find_thread_descriptor(pmc, td, PMC_FLAG_ALLOCATE);
}

/*
 * A thread delete for a process.
 */
static void
pmc_process_thread_delete(struct thread *td)
{
	struct pmc_process *pmc;

	pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE);
	if (pmc != NULL)
		pmc_thread_descriptor_pool_free(pmc_find_thread_descriptor(pmc,
		    td, PMC_FLAG_REMOVE));
}

/*
 * A userret() call for a thread.
 */
static void
pmc_process_thread_userret(struct thread *td)
{
	sched_pin();
	pmc_capture_user_callchain(curcpu, PMC_UR, td->td_frame);
	sched_unpin();
}
/*
 * A mapping change for a process.
 */
static void
pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm)
{
	int ri;
	pid_t pid;
	char *fullpath, *freepath;
	const struct pmc *pm;
	struct pmc_owner *po;
	const struct pmc_process *pp;

	freepath = fullpath = NULL;
	MPASS(!in_epoch(global_epoch_preempt));
	pmc_getfilename((struct vnode *) pkm->pm_file, &fullpath, &freepath);

	pid = td->td_proc->p_pid;

	PMC_EPOCH_ENTER();
	/* Inform owners of all system-wide sampling PMCs. */
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
		if (po->po_flags & PMC_PO_OWNS_LOGFILE)
			pmclog_process_map_in(po, pid, pkm->pm_address, fullpath);

	if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
		goto done;

	/*
	 * Inform sampling PMC owners tracking this process.
	 */
	for (ri = 0; ri < md->pmd_npmc; ri++)
		if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
		    PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			pmclog_process_map_in(pm->pm_owner,
			    pid, pkm->pm_address, fullpath);

 done:
	if (freepath)
		free(freepath, M_TEMP);
	PMC_EPOCH_EXIT();
}
/*
 * Log an munmap request.
 */
static void
pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm)
{
	int ri;
	pid_t pid;
	struct pmc_owner *po;
	const struct pmc *pm;
	const struct pmc_process *pp;

	pid = td->td_proc->p_pid;

	PMC_EPOCH_ENTER();
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
		if (po->po_flags & PMC_PO_OWNS_LOGFILE)
			pmclog_process_map_out(po, pid, pkm->pm_address,
			    pkm->pm_address + pkm->pm_size);
	PMC_EPOCH_EXIT();

	if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
		return;

	for (ri = 0; ri < md->pmd_npmc; ri++)
		if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
		    PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			pmclog_process_map_out(pm->pm_owner, pid,
			    pkm->pm_address, pkm->pm_address + pkm->pm_size);
}
/*
 * Log mapping information about the kernel.
 */
static void
pmc_log_kernel_mappings(struct pmc *pm)
{
	struct pmc_owner *po;
	struct pmckern_map_in *km, *kmbase;

	MPASS(in_epoch(global_epoch_preempt) || sx_xlocked(&pmc_sx));
	KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
	    ("[pmc,%d] non-sampling PMC (%p) desires mapping information",
	    __LINE__, (void *) pm));

	po = pm->pm_owner;

	if (po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE)
		return;

	if (PMC_TO_MODE(pm) == PMC_MODE_SS)
		pmc_process_allproc(pm);

	/*
	 * Log the current set of kernel modules.
	 */
	kmbase = linker_hwpmc_list_objects();
	for (km = kmbase; km->pm_file != NULL; km++) {
		PMCDBG2(LOG,REG,1,"%s %p", (char *) km->pm_file,
		    (void *) km->pm_address);
		pmclog_process_map_in(po, (pid_t) -1, km->pm_address,
		    (char *) km->pm_file);
	}
	free(kmbase, M_LINKER);

	po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE;
}
/*
 * Log the mappings for a single process.
 */
static void
pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
{
	vm_map_t map;
	struct vnode *vp;
	struct vmspace *vm;
	vm_map_entry_t entry;
	vm_offset_t last_end;
	u_int last_timestamp;
	struct vnode *last_vp;
	vm_offset_t start_addr;
	vm_object_t obj, lobj, tobj;
	char *fullpath, *freepath;

	last_vp = NULL;
	last_end = (vm_offset_t) 0;
	fullpath = freepath = NULL;

	if ((vm = vmspace_acquire_ref(p)) == NULL)
		return;

	map = &vm->vm_map;
	vm_map_lock_read(map);

	for (entry = map->header.next; entry != &map->header; entry = entry->next) {

		if (entry == NULL) {
			PMCDBG2(LOG,OPS,2, "hwpmc: vm_map entry unexpectedly "
			    "NULL! pid=%d vm_map=%p\n", p->p_pid, map);
			break;
		}

		/*
		 * We only care about executable map entries.
		 */
		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    !(entry->protection & VM_PROT_EXECUTE) ||
		    (entry->object.vm_object == NULL)) {
			continue;
		}

		obj = entry->object.vm_object;
		VM_OBJECT_RLOCK(obj);

		/*
		 * Walk the backing_object list to find the base
		 * (non-shadowed) vm_object.
		 */
		for (lobj = tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_RLOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_RUNLOCK(lobj);
			lobj = tobj;
		}

		/*
		 * At this point lobj is the base vm_object and it is locked.
		 */
		if (lobj == NULL) {
			PMCDBG3(LOG,OPS,2, "hwpmc: lobj unexpectedly NULL! pid=%d "
			    "vm_map=%p vm_obj=%p\n", p->p_pid, map, obj);
			VM_OBJECT_RUNLOCK(obj);
			continue;
		}

		vp = vm_object_vnode(lobj);
		if (vp == NULL) {
			if (lobj != obj)
				VM_OBJECT_RUNLOCK(lobj);
			VM_OBJECT_RUNLOCK(obj);
			continue;
		}

		/*
		 * Skip contiguous regions that point to the same
		 * vnode, so we don't emit redundant MAP-IN
		 * directives.
		 */
		if (entry->start == last_end && vp == last_vp) {
			last_end = entry->end;
			if (lobj != obj)
				VM_OBJECT_RUNLOCK(lobj);
			VM_OBJECT_RUNLOCK(obj);
			continue;
		}

		/*
		 * We don't want to keep the proc's vm_map or this
		 * vm_object locked while we walk the pathname, since
		 * vn_fullpath() can sleep.  However, if we drop the
		 * lock, it's possible for concurrent activity to
		 * modify the vm_map list.  To protect against this,
		 * we save the vm_map timestamp before we release the
		 * lock, and check it after we reacquire the lock
		 * below.
		 */
		start_addr = entry->start;
		last_end = entry->end;
		last_timestamp = map->timestamp;
		vm_map_unlock_read(map);

		vref(vp);
		if (lobj != obj)
			VM_OBJECT_RUNLOCK(lobj);

		VM_OBJECT_RUNLOCK(obj);

		freepath = NULL;
		pmc_getfilename(vp, &fullpath, &freepath);
		last_vp = vp;

		vrele(vp);

		pmclog_process_map_in(po, p->p_pid, start_addr, fullpath);
		if (freepath)
			free(freepath, M_TEMP);

		vm_map_lock_read(map);

		/*
		 * If our saved timestamp doesn't match, this means
		 * that the vm_map was modified out from under us and
		 * we can't trust our current "entry" pointer.  Do a
		 * new lookup for this entry.  If there is no entry
		 * for this address range, vm_map_lookup_entry() will
		 * return the previous one, so we always want to go to
		 * entry->next on the next loop iteration.
		 *
		 * There is an edge condition here that can occur if
		 * there is no entry at or before this address.  In
		 * this situation, vm_map_lookup_entry returns
		 * &map->header, which would cause our loop to abort
		 * without processing the rest of the map.  However,
		 * in practice this will never happen for process
		 * vm_map.  This is because the executable's text
		 * segment is the first mapping in the proc's address
		 * space, and this mapping is never removed until the
		 * process exits, so there will always be a non-header
		 * entry at or before the requested address for
		 * vm_map_lookup_entry to return.
		 */
		if (map->timestamp != last_timestamp)
			vm_map_lookup_entry(map, last_end - 1, &entry);
	}

	vm_map_unlock_read(map);
	vmspace_free(vm);
}
/*
 * Log mappings for all processes in the system.
 */
static void
pmc_log_all_process_mappings(struct pmc_owner *po)
{
	struct proc *p, *top;

	sx_assert(&pmc_sx, SX_XLOCKED);

	if ((p = pfind(1)) == NULL)
		panic("[pmc,%d] Cannot find init", __LINE__);

	PROC_UNLOCK(p);

	sx_slock(&proctree_lock);

	top = p;

	for (;;) {
		pmc_log_process_mappings(po, p);
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				goto done;
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
 done:
	sx_sunlock(&proctree_lock);
}
/*
 * The 'hook' invoked from the kernel proper
 */

#ifdef	HWPMC_DEBUG
const char *pmc_hooknames[] = {
	/* these strings correspond to PMC_FN_* in <sys/pmckern.h> */
	"",
	"EXEC",
	"CSW-IN",
	"CSW-OUT",
	"SAMPLE",
	"UNUSED1",
	"UNUSED2",
	"MMAP",
	"MUNMAP",
	"CALLCHAIN-NMI",
	"CALLCHAIN-SOFT",
	"SOFTSAMPLING",
	"THR-CREATE",
	"THR-EXIT",
	"THR-USERRET",
	"THR-CREATE-LOG",
	"THR-EXIT-LOG",
	"PROC-CREATE-LOG"
};
#endif

static int
pmc_hook_handler(struct thread *td, int function, void *arg)
{
	int cpu;

	PMCDBG4(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
	    pmc_hooknames[function], arg);

	switch (function) {
	case PMC_FN_PROCESS_EXEC:
	{
		char *fullpath, *freepath;
		unsigned int ri;
		int is_using_hwpmcs;
		struct pmc *pm;
		struct proc *p;
		struct pmc_owner *po;
		struct pmc_process *pp;
		struct pmckern_procexec *pk;

		sx_assert(&pmc_sx, SX_XLOCKED);

		p = td->td_proc;
		pmc_getfilename(p->p_textvp, &fullpath, &freepath);

		pk = (struct pmckern_procexec *) arg;

		PMC_EPOCH_ENTER();
		/* Inform owners of SS mode PMCs of the exec event. */
		CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
			if (po->po_flags & PMC_PO_OWNS_LOGFILE)
				pmclog_process_procexec(po, PMC_ID_INVALID,
				    p->p_pid, pk->pm_entryaddr, fullpath);
		PMC_EPOCH_EXIT();

		PROC_LOCK(p);
		is_using_hwpmcs = p->p_flag & P_HWPMC;
		PROC_UNLOCK(p);

		if (!is_using_hwpmcs) {
			if (freepath)
				free(freepath, M_TEMP);
			break;
		}

		/*
		 * PMCs are not inherited across an exec(): remove any
		 * PMCs that this process is the owner of.
		 */
		if ((po = pmc_find_owner_descriptor(p)) != NULL) {
			pmc_remove_owner(po);
			pmc_destroy_owner_descriptor(po);
		}

		/*
		 * If the process being exec'ed is not the target of any
		 * PMC, we are done.
		 */
		if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) {
			if (freepath)
				free(freepath, M_TEMP);
			break;
		}

		/*
		 * Log the exec event to all monitoring owners.  Skip
		 * owners who have already received the event because
		 * they had system sampling PMCs active.
		 */
		for (ri = 0; ri < md->pmd_npmc; ri++)
			if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
				po = pm->pm_owner;
				if (po->po_sscount == 0 &&
				    po->po_flags & PMC_PO_OWNS_LOGFILE)
					pmclog_process_procexec(po, pm->pm_id,
					    p->p_pid, pk->pm_entryaddr,
					    fullpath);
			}

		if (freepath)
			free(freepath, M_TEMP);

		PMCDBG4(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
		    p, p->p_pid, p->p_comm, pk->pm_credentialschanged);

		if (pk->pm_credentialschanged == 0) /* no change */
			break;

		/*
		 * If the newly exec()'ed process has a different credential
		 * than before, allow it to be the target of a PMC only if
		 * the PMC's owner has sufficient privilege.
		 */
		for (ri = 0; ri < md->pmd_npmc; ri++)
			if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL)
				if (pmc_can_attach(pm, td->td_proc) != 0)
					pmc_detach_one_process(td->td_proc,
					    pm, PMC_FLAG_NONE);

		KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc,
		    ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__,
		    pp->pp_refcnt, pp));

		/*
		 * If this process is no longer the target of any
		 * PMCs, we can remove the process entry and free
		 * up space.
		 */
		if (pp->pp_refcnt == 0) {
			pmc_remove_process_descriptor(pp);
			pmc_destroy_process_descriptor(pp);
		}
	}
	break;

	case PMC_FN_CSW_IN:
		pmc_process_csw_in(td);
		break;

	case PMC_FN_CSW_OUT:
		pmc_process_csw_out(td);
		break;

	/*
	 * Process accumulated PC samples.
	 *
	 * This function is expected to be called by hardclock() for
	 * each CPU that has accumulated PC samples.
	 *
	 * This function is to be executed on the CPU whose samples
	 * are being processed.
	 */
	case PMC_FN_DO_SAMPLES:

		/*
		 * Clear the cpu specific bit in the CPU mask before
		 * doing the rest of the processing.  If the NMI handler
		 * gets invoked after the "atomic_clear_int()" call
		 * below but before "pmc_process_samples()" gets
		 * around to processing the interrupt, then we will
		 * come back here at the next hardclock() tick (and
		 * may find nothing to do if "pmc_process_samples()"
		 * had already processed the interrupt).  We don't
		 * lose the interrupt sample.
		 */
		DPCPU_SET(pmc_sampled, 0);
		cpu = PCPU_GET(cpuid);
		pmc_process_samples(cpu, PMC_HR);
		pmc_process_samples(cpu, PMC_SR);
		pmc_process_samples(cpu, PMC_UR);
		break;

	case PMC_FN_MMAP:
		pmc_process_mmap(td, (struct pmckern_map_in *) arg);
		break;

	case PMC_FN_MUNMAP:
		MPASS(in_epoch(global_epoch_preempt) || sx_xlocked(&pmc_sx));
		pmc_process_munmap(td, (struct pmckern_map_out *) arg);
		break;

	case PMC_FN_PROC_CREATE_LOG:
		pmc_process_proccreate((struct proc *)arg);
		break;

	case PMC_FN_USER_CALLCHAIN:
		/*
		 * Record a call chain.
		 */
		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
		    __LINE__));

		pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_HR,
		    (struct trapframe *) arg);

		KASSERT(td->td_pinned == 1,
		    ("[pmc,%d] invalid td_pinned value", __LINE__));
		sched_unpin();  /* Can migrate safely now. */

		td->td_pflags &= ~TDP_CALLCHAIN;
		break;

	case PMC_FN_USER_CALLCHAIN_SOFT:
		/*
		 * Record a call chain.
		 */
		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
		    __LINE__));

		cpu = PCPU_GET(cpuid);
		pmc_capture_user_callchain(cpu, PMC_SR,
		    (struct trapframe *) arg);

		KASSERT(td->td_pinned == 1,
		    ("[pmc,%d] invalid td_pinned value", __LINE__));

		sched_unpin();  /* Can migrate safely now. */

		td->td_pflags &= ~TDP_CALLCHAIN;
		break;

	case PMC_FN_SOFT_SAMPLING:
		/*
		 * Call soft PMC sampling intr.
		 */
		pmc_soft_intr((struct pmckern_soft *) arg);
		break;

	case PMC_FN_THR_CREATE:
		pmc_process_thread_add(td);
		pmc_process_threadcreate(td);
		break;

	case PMC_FN_THR_CREATE_LOG:
		pmc_process_threadcreate(td);
		break;

	case PMC_FN_THR_EXIT:
		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
		    __LINE__));
		pmc_process_thread_delete(td);
		pmc_process_threadexit(td);
		break;

	case PMC_FN_THR_EXIT_LOG:
		pmc_process_threadexit(td);
		break;

	case PMC_FN_THR_USERRET:
		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
		    __LINE__));
		pmc_process_thread_userret(td);
		break;

	default:
#ifdef	HWPMC_DEBUG
		KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
#endif
		break;
	}

	return 0;
}
2338 * allocate a 'struct pmc_owner' descriptor in the owner hash table.
2341 static struct pmc_owner *
2342 pmc_allocate_owner_descriptor(struct proc *p)
2345 struct pmc_owner *po;
2346 struct pmc_ownerhash *poh;
2348 hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2349 poh = &pmc_ownerhash[hindex];
2351 /* Allocate and initialize an owner descriptor. */
2352 po = malloc(sizeof(struct pmc_owner), M_PMC, M_WAITOK|M_ZERO);
2354 LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
2356 TAILQ_INIT(&po->po_logbuffers);
2357 mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN);
2359 PMCDBG4(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
2360 p, p->p_pid, p->p_comm, po);
2366 pmc_destroy_owner_descriptor(struct pmc_owner *po)
2369 PMCDBG4(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)",
2370 po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm);
2372 mtx_destroy(&po->po_mtx);
2377 * Allocate a thread descriptor from the free pool.
2379 * NOTE: This *can* return NULL.
2381 static struct pmc_thread *
2382 pmc_thread_descriptor_pool_alloc(void)
2384 struct pmc_thread *pt;
2386 mtx_lock_spin(&pmc_threadfreelist_mtx);
2387 if ((pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) {
2388 LIST_REMOVE(pt, pt_next);
2389 pmc_threadfreelist_entries--;
2391 mtx_unlock_spin(&pmc_threadfreelist_mtx);
2397 * Add a thread descriptor to the free pool. We use this instead of free()
2398 * to maintain a cache of free entries. Additionally, we can safely call
2399 * this function when we cannot call free(), such as in a critical section.
2403 pmc_thread_descriptor_pool_free(struct pmc_thread *pt)
2409 memset(pt, 0, THREADENTRY_SIZE);
2410 mtx_lock_spin(&pmc_threadfreelist_mtx);
2411 LIST_INSERT_HEAD(&pmc_threadfreelist, pt, pt_next);
2412 pmc_threadfreelist_entries++;
2413 if (pmc_threadfreelist_entries > pmc_threadfreelist_max)
2414 taskqueue_enqueue(taskqueue_fast, &free_task);
2415 mtx_unlock_spin(&pmc_threadfreelist_mtx);
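/*
 * Usage sketch (editorial addition): because the function above only
 * touches the free list under a spin mutex and defers actual free(9)
 * calls to a taskqueue, a caller running in a context that cannot
 * sleep can return a descriptor safely, e.g.:
 *
 *	critical_enter();
 *	...
 *	pmc_thread_descriptor_pool_free(pt);	(no sleeping malloc/free)
 *	critical_exit();
 */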
2419 * An asynchronous task to manage the free list.
2422 pmc_thread_descriptor_pool_free_task(void *arg __unused, int pending __unused)
2424 struct pmc_thread *pt;
2425 LIST_HEAD(, pmc_thread) tmplist;
2428 LIST_INIT(&tmplist);
2430 /* Determine what changes, if any, we need to make. */
2431 mtx_lock_spin(&pmc_threadfreelist_mtx);
2432 delta = pmc_threadfreelist_entries - pmc_threadfreelist_max;
2433 while (delta > 0 && (pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) {
2435 pmc_threadfreelist_entries--;
2436 LIST_REMOVE(pt, pt_next);
2437 LIST_INSERT_HEAD(&tmplist, pt, pt_next);
2439 mtx_unlock_spin(&pmc_threadfreelist_mtx);
2441 /* If there are entries to free, free them. */
2442 while (!LIST_EMPTY(&tmplist)) {
2443 pt = LIST_FIRST(&tmplist);
2444 LIST_REMOVE(pt, pt_next);
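/*
 * Editorial note on the design: the pool-free routine above may run in
 * contexts that cannot sleep, so it never calls free(9) directly.
 * Excess descriptors are instead trimmed by this task, in a taskqueue
 * thread where freeing memory is safe.
 */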
2450 * Drain the thread free pool, freeing all allocations.
2453 pmc_thread_descriptor_pool_drain(void)
2455 struct pmc_thread *pt, *next;
2457 LIST_FOREACH_SAFE(pt, &pmc_threadfreelist, pt_next, next) {
2458 LIST_REMOVE(pt, pt_next);
2464 * find the descriptor corresponding to thread 'td', adding or removing it
2465 * as specified by 'mode'.
2467 * Note that this supports mode flags in addition to those
2468 * supported by pmc_find_process_descriptor():
2469 * PMC_FLAG_NOWAIT: Causes the function to not wait for mallocs.
2470 * This makes it safe to call while holding certain other locks.
2473 static struct pmc_thread *
2474 pmc_find_thread_descriptor(struct pmc_process *pp, struct thread *td,
2477 struct pmc_thread *pt = NULL, *ptnew = NULL;
2480 KASSERT(td != NULL, ("[pmc,%d] called to add NULL td", __LINE__));
2483 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case prior to
2484 * acquiring the lock.
2486 if (mode & PMC_FLAG_ALLOCATE) {
2487 if ((ptnew = pmc_thread_descriptor_pool_alloc()) == NULL) {
2488 wait_flag = M_WAITOK;
2489 if ((mode & PMC_FLAG_NOWAIT) || in_epoch(global_epoch_preempt))
2490 wait_flag = M_NOWAIT;
2492 ptnew = malloc(THREADENTRY_SIZE, M_PMC,
2497 mtx_lock_spin(pp->pp_tdslock);
2499 LIST_FOREACH(pt, &pp->pp_tds, pt_next)
2500 if (pt->pt_td == td)
2503 if ((mode & PMC_FLAG_REMOVE) && pt != NULL)
2504 LIST_REMOVE(pt, pt_next);
2506 if ((mode & PMC_FLAG_ALLOCATE) && pt == NULL && ptnew != NULL) {
2510 LIST_INSERT_HEAD(&pp->pp_tds, pt, pt_next);
2513 mtx_unlock_spin(pp->pp_tdslock);
2515 if (ptnew != NULL) {
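/*
 * Usage sketch (editorial, hypothetical caller): look up the descriptor
 * for curthread, allocating one without sleeping on a miss.  A NULL
 * return must be tolerated, since PMC_FLAG_NOWAIT lets the allocation
 * fail:
 *
 *	pt = pmc_find_thread_descriptor(pp, curthread,
 *	    PMC_FLAG_ALLOCATE | PMC_FLAG_NOWAIT);
 *	if (pt == NULL)
 *		... caller-specific fallback ...
 */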
2523 * Try to add thread descriptors for each thread in a process.
2527 pmc_add_thread_descriptors_from_proc(struct proc *p, struct pmc_process *pp)
2529 struct thread *curtd;
2530 struct pmc_thread **tdlist;
2531 int i, tdcnt, tdlistsz;
2533 KASSERT(!PROC_LOCKED(p), ("[pmc,%d] proc unexpectedly locked",
2537 tdlistsz = roundup2(tdcnt, 32);
2540 tdlist = malloc(sizeof(struct pmc_thread*) * tdlistsz, M_TEMP, M_WAITOK);
2543 FOREACH_THREAD_IN_PROC(p, curtd)
2545 if (tdcnt >= tdlistsz) {
2547 free(tdlist, M_TEMP);
2551 * Try to add each thread to the list without sleeping. If unable,
2552 * add to a queue to retry after dropping the process lock.
2555 FOREACH_THREAD_IN_PROC(p, curtd) {
2556 tdlist[tdcnt] = pmc_find_thread_descriptor(pp, curtd,
2557 PMC_FLAG_ALLOCATE|PMC_FLAG_NOWAIT);
2558 if (tdlist[tdcnt] == NULL) {
2560 for (i = 0; i <= tdcnt; i++)
2561 pmc_thread_descriptor_pool_free(tdlist[i]);
2562 free(tdlist, M_TEMP);
2568 free(tdlist, M_TEMP);
2572 * find the descriptor corresponding to process 'p', adding or removing it
2573 * as specified by 'mode'.
2576 static struct pmc_process *
2577 pmc_find_process_descriptor(struct proc *p, uint32_t mode)
2580 struct pmc_process *pp, *ppnew;
2581 struct pmc_processhash *pph;
2583 hindex = PMC_HASH_PTR(p, pmc_processhashmask);
2584 pph = &pmc_processhash[hindex];
2589 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we
2590 * cannot call malloc(9) once we hold a spin lock.
2592 if (mode & PMC_FLAG_ALLOCATE)
2593 ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc *
2594 sizeof(struct pmc_targetstate), M_PMC, M_WAITOK|M_ZERO);
2596 mtx_lock_spin(&pmc_processhash_mtx);
2597 LIST_FOREACH(pp, pph, pp_next)
2598 if (pp->pp_proc == p)
2601 if ((mode & PMC_FLAG_REMOVE) && pp != NULL)
2602 LIST_REMOVE(pp, pp_next);
2604 if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL &&
2607 LIST_INIT(&ppnew->pp_tds);
2608 ppnew->pp_tdslock = mtx_pool_find(pmc_mtxpool, ppnew);
2609 LIST_INSERT_HEAD(pph, ppnew, pp_next);
2610 mtx_unlock_spin(&pmc_processhash_mtx);
2614 /* Add thread descriptors for this process' current threads. */
2615 pmc_add_thread_descriptors_from_proc(p, pp);
2618 mtx_unlock_spin(&pmc_processhash_mtx);
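/*
 * Usage sketch (editorial): the three lookup modes as used elsewhere in
 * this file.  PMC_FLAG_NONE only searches, PMC_FLAG_ALLOCATE creates a
 * descriptor on a miss, and PMC_FLAG_REMOVE atomically unhashes a found
 * descriptor:
 *
 *	pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);
 *	pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE);
 *	pp = pmc_find_process_descriptor(p, PMC_FLAG_REMOVE);
 */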
2627 * remove a process descriptor from the process hash table.
2631 pmc_remove_process_descriptor(struct pmc_process *pp)
2633 KASSERT(pp->pp_refcnt == 0,
2634 ("[pmc,%d] Removing process descriptor %p with count %d",
2635 __LINE__, pp, pp->pp_refcnt));
2637 mtx_lock_spin(&pmc_processhash_mtx);
2638 LIST_REMOVE(pp, pp_next);
2639 mtx_unlock_spin(&pmc_processhash_mtx);
2643 * destroy a process descriptor.
2647 pmc_destroy_process_descriptor(struct pmc_process *pp)
2649 struct pmc_thread *pmc_td;
2651 while ((pmc_td = LIST_FIRST(&pp->pp_tds)) != NULL) {
2652 LIST_REMOVE(pmc_td, pt_next);
2653 pmc_thread_descriptor_pool_free(pmc_td);
2660 * find an owner descriptor corresponding to proc 'p'
2663 static struct pmc_owner *
2664 pmc_find_owner_descriptor(struct proc *p)
2667 struct pmc_owner *po;
2668 struct pmc_ownerhash *poh;
2670 hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2671 poh = &pmc_ownerhash[hindex];
2674 LIST_FOREACH(po, poh, po_next)
2675 if (po->po_owner == p)
2678 PMCDBG5(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
2679 "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);
2685 * pmc_allocate_pmc_descriptor
2687 * Allocate a pmc descriptor and initialize its fields.
2692 pmc_allocate_pmc_descriptor(void)
2696 pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK|M_ZERO);
2697 pmc->pm_runcount = counter_u64_alloc(M_WAITOK);
2698 pmc->pm_pcpu_state = malloc(sizeof(struct pmc_pcpu_state)*mp_ncpus, M_PMC, M_WAITOK|M_ZERO);
2699 PMCDBG1(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
2705 * Destroy a pmc descriptor.
2709 pmc_destroy_pmc_descriptor(struct pmc *pm)
2712 KASSERT(pm->pm_state == PMC_STATE_DELETED ||
2713 pm->pm_state == PMC_STATE_FREE,
2714 ("[pmc,%d] destroying non-deleted PMC", __LINE__));
2715 KASSERT(LIST_EMPTY(&pm->pm_targets),
2716 ("[pmc,%d] destroying pmc with targets", __LINE__));
2717 KASSERT(pm->pm_owner == NULL,
2718 ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
2719 KASSERT(counter_u64_fetch(pm->pm_runcount) == 0,
2720 ("[pmc,%d] pmc has non-zero run count %ld", __LINE__,
2721 (unsigned long)counter_u64_fetch(pm->pm_runcount)));
2723 counter_u64_free(pm->pm_runcount);
2724 free(pm->pm_pcpu_state, M_PMC);
2729 pmc_wait_for_pmc_idle(struct pmc *pm)
2732 volatile int maxloop;
2734 maxloop = 100 * pmc_cpu_max();
2737 * Loop (with a forced context switch) till the PMC's runcount
2738 * comes down to zero.
2740 pmclog_flush(pm->pm_owner, 1);
2741 while (counter_u64_fetch(pm->pm_runcount) > 0) {
2742 pmclog_flush(pm->pm_owner, 1);
2745 KASSERT(maxloop > 0,
2746 ("[pmc,%d] (ri%d, rc%ld) waiting too long for "
2747 "pmc to be free", __LINE__,
2748 PMC_TO_ROWINDEX(pm), (unsigned long)counter_u64_fetch(pm->pm_runcount)));
2750 pmc_force_context_switch();
2755 * This function does the following things:
2757 * - detaches the PMC from hardware
2758 * - unlinks all target threads that were attached to it
2759 * - removes the PMC from its owner's list
2760 * - destroys the PMC private mutex
2762 * Once this function completes, the given pmc pointer can be freed by
2763 * calling pmc_destroy_pmc_descriptor().
2767 pmc_release_pmc_descriptor(struct pmc *pm)
2771 u_int adjri, ri, cpu;
2772 struct pmc_owner *po;
2773 struct pmc_binding pb;
2774 struct pmc_process *pp;
2775 struct pmc_classdep *pcd;
2776 struct pmc_target *ptgt, *tmp;
2778 sx_assert(&pmc_sx, SX_XLOCKED);
2780 KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));
2782 ri = PMC_TO_ROWINDEX(pm);
2783 pcd = pmc_ri_to_classdep(md, ri, &adjri);
2784 mode = PMC_TO_MODE(pm);
2786 PMCDBG3(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
2790 * First, we take the PMC off hardware.
2793 if (PMC_IS_SYSTEM_MODE(mode)) {
2796 * A system mode PMC runs on a specific CPU. Switch
2797 * to this CPU and turn hardware off.
2799 pmc_save_cpu_binding(&pb);
2801 cpu = PMC_TO_CPU(pm);
2803 pmc_select_cpu(cpu);
2805 /* switch off non-stalled CPUs */
2806 pm->pm_pcpu_state[cpu].pps_cpustate = 0;
2807 if (pm->pm_state == PMC_STATE_RUNNING &&
2808 pm->pm_pcpu_state[cpu].pps_stalled == 0) {
2810 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
2812 KASSERT(phw->phw_pmc == pm,
2813 ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
2814 __LINE__, ri, phw->phw_pmc, pm));
2815 PMCDBG2(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);
2818 pcd->pcd_stop_pmc(cpu, adjri);
2822 PMCDBG2(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);
2825 pcd->pcd_config_pmc(cpu, adjri, NULL);
2828 /* adjust the global and process count of SS mode PMCs */
2829 if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) {
2832 if (po->po_sscount == 0) {
2833 atomic_subtract_rel_int(&pmc_ss_count, 1);
2834 CK_LIST_REMOVE(po, po_ssnext);
2835 epoch_wait_preempt(global_epoch_preempt);
2839 pm->pm_state = PMC_STATE_DELETED;
2841 pmc_restore_cpu_binding(&pb);
2844 * We could have references to this PMC structure in
2845 * the per-cpu sample queues. Wait for the queue to
2846 * drain.
2848 pmc_wait_for_pmc_idle(pm);
2850 } else if (PMC_IS_VIRTUAL_MODE(mode)) {
2853 * A virtual PMC could be running on multiple CPUs at
2854 * a given instant.
2856 * By marking its state as DELETED, we ensure that
2857 * this PMC is never further scheduled on hardware.
2859 * Then we wait till all CPUs are done with this PMC.
2861 pm->pm_state = PMC_STATE_DELETED;
2864 /* Wait for the PMC's runcount to come to zero. */
2865 pmc_wait_for_pmc_idle(pm);
2868 * At this point the PMC is off all CPUs and cannot be
2869 * freshly scheduled onto a CPU. It is now safe to
2870 * unlink all targets from this PMC. If a
2871 * process-record's refcount falls to zero, we remove
2872 * it from the hash table. The module-wide SX lock
2873 * protects us from races.
2875 LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
2876 pp = ptgt->pt_process;
2877 pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */
2879 PMCDBG1(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);
2882 * If the target process record shows that no
2883 * PMCs are attached to it, reclaim its space.
2886 if (pp->pp_refcnt == 0) {
2887 pmc_remove_process_descriptor(pp);
2888 pmc_destroy_process_descriptor(pp);
2892 cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */
2897 * Release any MD resources
2899 (void) pcd->pcd_release_pmc(cpu, adjri, pm);
2902 * Update row disposition
2905 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm)))
2906 PMC_UNMARK_ROW_STANDALONE(ri);
2908 PMC_UNMARK_ROW_THREAD(ri);
2910 /* unlink from the owner's list */
2912 LIST_REMOVE(pm, pm_next);
2913 pm->pm_owner = NULL;
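/*
 * Typical caller sequence (editorial sketch, mirroring the
 * PMC_OP_PMCRELEASE handler later in this file).  Note that 'po' must
 * be captured from pm->pm_owner before the release call, which clears
 * that field:
 *
 *	po = pm->pm_owner;
 *	pmc_release_pmc_descriptor(pm);
 *	pmc_maybe_remove_owner(po);
 *	pmc_destroy_pmc_descriptor(pm);
 */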
2918 * Register an owner and a pmc.
2922 pmc_register_owner(struct proc *p, struct pmc *pmc)
2924 struct pmc_owner *po;
2926 sx_assert(&pmc_sx, SX_XLOCKED);
2928 if ((po = pmc_find_owner_descriptor(p)) == NULL)
2929 if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
2932 KASSERT(pmc->pm_owner == NULL,
2933 ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
2936 LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next);
2939 p->p_flag |= P_HWPMC;
2942 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
2943 pmclog_process_pmcallocate(pmc);
2945 PMCDBG2(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p",
2952 * Return the current row disposition:
2953 * == 0 => FREE
2954 * > 0 => PROCESS MODE
2955 * < 0 => SYSTEM MODE
2959 pmc_getrowdisp(int ri)
2961 return (pmc_pmcdisp[ri]);
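/*
 * Editorial note (assumption based on the row-marking macros used in
 * this file): pmc_pmcdisp[] appears to be a signed per-row reference
 * count, with each thread-mode binding counting +1 and each standalone
 * (system-wide) reservation counting -1, so a row bound by two
 * process-virtual PMCs would read as 2 here.
 */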
2965 * Check if a PMC at row index 'ri' can be allocated to the current
2968 * Allocation can fail if:
2969 * - the current process is already being profiled by a PMC at index 'ri',
2970 * attached to it via OP_PMCATTACH.
2971 * - the current process has already allocated a PMC at index 'ri'
2976 pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu)
2980 struct pmc_owner *po;
2981 struct pmc_process *pp;
2983 PMCDBG5(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d "
2984 "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu);
2987 * We shouldn't have already allocated a process-mode PMC at
2988 * row index 'ri'.
2990 * We shouldn't have allocated a system-wide PMC on the same
2991 * CPU at the same row index 'ri'.
2993 if ((po = pmc_find_owner_descriptor(p)) != NULL)
2994 LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
2995 if (PMC_TO_ROWINDEX(pm) == ri) {
2996 mode = PMC_TO_MODE(pm);
2997 if (PMC_IS_VIRTUAL_MODE(mode))
2999 if (PMC_IS_SYSTEM_MODE(mode) &&
3000 (int) PMC_TO_CPU(pm) == cpu)
3006 * We also shouldn't be the target of any PMC at this index
3007 * since otherwise a PMC_ATTACH to ourselves will fail.
3009 if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
3010 if (pp->pp_pmcs[ri].pp_pmc)
3013 PMCDBG4(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
3014 p, p->p_pid, p->p_comm, ri);
3020 * Check if a given PMC at row index 'ri' can be currently used in
3021 * mode 'mode'.
3025 pmc_can_allocate_row(int ri, enum pmc_mode mode)
3029 sx_assert(&pmc_sx, SX_XLOCKED);
3031 PMCDBG2(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
3033 if (PMC_IS_SYSTEM_MODE(mode))
3034 disp = PMC_DISP_STANDALONE;
3036 disp = PMC_DISP_THREAD;
3039 * check disposition for PMC row 'ri':
3041 * Expected disposition Row-disposition Result
3043 * STANDALONE STANDALONE or FREE proceed
3044 * STANDALONE THREAD fail
3045 * THREAD THREAD or FREE proceed
3046 * THREAD STANDALONE fail
3049 if (!PMC_ROW_DISP_IS_FREE(ri) &&
3050 !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
3051 !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
3058 PMCDBG2(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
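/*
 * Usage sketch (editorial): a zero return means the row is usable in
 * the requested mode, as in the allocation loop of the
 * PMC_OP_PMCALLOCATE handler:
 *
 *	if (pmc_can_allocate_row(n, mode) == 0 &&
 *	    pmc_can_allocate_rowindex(curthread->td_proc, n, cpu) == 0)
 *		... try pcd->pcd_allocate_pmc() on row 'n' ...
 */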
3065 * Find a PMC descriptor with user handle 'pmcid' for thread 'td'.
3069 pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
3073 KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
3074 ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
3075 PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));
3077 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
3078 if (pm->pm_id == pmcid)
3085 pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
3088 struct pmc *pm, *opm;
3089 struct pmc_owner *po;
3090 struct pmc_process *pp;
3092 PMCDBG1(PMC,FND,1, "find-pmc id=%d", pmcid);
3093 if (PMC_ID_TO_ROWINDEX(pmcid) >= md->pmd_npmc)
3096 if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL) {
3098 * In the case of PMC_F_DESCENDANTS child processes, we will not find
3099 * the current process in the owner hash list. Find the owner
3100 * process first and from there look up the po.
3102 if ((pp = pmc_find_process_descriptor(curthread->td_proc,
3103 PMC_FLAG_NONE)) == NULL) {
3106 opm = pp->pp_pmcs[PMC_ID_TO_ROWINDEX(pmcid)].pp_pmc;
3109 if ((opm->pm_flags & (PMC_F_ATTACHED_TO_OWNER|
3110 PMC_F_DESCENDANTS)) != (PMC_F_ATTACHED_TO_OWNER|
3117 if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
3120 PMCDBG2(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
3131 pmc_start(struct pmc *pm)
3134 struct pmc_owner *po;
3135 struct pmc_binding pb;
3136 struct pmc_classdep *pcd;
3137 int adjri, error, cpu, ri;
3140 ("[pmc,%d] null pm", __LINE__));
3142 mode = PMC_TO_MODE(pm);
3143 ri = PMC_TO_ROWINDEX(pm);
3144 pcd = pmc_ri_to_classdep(md, ri, &adjri);
3148 PMCDBG3(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);
3153 * Disallow PMCSTART if a logfile is required but has not been
3154 * configured yet.
3156 if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
3157 (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
3158 return (EDOOFUS); /* programming error */
3161 * If this is a sampling mode PMC, log mapping information for
3162 * the kernel modules that are currently loaded.
3164 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3165 pmc_log_kernel_mappings(pm);
3167 if (PMC_IS_VIRTUAL_MODE(mode)) {
3170 * If a PMCATTACH has never been done on this PMC,
3171 * attach it to its owner process.
3174 if (LIST_EMPTY(&pm->pm_targets))
3175 error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH :
3176 pmc_attach_process(po->po_owner, pm);
3179 * If the PMC is attached to its owner, then force a context
3180 * switch to ensure that the MD state gets set correctly.
3184 pm->pm_state = PMC_STATE_RUNNING;
3185 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER)
3186 pmc_force_context_switch();
3194 * A system-wide PMC.
3196 * Add the owner to the global list if this is a system-wide
3197 * sampling PMC.
3200 if (mode == PMC_MODE_SS) {
3202 * Log mapping information for all existing processes in the
3203 * system. Subsequent mappings are logged as they happen;
3204 * see pmc_process_mmap().
3206 if (po->po_logprocmaps == 0) {
3207 pmc_log_all_process_mappings(po);
3208 po->po_logprocmaps = 1;
3211 if (po->po_sscount == 1) {
3212 atomic_add_rel_int(&pmc_ss_count, 1);
3213 CK_LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
3214 PMCDBG1(PMC,OPS,1, "po=%p in global list", po);
3219 * Move to the CPU associated with this
3220 * PMC, and start the hardware.
3223 pmc_save_cpu_binding(&pb);
3225 cpu = PMC_TO_CPU(pm);
3227 if (!pmc_cpu_is_active(cpu))
3230 pmc_select_cpu(cpu);
3233 * global PMCs are configured at allocation time
3234 * so write out the initial value and start the PMC.
3237 pm->pm_state = PMC_STATE_RUNNING;
3240 if ((error = pcd->pcd_write_pmc(cpu, adjri,
3241 PMC_IS_SAMPLING_MODE(mode) ?
3242 pm->pm_sc.pm_reloadcount :
3243 pm->pm_sc.pm_initial)) == 0) {
3244 /* If a sampling mode PMC, reset stalled state. */
3245 if (PMC_IS_SAMPLING_MODE(mode))
3246 pm->pm_pcpu_state[cpu].pps_stalled = 0;
3248 /* Indicate that we desire this to run. Start it. */
3249 pm->pm_pcpu_state[cpu].pps_cpustate = 1;
3250 error = pcd->pcd_start_pmc(cpu, adjri);
3254 pmc_restore_cpu_binding(&pb);
3264 pmc_stop(struct pmc *pm)
3266 struct pmc_owner *po;
3267 struct pmc_binding pb;
3268 struct pmc_classdep *pcd;
3269 int adjri, cpu, error, ri;
3271 KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
3273 PMCDBG3(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm,
3274 PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm));
3276 pm->pm_state = PMC_STATE_STOPPED;
3279 * If the PMC is a virtual mode one, changing the state to
3280 * non-RUNNING is enough to ensure that the PMC never gets
3281 * scheduled.
3283 * If this PMC is currently running on a CPU, then it will be
3284 * handled correctly at the time its target process is context
3285 * switched out.
3288 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
3292 * A system-mode PMC. Move to the CPU associated with
3293 * this PMC, and stop the hardware. We update the
3294 * 'initial count' so that a subsequent PMCSTART will
3295 * resume counting from the current hardware count.
3298 pmc_save_cpu_binding(&pb);
3300 cpu = PMC_TO_CPU(pm);
3302 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
3303 ("[pmc,%d] illegal cpu=%d", __LINE__, cpu));
3305 if (!pmc_cpu_is_active(cpu))
3308 pmc_select_cpu(cpu);
3310 ri = PMC_TO_ROWINDEX(pm);
3311 pcd = pmc_ri_to_classdep(md, ri, &adjri);
3313 pm->pm_pcpu_state[cpu].pps_cpustate = 0;
3315 if ((error = pcd->pcd_stop_pmc(cpu, adjri)) == 0)
3316 error = pcd->pcd_read_pmc(cpu, adjri, &pm->pm_sc.pm_initial);
3319 pmc_restore_cpu_binding(&pb);
3323 /* remove this owner from the global list of SS PMC owners */
3324 if (PMC_TO_MODE(pm) == PMC_MODE_SS) {
3326 if (po->po_sscount == 0) {
3327 atomic_subtract_rel_int(&pmc_ss_count, 1);
3328 CK_LIST_REMOVE(po, po_ssnext);
3329 epoch_wait_preempt(global_epoch_preempt);
3330 PMCDBG1(PMC,OPS,2,"po=%p removed from global list", po);
3337 static struct pmc_classdep *
3338 pmc_class_to_classdep(enum pmc_class class)
3342 for (n = 0; n < md->pmd_nclass; n++)
3343 if (md->pmd_classdep[n].pcd_class == class)
3344 return (&md->pmd_classdep[n]);
3348 #if defined(HWPMC_DEBUG) && defined(KTR)
3349 static const char *pmc_op_to_name[] = {
3351 #define __PMC_OP(N, D) #N ,
3358 * The syscall interface
3361 #define PMC_GET_SX_XLOCK(...) do { \
3362 sx_xlock(&pmc_sx); \
3363 if (pmc_hook == NULL) { \
3364 sx_xunlock(&pmc_sx); \
3365 return __VA_ARGS__; \
3369 #define PMC_DOWNGRADE_SX() do { \
3370 sx_downgrade(&pmc_sx); \
3371 is_sx_downgraded = 1; \
3375 pmc_syscall_handler(struct thread *td, void *syscall_args)
3377 int error, is_sx_downgraded, op;
3378 struct pmc_syscall_args *c;
3379 void *pmclog_proc_handle;
3382 c = (struct pmc_syscall_args *)syscall_args;
3385 /* PMC isn't set up yet */
3386 if (pmc_hook == NULL)
3388 if (op == PMC_OP_CONFIGURELOG) {
3390 * We cannot create the logging process inside
3391 * pmclog_configure_log() because there is a LOR
3392 * between pmc_sx and process structure locks.
3393 * Instead, pre-create the process and ignite the loop
3394 * if everything is fine, otherwise direct the process
3395 * to exit.
3397 error = pmclog_proc_create(td, &pmclog_proc_handle);
3402 PMC_GET_SX_XLOCK(ENOSYS);
3403 is_sx_downgraded = 0;
3404 PMCDBG3(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
3405 pmc_op_to_name[op], arg);
3408 counter_u64_add(pmc_stats.pm_syscalls, 1);
3414 * Configure a log file.
3416 * XXX This OP will be reworked.
3419 case PMC_OP_CONFIGURELOG:
3423 struct pmc_owner *po;
3424 struct pmc_op_configurelog cl;
3426 if ((error = copyin(arg, &cl, sizeof(cl))) != 0) {
3427 pmclog_proc_ignite(pmclog_proc_handle, NULL);
3431 /* mark this process as owning a log file */
3433 if ((po = pmc_find_owner_descriptor(p)) == NULL)
3434 if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
3435 pmclog_proc_ignite(pmclog_proc_handle, NULL);
3441 * If a valid fd was passed in, try to configure that,
3442 * otherwise if 'fd' was less than zero and there was
3443 * a log file configured, flush its buffers and
3444 * de-configure it.
3446 if (cl.pm_logfd >= 0) {
3447 error = pmclog_configure_log(md, po, cl.pm_logfd);
3448 pmclog_proc_ignite(pmclog_proc_handle, error == 0 ?
3450 } else if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
3451 pmclog_proc_ignite(pmclog_proc_handle, NULL);
3452 error = pmclog_close(po);
3454 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
3455 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
3456 pm->pm_state == PMC_STATE_RUNNING)
3458 error = pmclog_deconfigure_log(po);
3461 pmclog_proc_ignite(pmclog_proc_handle, NULL);
3471 case PMC_OP_FLUSHLOG:
3473 struct pmc_owner *po;
3475 sx_assert(&pmc_sx, SX_XLOCKED);
3477 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3482 error = pmclog_flush(po, 0);
3490 case PMC_OP_CLOSELOG:
3492 struct pmc_owner *po;
3494 sx_assert(&pmc_sx, SX_XLOCKED);
3496 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3501 error = pmclog_close(po);
3506 * Retrieve hardware configuration.
3509 case PMC_OP_GETCPUINFO: /* CPU information */
3511 struct pmc_op_getcpuinfo gci;
3512 struct pmc_classinfo *pci;
3513 struct pmc_classdep *pcd;
3516 memset(&gci, 0, sizeof(gci));
3517 gci.pm_cputype = md->pmd_cputype;
3518 gci.pm_ncpu = pmc_cpu_max();
3519 gci.pm_npmc = md->pmd_npmc;
3520 gci.pm_nclass = md->pmd_nclass;
3521 pci = gci.pm_classes;
3522 pcd = md->pmd_classdep;
3523 for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) {
3524 pci->pm_caps = pcd->pcd_caps;
3525 pci->pm_class = pcd->pcd_class;
3526 pci->pm_width = pcd->pcd_width;
3527 pci->pm_num = pcd->pcd_num;
3529 error = copyout(&gci, arg, sizeof(gci));
3534 * Retrieve soft events list.
3536 case PMC_OP_GETDYNEVENTINFO:
3540 struct pmc_op_getdyneventinfo *gei;
3541 struct pmc_dyn_event_descr dev;
3542 struct pmc_soft *ps;
3545 sx_assert(&pmc_sx, SX_LOCKED);
3547 gei = (struct pmc_op_getdyneventinfo *) arg;
3549 if ((error = copyin(&gei->pm_class, &cl, sizeof(cl))) != 0)
3552 /* Only SOFT class is dynamic. */
3553 if (cl != PMC_CLASS_SOFT) {
3559 for (ev = PMC_EV_SOFT_FIRST; (int)ev <= PMC_EV_SOFT_LAST; ev++) {
3560 ps = pmc_soft_ev_acquire(ev);
3563 bcopy(&ps->ps_ev, &dev, sizeof(dev));
3564 pmc_soft_ev_release(ps);
3566 error = copyout(&dev,
3567 &gei->pm_events[nevent],
3568 sizeof(struct pmc_dyn_event_descr));
3576 error = copyout(&nevent, &gei->pm_nevent,
3582 * Get module statistics
3585 case PMC_OP_GETDRIVERSTATS:
3587 struct pmc_op_getdriverstats gms;
3588 #define CFETCH(a, b, field) a.field = counter_u64_fetch(b.field)
3589 CFETCH(gms, pmc_stats, pm_intr_ignored);
3590 CFETCH(gms, pmc_stats, pm_intr_processed);
3591 CFETCH(gms, pmc_stats, pm_intr_bufferfull);
3592 CFETCH(gms, pmc_stats, pm_syscalls);
3593 CFETCH(gms, pmc_stats, pm_syscall_errors);
3594 CFETCH(gms, pmc_stats, pm_buffer_requests);
3595 CFETCH(gms, pmc_stats, pm_buffer_requests_failed);
3596 CFETCH(gms, pmc_stats, pm_log_sweeps);
3598 error = copyout(&gms, arg, sizeof(gms));
3604 * Retrieve module version number
3607 case PMC_OP_GETMODULEVERSION:
3611 /* retrieve the client's idea of the ABI version */
3612 if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0)
3614 /* don't service clients newer than our driver */
3616 if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) {
3617 error = EPROGMISMATCH;
3620 error = copyout(&modv, arg, sizeof(int));
3626 * Retrieve the state of all the PMCs on a given
3627 * CPU.
3630 case PMC_OP_GETPMCINFO:
3634 size_t pmcinfo_size;
3635 uint32_t cpu, n, npmc;
3636 struct pmc_owner *po;
3637 struct pmc_binding pb;
3638 struct pmc_classdep *pcd;
3639 struct pmc_info *p, *pmcinfo;
3640 struct pmc_op_getpmcinfo *gpi;
3644 gpi = (struct pmc_op_getpmcinfo *) arg;
3646 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
3649 if (cpu >= pmc_cpu_max()) {
3654 if (!pmc_cpu_is_active(cpu)) {
3659 /* switch to CPU 'cpu' */
3660 pmc_save_cpu_binding(&pb);
3661 pmc_select_cpu(cpu);
3663 npmc = md->pmd_npmc;
3665 pmcinfo_size = npmc * sizeof(struct pmc_info);
3666 pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK | M_ZERO);
3670 for (n = 0; n < md->pmd_npmc; n++, p++) {
3672 pcd = pmc_ri_to_classdep(md, n, &ari);
3674 KASSERT(pcd != NULL,
3675 ("[pmc,%d] null pcd ri=%d", __LINE__, n));
3677 if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0)
3680 if (PMC_ROW_DISP_IS_STANDALONE(n))
3681 p->pm_rowdisp = PMC_DISP_STANDALONE;
3682 else if (PMC_ROW_DISP_IS_THREAD(n))
3683 p->pm_rowdisp = PMC_DISP_THREAD;
3685 p->pm_rowdisp = PMC_DISP_FREE;
3687 p->pm_ownerpid = -1;
3689 if (pm == NULL) /* no PMC associated */
3694 KASSERT(po->po_owner != NULL,
3695 ("[pmc,%d] pmc_owner had a null proc pointer",
3698 p->pm_ownerpid = po->po_owner->p_pid;
3699 p->pm_mode = PMC_TO_MODE(pm);
3700 p->pm_event = pm->pm_event;
3701 p->pm_flags = pm->pm_flags;
3703 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3705 pm->pm_sc.pm_reloadcount;
3708 pmc_restore_cpu_binding(&pb);
3710 /* now copy out the PMC info collected */
3712 error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
3714 free(pmcinfo, M_PMC);
3720 * Set the administrative state of a PMC, i.e., whether
3721 * the PMC is to be used or not.
3724 case PMC_OP_PMCADMIN:
3727 enum pmc_state request;
3730 struct pmc_op_pmcadmin pma;
3731 struct pmc_binding pb;
3733 sx_assert(&pmc_sx, SX_XLOCKED);
3735 KASSERT(td == curthread,
3736 ("[pmc,%d] td != curthread", __LINE__));
3738 error = priv_check(td, PRIV_PMC_MANAGE);
3742 if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
3747 if (cpu < 0 || cpu >= (int) pmc_cpu_max()) {
3752 if (!pmc_cpu_is_active(cpu)) {
3757 request = pma.pm_state;
3759 if (request != PMC_STATE_DISABLED &&
3760 request != PMC_STATE_FREE) {
3765 ri = pma.pm_pmc; /* pmc id == row index */
3766 if (ri < 0 || ri >= (int) md->pmd_npmc) {
3772 * We can't disable a PMC with a row-index allocated
3773 * for process virtual PMCs.
3776 if (PMC_ROW_DISP_IS_THREAD(ri) &&
3777 request == PMC_STATE_DISABLED) {
3783 * otherwise, this PMC on this CPU is either free or
3784 * in system-wide mode.
3787 pmc_save_cpu_binding(&pb);
3788 pmc_select_cpu(cpu);
3791 phw = pc->pc_hwpmcs[ri];
3794 * XXX do we need some kind of 'forced' disable?
3797 if (phw->phw_pmc == NULL) {
3798 if (request == PMC_STATE_DISABLED &&
3799 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
3800 phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
3801 PMC_MARK_ROW_STANDALONE(ri);
3802 } else if (request == PMC_STATE_FREE &&
3803 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
3804 phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
3805 PMC_UNMARK_ROW_STANDALONE(ri);
3807 /* other cases are a no-op */
3811 pmc_restore_cpu_binding(&pb);
3820 case PMC_OP_PMCALLOCATE:
3828 struct pmc_binding pb;
3829 struct pmc_classdep *pcd;
3830 struct pmc_op_pmcallocate pa;
3832 if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
3839 if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC &&
3840 mode != PMC_MODE_TS && mode != PMC_MODE_TC) ||
3841 (cpu != (u_int) PMC_CPU_ANY && cpu >= pmc_cpu_max())) {
3847 * Virtual PMCs should only ask for a default CPU.
3848 * System mode PMCs need to specify a non-default CPU.
3851 if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
3852 (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
3858 * Check that an inactive CPU is not being asked for.
3861 if (PMC_IS_SYSTEM_MODE(mode) && !pmc_cpu_is_active(cpu)) {
3867 * Refuse an allocation for a system-wide PMC if this
3868 * process has been jailed, or if this process lacks
3869 * super-user credentials and the sysctl tunable
3870 * 'security.bsd.unprivileged_syspmcs' is zero.
3873 if (PMC_IS_SYSTEM_MODE(mode)) {
3874 if (jailed(curthread->td_ucred)) {
3878 if (!pmc_unprivileged_syspmcs) {
3879 error = priv_check(curthread,
3887 * Look for valid values for 'pm_flags'
3890 if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW |
3891 PMC_F_LOG_PROCEXIT | PMC_F_CALLCHAIN |
3892 PMC_F_USERCALLCHAIN)) != 0) {
3897 /* PMC_F_USERCALLCHAIN is only valid with PMC_F_CALLCHAIN */
3898 if ((pa.pm_flags & (PMC_F_CALLCHAIN | PMC_F_USERCALLCHAIN)) ==
3899 PMC_F_USERCALLCHAIN) {
3904 /* PMC_F_USERCALLCHAIN is only valid for sampling mode */
3905 if (pa.pm_flags & PMC_F_USERCALLCHAIN &&
3906 mode != PMC_MODE_TS && mode != PMC_MODE_SS) {
3911 /* process logging options are not allowed for system PMCs */
3912 if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags &
3913 (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) {
3919 * All sampling mode PMCs need to be able to interrupt the
3920 * CPU.
3922 if (PMC_IS_SAMPLING_MODE(mode))
3923 caps |= PMC_CAP_INTERRUPT;
3925 /* A valid class specifier should have been passed in. */
3926 pcd = pmc_class_to_classdep(pa.pm_class);
3932 /* The requested PMC capabilities should be feasible. */
3933 if ((pcd->pcd_caps & caps) != caps) {
3938 PMCDBG4(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
3939 pa.pm_ev, caps, mode, cpu);
3941 pmc = pmc_allocate_pmc_descriptor();
3942 pmc->pm_id = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class,
3944 pmc->pm_event = pa.pm_ev;
3945 pmc->pm_state = PMC_STATE_FREE;
3946 pmc->pm_caps = caps;
3947 pmc->pm_flags = pa.pm_flags;
3949 /* XXX set lower bound on sampling for process counters */
3950 if (PMC_IS_SAMPLING_MODE(mode)) {
3952 * Don't permit requested sample rate to be less than 1000
3954 if (pa.pm_count < 1000)
3955 log(LOG_WARNING,
3956 "pmcallocate: passed sample rate %ju - setting to 1000\n",
3957 (uintmax_t)pa.pm_count);
3958 pmc->pm_sc.pm_reloadcount = MAX(1000, pa.pm_count);
3960 pmc->pm_sc.pm_initial = pa.pm_count;
3962 /* switch thread to CPU 'cpu' */
3963 pmc_save_cpu_binding(&pb);
3965 #define PMC_IS_SHAREABLE_PMC(cpu, n) \
3966 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \
3967 PMC_PHW_FLAG_IS_SHAREABLE)
3968 #define PMC_IS_UNALLOCATED(cpu, n) \
3969 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
3971 if (PMC_IS_SYSTEM_MODE(mode)) {
3972 pmc_select_cpu(cpu);
3973 for (n = pcd->pcd_ri; n < (int) md->pmd_npmc; n++) {
3974 pcd = pmc_ri_to_classdep(md, n, &adjri);
3975 if (pmc_can_allocate_row(n, mode) == 0 &&
3976 pmc_can_allocate_rowindex(
3977 curthread->td_proc, n, cpu) == 0 &&
3978 (PMC_IS_UNALLOCATED(cpu, n) ||
3979 PMC_IS_SHAREABLE_PMC(cpu, n)) &&
3980 pcd->pcd_allocate_pmc(cpu, adjri, pmc,
3985 /* Process virtual mode */
3986 for (n = pcd->pcd_ri; n < (int) md->pmd_npmc; n++) {
3987 pcd = pmc_ri_to_classdep(md, n, &adjri);
3988 if (pmc_can_allocate_row(n, mode) == 0 &&
3989 pmc_can_allocate_rowindex(
3990 curthread->td_proc, n,
3991 PMC_CPU_ANY) == 0 &&
3992 pcd->pcd_allocate_pmc(curthread->td_oncpu,
3993 adjri, pmc, &pa) == 0)
3998 #undef PMC_IS_UNALLOCATED
3999 #undef PMC_IS_SHAREABLE_PMC
4001 pmc_restore_cpu_binding(&pb);
4003 if (n == (int) md->pmd_npmc) {
4004 pmc_destroy_pmc_descriptor(pmc);
4010 /* Fill in the correct value in the ID field */
4011 pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n);
4013 PMCDBG5(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x",
4014 pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id);
4016 /* Process mode PMCs with logging enabled need log files */
4017 if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW))
4018 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
4020 /* All system mode sampling PMCs require a log file */
4021 if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode))
4022 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
4025 * Configure global PMCs immediately.
4028 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) {
4030 pmc_save_cpu_binding(&pb);
4031 pmc_select_cpu(cpu);
4033 phw = pmc_pcpu[cpu]->pc_hwpmcs[n];
4034 pcd = pmc_ri_to_classdep(md, n, &adjri);
4036 if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 ||
4037 (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) {
4038 (void) pcd->pcd_release_pmc(cpu, adjri, pmc);
4039 pmc_destroy_pmc_descriptor(pmc);
4041 pmc_restore_cpu_binding(&pb);
4046 pmc_restore_cpu_binding(&pb);
4049 pmc->pm_state = PMC_STATE_ALLOCATED;
4050 pmc->pm_class = pa.pm_class;
4053 * mark row disposition
4056 if (PMC_IS_SYSTEM_MODE(mode))
4057 PMC_MARK_ROW_STANDALONE(n);
4059 PMC_MARK_ROW_THREAD(n);
4062 * Register this PMC with the current thread as its owner.
4066 pmc_register_owner(curthread->td_proc, pmc)) != 0) {
4067 pmc_release_pmc_descriptor(pmc);
4068 pmc_destroy_pmc_descriptor(pmc);
4075 * Return the allocated index.
4078 pa.pm_pmcid = pmc->pm_id;
4080 error = copyout(&pa, arg, sizeof(pa));
4086 * Attach a PMC to a process.
4089 case PMC_OP_PMCATTACH:
4093 struct pmc_op_pmcattach a;
4095 sx_assert(&pmc_sx, SX_XLOCKED);
4097 if ((error = copyin(arg, &a, sizeof(a))) != 0)
4103 } else if (a.pm_pid == 0)
4104 a.pm_pid = td->td_proc->p_pid;
4106 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
4109 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
4114 /* PMCs may be (re)attached only when allocated or stopped */
4115 if (pm->pm_state == PMC_STATE_RUNNING) {
4118 } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
4119 pm->pm_state != PMC_STATE_STOPPED) {
4125 if ((p = pfind(a.pm_pid)) == NULL) {
4131 * Ignore processes that are exiting.
4133 if (p->p_flag & P_WEXIT) {
4135 PROC_UNLOCK(p); /* pfind() returns a locked process */
4140 * we are allowed to attach a PMC to a process if
4141 * we can debug it.
4143 error = p_candebug(curthread, p);
4148 error = pmc_attach_process(p, pm);
4154 * Detach an attached PMC from a process.
4157 case PMC_OP_PMCDETACH:
4161 struct pmc_op_pmcattach a;
4163 if ((error = copyin(arg, &a, sizeof(a))) != 0)
4169 } else if (a.pm_pid == 0)
4170 a.pm_pid = td->td_proc->p_pid;
4172 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
4175 if ((p = pfind(a.pm_pid)) == NULL) {
4181 * Treat processes that are in the process of exiting
4182 * as if they were not present.
4185 if (p->p_flag & P_WEXIT)
4188 PROC_UNLOCK(p); /* pfind() returns a locked process */
4191 error = pmc_detach_process(p, pm);
4197 * Retrieve the MSR number associated with the counter
4198 * 'pmc_id'. This allows processes to directly use RDPMC
4199 * instructions to read their PMCs, without the overhead of a
4200 * system call.
4203 case PMC_OP_PMCGETMSR:
4207 struct pmc_target *pt;
4208 struct pmc_op_getmsr gm;
4209 struct pmc_classdep *pcd;
4213 if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
4216 if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
4220 * The allocated PMC has to be a process virtual PMC,
4221 * i.e., of type MODE_T[CS]. Global PMCs can only be
4222 * read using the PMCREAD operation since they may be
4223 * allocated on a different CPU than the one we could
4224 * be running on at the time of the RDPMC instruction.
4226 * The GETMSR operation is not allowed for PMCs that
4227 * are inherited across processes.
4230 if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) ||
4231 (pm->pm_flags & PMC_F_DESCENDANTS)) {
4237 * It only makes sense to use a RDPMC (or its
4238 * equivalent instruction on non-x86 architectures) on
4239 * a process that has allocated and attached a PMC to
4240 * itself. Conversely the PMC is only allowed to have
4241 * one process attached to it -- its owner.
4244 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL ||
4245 LIST_NEXT(pt, pt_next) != NULL ||
4246 pt->pt_process->pp_proc != pm->pm_owner->po_owner) {
4251 ri = PMC_TO_ROWINDEX(pm);
4252 pcd = pmc_ri_to_classdep(md, ri, &adjri);
4254 /* PMC class has no 'GETMSR' support */
4255 if (pcd->pcd_get_msr == NULL) {
4260 if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) < 0)
4263 if ((error = copyout(&gm, arg, sizeof(gm))) < 0)
4267 * Mark our process as using MSRs. Update machine
4268 * state using a forced context switch.
4271 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS;
4272 pmc_force_context_switch();
4278 * Release an allocated PMC
4281 case PMC_OP_PMCRELEASE:
4285 struct pmc_owner *po;
4286 struct pmc_op_simple sp;
4289 * Find PMC pointer for the named PMC.
4291 * Use pmc_release_pmc_descriptor() to switch off the
4292 * PMC, remove all its target threads, and remove the
4293 * PMC from its owner's list.
4295 * Remove the owner record if this is the last PMC
4296 * owned by this process.
4301 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4304 pmcid = sp.pm_pmcid;
4306 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4310 pmc_release_pmc_descriptor(pm);
4311 pmc_maybe_remove_owner(po);
4312 pmc_destroy_pmc_descriptor(pm);
4318 * Read and/or write a PMC.
4326 pmc_value_t oldvalue;
4327 struct pmc_binding pb;
4328 struct pmc_op_pmcrw prw;
4329 struct pmc_classdep *pcd;
4330 struct pmc_op_pmcrw *pprw;
4334 if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
4338 PMCDBG2(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
4341 /* must have at least one flag set */
4342 if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
4347 /* locate pmc descriptor */
4348 if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
4351 /* Can't read a PMC that hasn't been started. */
4352 if (pm->pm_state != PMC_STATE_ALLOCATED &&
4353 pm->pm_state != PMC_STATE_STOPPED &&
4354 pm->pm_state != PMC_STATE_RUNNING) {
4359 /* writing a new value is allowed only for 'STOPPED' pmcs */
4360 if (pm->pm_state == PMC_STATE_RUNNING &&
4361 (prw.pm_flags & PMC_F_NEWVALUE)) {
4366 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
4369 * If this PMC is attached to its owner (i.e.,
4370 * the process requesting this operation) and
4371 * is running, then attempt to get an
4372 * up-to-date reading from hardware for a READ.
4373 * Writes are only allowed when the PMC is
4374 * stopped, so only update the saved value
4375 * field.
4377 * If the PMC is not running, or is not
4378 * attached to its owner, read/write to the
4379 * software state.
4382 ri = PMC_TO_ROWINDEX(pm);
4383 pcd = pmc_ri_to_classdep(md, ri, &adjri);
4385 mtx_pool_lock_spin(pmc_mtxpool, pm);
4386 cpu = curthread->td_oncpu;
4388 if (prw.pm_flags & PMC_F_OLDVALUE) {
4389 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
4390 (pm->pm_state == PMC_STATE_RUNNING))
4391 error = (*pcd->pcd_read_pmc)(cpu, adjri,
4394 oldvalue = pm->pm_gv.pm_savedvalue;
4396 if (prw.pm_flags & PMC_F_NEWVALUE)
4397 pm->pm_gv.pm_savedvalue = prw.pm_value;
4399 mtx_pool_unlock_spin(pmc_mtxpool, pm);
4401 } else { /* System mode PMCs */
4402 cpu = PMC_TO_CPU(pm);
4403 ri = PMC_TO_ROWINDEX(pm);
4404 pcd = pmc_ri_to_classdep(md, ri, &adjri);
4406 if (!pmc_cpu_is_active(cpu)) {
4411 /* move this thread to CPU 'cpu' */
4412 pmc_save_cpu_binding(&pb);
4413 pmc_select_cpu(cpu);
4416 /* save old value */
4417 if (prw.pm_flags & PMC_F_OLDVALUE)
4418 if ((error = (*pcd->pcd_read_pmc)(cpu, adjri,
4421 /* write out new value */
4422 if (prw.pm_flags & PMC_F_NEWVALUE)
4423 error = (*pcd->pcd_write_pmc)(cpu, adjri,
4427 pmc_restore_cpu_binding(&pb);
4432 pprw = (struct pmc_op_pmcrw *) arg;
4435 if (prw.pm_flags & PMC_F_NEWVALUE)
4436 PMCDBG3(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
4437 ri, prw.pm_value, oldvalue);
4438 else if (prw.pm_flags & PMC_F_OLDVALUE)
4439 PMCDBG2(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue);
4442 /* return old value if requested */
4443 if (prw.pm_flags & PMC_F_OLDVALUE)
4444 if ((error = copyout(&oldvalue, &pprw->pm_value,
4445 sizeof(prw.pm_value))))
4453 * Set the sampling rate for a sampling mode PMC and the
4454 * initial count for a counting mode PMC.
4457 case PMC_OP_PMCSETCOUNT:
4460 struct pmc_op_pmcsetcount sc;
4464 if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
4467 if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
4470 if (pm->pm_state == PMC_STATE_RUNNING) {
4475 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
4477 * Don't permit requested sample rate to be less than 1000
4479 if (sc.pm_count < 1000)
4480 log(LOG_WARNING,
4481 "pmcsetcount: passed sample rate %ju - setting to 1000\n",
4482 (uintmax_t)sc.pm_count);
4483 pm->pm_sc.pm_reloadcount = MAX(1000, sc.pm_count);
4485 pm->pm_sc.pm_initial = sc.pm_count;
4494 case PMC_OP_PMCSTART:
4498 struct pmc_op_simple sp;
4500 sx_assert(&pmc_sx, SX_XLOCKED);
4502 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4505 pmcid = sp.pm_pmcid;
4507 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4510 KASSERT(pmcid == pm->pm_id,
4511 ("[pmc,%d] pmcid %x != id %x", __LINE__,
4514 if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
4516 else if (pm->pm_state != PMC_STATE_STOPPED &&
4517 pm->pm_state != PMC_STATE_ALLOCATED) {
4522 error = pmc_start(pm);
4531 case PMC_OP_PMCSTOP:
4535 struct pmc_op_simple sp;
4539 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4542 pmcid = sp.pm_pmcid;
4545 * Mark the PMC as inactive and invoke the MD stop
4546 * routines if needed.
4549 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4552 KASSERT(pmcid == pm->pm_id,
4553 ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
4556 if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
4558 else if (pm->pm_state != PMC_STATE_RUNNING) {
4563 error = pmc_stop(pm);
4569 * Write a user-supplied value to the log file.
4572 case PMC_OP_WRITELOG:
4574 struct pmc_op_writelog wl;
4575 struct pmc_owner *po;
4579 if ((error = copyin(arg, &wl, sizeof(wl))) != 0)
4582 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
4587 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
4592 error = pmclog_process_userlog(po, &wl);
4602 if (is_sx_downgraded)
4603 sx_sunlock(&pmc_sx);
4605 sx_xunlock(&pmc_sx);
4608 counter_u64_add(pmc_stats.pm_syscall_errors, 1);
4619 * Mark the thread as needing callchain capture and post an AST. The
4620 * actual callchain capture will be done in a context where it is safe
4621 * to take page faults.
4625 pmc_post_callchain_callback(void)
4632 * If there are multiple PMCs for the same interrupt, ignore the new post.
4634 if (td->td_pflags & TDP_CALLCHAIN)
4638 * Mark this thread as needing callchain capture.
4639 * `td->td_pflags' will be safe to touch because this thread
4640 * was in user space when it was interrupted.
4642 td->td_pflags |= TDP_CALLCHAIN;
4645 * Don't let this thread migrate between CPUs until callchain
4646 * capture completes.
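/*
 * Editorial summary of the deferred-capture handshake, as reconstructed
 * from this file: the interrupt path marks the thread with
 * TDP_CALLCHAIN and pins it to its CPU; the AST then re-enters the hook
 * as PMC_FN_USER_CALLCHAIN, which walks the user stack, unpins the
 * thread with sched_unpin(), and clears TDP_CALLCHAIN.
 */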
4654 * Find a free slot in the per-cpu array of samples and capture the
4655 * current callchain there. If a sample was successfully added, a bit
4656 * is set in the per-CPU 'pmc_sampled' flag denoting that the
4657 * DO_SAMPLES hook needs to be invoked from the clock handler.
4659 * This function is meant to be called from an NMI handler. It cannot
4660 * use any of the locking primitives supplied by the OS.
4664 pmc_add_sample(ring_type_t ring, struct pmc *pm, struct trapframe *tf)
4666 int error, cpu, callchaindepth, inuserspace;
4668 struct pmc_sample *ps;
4669 struct pmc_samplebuffer *psb;
4674 * Allocate space for a sample buffer.
4677 psb = pmc_pcpu[cpu]->pc_sb[ring];
4678 inuserspace = TRAPF_USERMODE(tf);
4679 ps = PMC_PROD_SAMPLE(psb);
4680 if (psb->ps_considx != psb->ps_prodidx &&
4681 ps->ps_nsamples) { /* in use, reader hasn't caught up */
4682 pm->pm_pcpu_state[cpu].pps_stalled = 1;
4683 counter_u64_add(pmc_stats.pm_intr_bufferfull, 1);
4684 PMCDBG6(SAM,INT,1,"(spc) cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d",
4685 cpu, pm, (void *) tf, inuserspace,
4686 (int) (psb->ps_prodidx & pmc_sample_mask),
4687 (int) (psb->ps_considx & pmc_sample_mask));
4693 /* Fill in entry. */
4694 PMCDBG6(SAM,INT,1,"cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm,
4695 (void *) tf, inuserspace,
4696 (int) (psb->ps_prodidx & pmc_sample_mask),
4697 (int) (psb->ps_considx & pmc_sample_mask));
4702 ps->ps_pid = td->td_proc->p_pid;
4703 ps->ps_tid = td->td_tid;
4704 ps->ps_tsc = pmc_rdtsc();
4705 ps->ps_ticks = ticks;
4707 ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0;
4709 callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ?
4710 pmc_callchaindepth : 1;
4712 MPASS(ps->ps_pc != NULL);
4713 if (callchaindepth == 1)
4714 ps->ps_pc[0] = PMC_TRAPFRAME_TO_PC(tf);
4717 * Kernel stack traversals can be done immediately,
4718 * while we defer to an AST for user space traversals.
4722 pmc_save_kernel_callchain(ps->ps_pc,
4723 callchaindepth, tf);
4725 pmc_post_callchain_callback();
4726 callchaindepth = PMC_USER_CALLCHAIN_PENDING;
4731 if (ring == PMC_UR) {
4732 ps->ps_nsamples_actual = callchaindepth; /* mark entry as in use */
4733 ps->ps_nsamples = PMC_USER_CALLCHAIN_PENDING;
4734 } else
4735 ps->ps_nsamples = callchaindepth; /* mark entry as in use */
4737 KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0,
4738 ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
4739 (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4741 counter_u64_add(pm->pm_runcount, 1); /* hold onto PMC */
4742 /* increment write pointer */
4745 /* mark CPU as needing processing */
4746 if (callchaindepth != PMC_USER_CALLCHAIN_PENDING)
4747 DPCPU_SET(pmc_sampled, 1);
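/*
 * Editorial sketch of the ring discipline used above (assuming
 * ps_prodidx and ps_considx are free-running counters masked by
 * pmc_sample_mask into the buffer):
 *
 *	ps = PMC_PROD_SAMPLE(psb);
 *	if (psb->ps_considx != psb->ps_prodidx && ps->ps_nsamples != 0)
 *		... full: stall the PMC rather than overwrite ...
 *	else
 *		... fill 'ps' and advance the producer index ...
 */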
4753 * Interrupt processing.
4755 * This function is meant to be called from an NMI handler. It cannot
4756 * use any of the locking primitives supplied by the OS.
4760 pmc_process_interrupt(int ring, struct pmc *pm, struct trapframe *tf)
4765 if ((pm->pm_flags & PMC_F_USERCALLCHAIN) &&
4766 (td->td_proc->p_flag & P_KPROC) == 0 &&
4767 !TRAPF_USERMODE(tf)) {
4768 atomic_add_int(&td->td_pmcpend, 1);
4769 return (pmc_add_sample(PMC_UR, pm, tf));
4771 return (pmc_add_sample(ring, pm, tf));
4775 * Capture a user call chain. This function will be called from ast()
4776 * before control returns to userland and before the process gets
4777 * rescheduled.
4781 pmc_capture_user_callchain(int cpu, int ring, struct trapframe *tf)
4785 struct pmc_sample *ps;
4786 struct pmc_samplebuffer *psb;
4787 uint64_t considx, prodidx;
4788 int nsamples, nrecords, pass, iter;
4792 int start_ticks = ticks;
4794 psb = pmc_pcpu[cpu]->pc_sb[ring];
4797 KASSERT(td->td_pflags & TDP_CALLCHAIN,
4798 ("[pmc,%d] Retrieving callchain for thread that doesn't want it",
4809 nrecords = atomic_readandclear_32(&td->td_pmcpend);
4811 for (iter = 0, considx = psb->ps_considx, prodidx = psb->ps_prodidx;
4812 considx < prodidx && iter < pmc_nsamples; considx++, iter++) {
4813 ps = PMC_CONS_SAMPLE_OFF(psb, considx);
4816 * Iterate through all deferred callchain requests.
4817 * Walk from the current read pointer to the current
4818 * write pointer.
4822 if (ps->ps_nsamples == PMC_SAMPLE_FREE) {
4827 if ((ps->ps_pmc == NULL) ||
4828 (ps->ps_pmc->pm_state != PMC_STATE_RUNNING))
4831 if (ps->ps_td != td ||
4832 ps->ps_nsamples != PMC_USER_CALLCHAIN_PENDING ||
4833 ps->ps_pmc->pm_state != PMC_STATE_RUNNING)
4836 KASSERT(ps->ps_cpu == cpu,
4837 ("[pmc,%d] cpu mismatch ps_cpu=%d pcpu=%d", __LINE__,
4838 ps->ps_cpu, PCPU_GET(cpuid)));
4842 KASSERT(pm->pm_flags & PMC_F_CALLCHAIN,
4843 ("[pmc,%d] Retrieving callchain for PMC that doesn't "
4844 "want it", __LINE__));
4846 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
4847 ("[pmc,%d] runcount %ld", __LINE__, (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4849 if (ring == PMC_UR) {
4850 nsamples = ps->ps_nsamples_actual;
4851 counter_u64_add(pmc_stats.pm_merges, 1);
4856 * Retrieve the callchain and mark the sample buffer
4857 * as 'processable' by the timer tick sweep code.
4864 if (__predict_true(nsamples < pmc_callchaindepth - 1))
4865 nsamples += pmc_save_user_callchain(ps->ps_pc + nsamples,
4866 pmc_callchaindepth - nsamples - 1, tf);
4869 * We have to prevent hardclock from potentially overwriting
4870 * this sample between when we read the value and when we set
4871 * it.
4875 * Verify that the sample hasn't been dropped in the meantime
4877 if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) {
4878 ps->ps_nsamples = nsamples;
4880 * If we couldn't get a sample, simply drop the reference
4883 counter_u64_add(pm->pm_runcount, -1);
4886 if (nrecords-- == 1)
4889 if (__predict_false(ring == PMC_UR && td->td_pmcpend)) {
4894 /* only collect samples for this part once */
4899 if ((ticks - start_ticks) > hz)
4900 log(LOG_ERR, "%s took %d ticks\n", __func__, (ticks - start_ticks));
4903 /* mark CPU as needing processing */
4904 DPCPU_SET(pmc_sampled, 1);
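/*
 * Editorial note: a sample left in PMC_USER_CALLCHAIN_PENDING state is
 * not dropped immediately; pmc_process_samples() below gives the AST
 * roughly one tick to fill it in before counting an overwrite and
 * discarding it.
 */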
4908 * Process saved PC samples.
4912 pmc_process_samples(int cpu, ring_type_t ring)
4917 struct pmc_owner *po;
4918 struct pmc_sample *ps;
4919 struct pmc_classdep *pcd;
4920 struct pmc_samplebuffer *psb;
4923 KASSERT(PCPU_GET(cpuid) == cpu,
4924 ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__,
4925 PCPU_GET(cpuid), cpu));
4927 psb = pmc_pcpu[cpu]->pc_sb[ring];
4928 delta = psb->ps_prodidx - psb->ps_considx;
4929 MPASS(delta <= pmc_nsamples);
4930 MPASS(psb->ps_considx <= psb->ps_prodidx);
4931 for (n = 0; psb->ps_considx < psb->ps_prodidx; psb->ps_considx++, n++) {
4932 ps = PMC_CONS_SAMPLE(psb);
4934 if (__predict_false(ps->ps_nsamples == PMC_SAMPLE_FREE))
4937 /* skip non-running samples */
4938 if (pm->pm_state != PMC_STATE_RUNNING)
4941 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
4942 ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
4943 (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4947 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
4948 ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__,
4949 pm, PMC_TO_MODE(pm)));
4952 /* If there is a pending AST, wait for completion. */
4953 if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) {
4954 /* If we've been waiting more than 1 tick to
4955 * collect a callchain for this record, then
4956 * drop it and move on.
4958 if (ticks - ps->ps_ticks > 1) {
4960 * Track how often we hit this, as it will
4961 * preferentially lose user samples
4962 * for long-running system calls.
4964 counter_u64_add(pmc_stats.pm_overwrites, 1);
4967 /* Need a rescan at a later time. */
4968 DPCPU_SET(pmc_sampled, 1);
4972 PMCDBG6(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu,
4973 pm, ps->ps_nsamples, ps->ps_flags,
4974 (int) (psb->ps_prodidx & pmc_sample_mask),
4975 (int) (psb->ps_considx & pmc_sample_mask));
4978 * If this is a process-mode PMC that is attached to
4979 * its owner, and if the PC is in user mode, update
4980 * profiling statistics like timer-based profiling
4981 * would have done.
4983 * Otherwise, this is either a sampling-mode PMC that
4984 * is attached to a different process than its owner,
4985 * or a system-wide sampling PMC. Dispatch a log
4986 * entry to the PMC's owner process.
4988 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
4989 if (ps->ps_flags & PMC_CC_F_USERSPACE) {
4990 td = FIRST_THREAD_IN_PROC(po->po_owner);
4991 addupc_intr(td, ps->ps_pc[0], 1);
4994 pmclog_process_callchain(pm, ps);
4997 ps->ps_nsamples = 0; /* mark entry as free */
4998 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
4999 ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
5000 (unsigned long)counter_u64_fetch(pm->pm_runcount)));
5002 counter_u64_add(pm->pm_runcount, -1);
5005 counter_u64_add(pmc_stats.pm_log_sweeps, 1);
5007 /* Do not re-enable stalled PMCs if we failed to process any samples */
5012 * Restart any stalled sampling PMCs on this CPU.
5014 * If the NMI handler sets the pps_stalled field of a PMC after
5015 * the check below, we'll end up processing the stalled PMC at
5016 * the next hardclock tick.
5018 for (n = 0; n < md->pmd_npmc; n++) {
5019 pcd = pmc_ri_to_classdep(md, n, &adjri);
5020 KASSERT(pcd != NULL,
5021 ("[pmc,%d] null pcd ri=%d", __LINE__, n));
5022 (void) (*pcd->pcd_get_config)(cpu,adjri,&pm);
5024 if (pm == NULL || /* !cfg'ed */
5025 pm->pm_state != PMC_STATE_RUNNING || /* !active */
5026 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */
5027 !pm->pm_pcpu_state[cpu].pps_cpustate || /* !desired */
5028 !pm->pm_pcpu_state[cpu].pps_stalled) /* !stalled */
5031 pm->pm_pcpu_state[cpu].pps_stalled = 0;
5032 (*pcd->pcd_start_pmc)(cpu, adjri);
5041 * Handle a process exit.
5043 * Remove this process from all hash tables. If this process
5044 * owned any PMCs, turn off those PMCs and deallocate them,
5045 * removing any associations with target processes.
5047 * This function will be called by the last 'thread' of a
5048 * process.
5050 * XXX This eventhandler gets called early in the exit process.
5051 * Consider using a 'hook' invocation from thread_exit() or equivalent
5052 * spot. Another negative is that kse_exit doesn't seem to call
5053 * exit1() [??].
5058 pmc_process_exit(void *arg __unused, struct proc *p)
5063 int is_using_hwpmcs;
5064 struct pmc_owner *po;
5065 struct pmc_process *pp;
5066 struct pmc_classdep *pcd;
5067 pmc_value_t newvalue, tmp;
5070 is_using_hwpmcs = p->p_flag & P_HWPMC;
5074 * Log a sysexit event to all SS PMC owners.
5077 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5078 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5079 pmclog_process_sysexit(po, p->p_pid);
5082 if (!is_using_hwpmcs)
	PMC_GET_SX_XLOCK();
	PMCDBG3(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid,
	    p->p_comm);

	/*
	 * Since this code is invoked by the last thread in an exiting
	 * process, we would have context switched IN at some prior
	 * point.  However, with PREEMPTION, kernel mode context
	 * switches may happen any time, so we want to disable a
	 * context switch OUT till we get any PMCs targeting this
	 * process off the hardware.
	 *
	 * We also need to atomically remove this process'
	 * entry from our target process hash table, using
	 * PMC_FLAG_REMOVE.
	 */
	critical_enter(); /* no preemption */

	cpu = curthread->td_oncpu;
	if ((pp = pmc_find_process_descriptor(p,
	    PMC_FLAG_REMOVE)) != NULL) {

		PMCDBG2(PRC,EXT,2,
		    "process-exit proc=%p pmc-process=%p", p, pp);

		/*
		 * The exiting process could be the target of some
		 * PMCs that are running on the currently executing
		 * CPU.
		 *
		 * We need to turn these PMCs off like we would do
		 * at context switch OUT time.
		 */
		for (ri = 0; ri < md->pmd_npmc; ri++) {

			/*
			 * Pick up the pmc pointer from hardware
			 * state similar to the CSW_OUT code.
			 */
			pm = NULL;

			pcd = pmc_ri_to_classdep(md, ri, &adjri);

			(void) (*pcd->pcd_get_config)(cpu, adjri, &pm);

			PMCDBG2(PRC,EXT,2, "ri=%d pm=%p", ri, pm);

			if (pm == NULL ||
			    !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
				continue;
			PMCDBG4(PRC,EXT,2, "ppmcs[%d]=%p pm=%p "
			    "state=%d", ri, pp->pp_pmcs[ri].pp_pmc,
			    pm, pm->pm_state);

			KASSERT(PMC_TO_ROWINDEX(pm) == ri,
			    ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
			    __LINE__, PMC_TO_ROWINDEX(pm), ri));

			KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
			    ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
			    __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc));

			KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
			    ("[pmc,%d] bad runcount ri %d rc %ld", __LINE__,
			    ri, (unsigned long)counter_u64_fetch(pm->pm_runcount)));
			/*
			 * Change desired state, and then stop if not
			 * stalled.  This two-step dance should avoid
			 * race conditions where an interrupt re-enables
			 * the PMC after this code has already checked
			 * the pm_stalled flag.
			 */
			if (pm->pm_pcpu_state[cpu].pps_cpustate) {
				pm->pm_pcpu_state[cpu].pps_cpustate = 0;
				if (!pm->pm_pcpu_state[cpu].pps_stalled) {
					(void) pcd->pcd_stop_pmc(cpu, adjri);

					if (PMC_TO_MODE(pm) == PMC_MODE_TC) {
						pcd->pcd_read_pmc(cpu, adjri,
						    &newvalue);
						tmp = newvalue -
						    PMC_PCPU_SAVED(cpu,ri);

						mtx_pool_lock_spin(pmc_mtxpool,
						    pm);
						pm->pm_gv.pm_savedvalue += tmp;
						pp->pp_pmcs[ri].pp_pmcval +=
						    tmp;
						mtx_pool_unlock_spin(
						    pmc_mtxpool, pm);
					}
				}
			}

			KASSERT((int64_t) counter_u64_fetch(pm->pm_runcount) > 0,
			    ("[pmc,%d] runcount is %d", __LINE__, ri));

			counter_u64_add(pm->pm_runcount, -1);

			(void) pcd->pcd_config_pmc(cpu, adjri, NULL);
		}
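		/*
		 * For thread-virtual counting (PMC_MODE_TC) PMCs, the
		 * hardware count accumulated since the last switch-in,
		 * i.e. newvalue - PMC_PCPU_SAVED(cpu,ri), was folded into
		 * both the PMC's global saved value and the per-target
		 * total above, mirroring regular CSW_OUT accounting one
		 * final time.
		 */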
		/*
		 * Inform the MD layer of this pseudo "context switch
		 * out".
		 */
		(void) md->pmd_switch_out(pmc_pcpu[cpu], pp);

		critical_exit(); /* ok to be pre-empted now */

		/*
		 * Unlink this process from the PMCs that are
		 * targeting it.  This will send a signal to
		 * all PMC owners whose PMCs are orphaned.
		 *
		 * Log PMC value at exit time if requested.
		 */
		for (ri = 0; ri < md->pmd_npmc; ri++)
			if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
				if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
				    PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)))
					pmclog_process_procexit(pm, pp);
				pmc_unlink_target_process(pm, pp);
			}
		free(pp, M_PMC);

	} else
		critical_exit(); /* pp == NULL */
	/*
	 * If the process owned PMCs, free them up and free up
	 * memory.
	 */
	if ((po = pmc_find_owner_descriptor(p)) != NULL) {
		pmc_remove_owner(po);
		pmc_destroy_owner_descriptor(po);
	}

	sx_xunlock(&pmc_sx);
}
/*
 * Handle a process fork.
 *
 * If the parent process 'p1' is under HWPMC monitoring, then copy
 * over any attached PMCs that have 'do_descendants' semantics.
 */
static void
pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc,
    int flags)
{
	int is_using_hwpmcs;
	unsigned int ri;
	uint32_t do_descendants;
	struct pmc *pm;
	struct pmc_owner *po;
	struct pmc_process *ppnew, *ppold;

	(void) flags;		/* unused parameter */
	PROC_LOCK(p1);
	is_using_hwpmcs = p1->p_flag & P_HWPMC;
	PROC_UNLOCK(p1);

	/*
	 * If there are system-wide sampling PMCs active, we need to
	 * log all fork events to their owner's logs.
	 */
	PMC_EPOCH_ENTER();
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
	    if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
		    pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);
		    pmclog_process_proccreate(po, newproc, 1);
	    }
	PMC_EPOCH_EXIT();

	if (!is_using_hwpmcs)
		return;

	PMC_GET_SX_XLOCK();
	PMCDBG4(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1,
	    p1->p_pid, p1->p_comm, newproc);
	/*
	 * If the parent process (curthread->td_proc) is a
	 * target of any PMCs, look for PMCs that are to be
	 * inherited, and link these into the new process
	 * descriptor.
	 */
	if ((ppold = pmc_find_process_descriptor(curthread->td_proc,
	    PMC_FLAG_NONE)) == NULL)
		goto done;		/* nothing to do */

	do_descendants = 0;
	for (ri = 0; ri < md->pmd_npmc; ri++)
		if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL)
			do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS;
	if (do_descendants == 0) /* nothing to do */
		goto done;

	/*
	 * Now mark the new process as being tracked by this driver.
	 */
	PROC_LOCK(newproc);
	newproc->p_flag |= P_HWPMC;
	PROC_UNLOCK(newproc);
	/* Allocate a descriptor for the new process. */
	if ((ppnew = pmc_find_process_descriptor(newproc,
	    PMC_FLAG_ALLOCATE)) == NULL)
		goto done;

	/*
	 * Run through all PMCs that were targeting the old process
	 * and which specified F_DESCENDANTS and attach them to the
	 * new process.
	 *
	 * Log the fork event to all owners of PMCs attached to this
	 * process, if not already logged.
	 */
	for (ri = 0; ri < md->pmd_npmc; ri++)
		if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
		    (pm->pm_flags & PMC_F_DESCENDANTS)) {
			pmc_link_target_process(pm, ppnew);
			po = pm->pm_owner;
			if (po->po_sscount == 0 &&
			    po->po_flags & PMC_PO_OWNS_LOGFILE)
				pmclog_process_procfork(po, p1->p_pid,
				    newproc->p_pid);
		}

done:
	sx_xunlock(&pmc_sx);
}
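/*
 * PMC_F_DESCENDANTS is requested at PMC_OP_PMCALLOCATE time; the loop
 * above is what makes it work: each inheritable PMC targeting the
 * parent is linked to the child's descriptor, so one allocation can
 * follow an entire process tree across fork().
 */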
static void
pmc_process_threadcreate(struct thread *td)
{
	struct pmc_owner *po;

	PMC_EPOCH_ENTER();
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
	    if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		    pmclog_process_threadcreate(po, td, 1);
	PMC_EPOCH_EXIT();
}
static void
pmc_process_threadexit(struct thread *td)
{
	struct pmc_owner *po;

	PMC_EPOCH_ENTER();
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
	    if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		    pmclog_process_threadexit(po, td);
	PMC_EPOCH_EXIT();
}
static void
pmc_process_proccreate(struct proc *p)
{
	struct pmc_owner *po;

	PMC_EPOCH_ENTER();
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
	    if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		    pmclog_process_proccreate(po, p, 1 /* sync */);
	PMC_EPOCH_EXIT();
}
static void
pmc_process_allproc(struct pmc *pm)
{
	struct pmc_owner *po;
	struct thread *td;
	struct proc *p;

	po = pm->pm_owner;
	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
		return;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		pmclog_process_proccreate(po, p, 0 /* sync */);
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td)
			pmclog_process_threadcreate(po, td, 0 /* sync */);
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	pmclog_flush(po, 0);
}
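/*
 * The trailing 0/1 argument to the pmclog_process_*create() helpers
 * appears to select deferred (0) versus immediate (1) log writes; the
 * sweep above batches one record per existing process and thread and
 * then pushes them out with a single pmclog_flush().
 */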
static void
pmc_kld_load(void *arg __unused, linker_file_t lf)
{
	struct pmc_owner *po;

	/*
	 * Notify owners of system sampling PMCs about KLD operations.
	 */
	PMC_EPOCH_ENTER();
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
		if (po->po_flags & PMC_PO_OWNS_LOGFILE)
			pmclog_process_map_in(po, (pid_t) -1,
			    (uintfptr_t) lf->address, lf->filename);
	PMC_EPOCH_EXIT();

	/*
	 * TODO: Notify owners of (all) process-sampling PMCs too.
	 */
}
static void
pmc_kld_unload(void *arg __unused, const char *filename __unused,
    caddr_t address, size_t size)
{
	struct pmc_owner *po;

	PMC_EPOCH_ENTER();
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
		if (po->po_flags & PMC_PO_OWNS_LOGFILE)
			pmclog_process_map_out(po, (pid_t) -1,
			    (uintfptr_t) address, (uintfptr_t) address + size);
	PMC_EPOCH_EXIT();

	/*
	 * TODO: Notify owners of process-sampling PMCs.
	 */
}
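/*
 * In both KLD handlers, a pid of (pid_t) -1 marks the map-in/map-out
 * records as kernel address-space events, which lets post-processors
 * such as pmcstat(8) attribute samples to modules loaded or unloaded
 * while sampling was active.
 */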
/*
 * Return the human-readable name of a PMC class.
 */
static const char *
pmc_name_of_pmcclass(enum pmc_class class)
{

	switch (class) {
#undef	__PMC_CLASS
#define	__PMC_CLASS(S,V,D)						\
	case PMC_CLASS_##S:						\
		return #S;
	__PMC_CLASSES();
	default:
		return ("<unknown>");
	}
}
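/*
 * The switch body above is generated with an X-macro: __PMC_CLASSES()
 * from <sys/pmc.h> expands __PMC_CLASS(S,V,D) once per class, so each
 * PMC_CLASS_* constant maps to its stringified name without a
 * hand-maintained table.
 */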
/*
 * Base class initializer: allocate structure and set default classes.
 */
static struct pmc_mdep *
pmc_mdep_alloc(int nclasses)
{
	struct pmc_mdep *md;
	int n;

	/* SOFT + md classes */
	n = 1 + nclasses;
	md = malloc(sizeof(struct pmc_mdep) + n *
	    sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO);
	md->pmd_nclass = n;

	/* Add base class. */
	pmc_soft_initialize(md);
	return md;
}
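/*
 * Layout note: the class descriptors sit in a flexible array directly
 * behind struct pmc_mdep, so a single malloc() covers the MD state
 * plus one pmc_classdep slot per class, with the SOFT class always in
 * the first slot.
 */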
void
pmc_mdep_free(struct pmc_mdep *md)
{
	pmc_soft_finalize(md);
	free(md, M_PMC);
}
static int
generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
{
	(void) pc; (void) pp;

	return (0);
}

static int
generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
{
	(void) pc; (void) pp;

	return (0);
}
static struct pmc_mdep *
pmc_generic_cpu_initialize(void)
{
	struct pmc_mdep *md;

	md = pmc_mdep_alloc(0);

	md->pmd_cputype = PMC_CPU_GENERIC;

	md->pmd_pcpu_init  = NULL;
	md->pmd_pcpu_fini  = NULL;
	md->pmd_switch_in  = generic_switch_in;
	md->pmd_switch_out = generic_switch_out;

	return (md);
}
static void
pmc_generic_cpu_finalize(struct pmc_mdep *md)
{
	(void) md;
}
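/*
 * The generic MD layer above is the fallback used when
 * pmc_md_initialize() finds no hardware support: it provides no-op
 * context switch hooks and no per-CPU hooks, leaving only the SOFT
 * class usable so that software events still work on otherwise
 * unsupported CPUs.
 */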
static int
pmc_initialize(void)
{
	int c, cpu, error, n, ri;
	unsigned int maxcpu, domain;
	struct pcpu *pc;
	struct pmc_binding pb;
	struct pmc_sample *ps;
	struct pmc_classdep *pcd;
	struct pmc_samplebuffer *sb;

	md = NULL;
	error = 0;
	pmc_stats.pm_intr_ignored = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_intr_processed = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_intr_bufferfull = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_syscalls = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_syscall_errors = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_buffer_requests = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_buffer_requests_failed = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_log_sweeps = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_merges = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_overwrites = counter_u64_alloc(M_WAITOK);
#ifdef	HWPMC_DEBUG
	/* parse debug flags first */
	if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
	    pmc_debugstr, sizeof(pmc_debugstr)))
		pmc_debugflags_parse(pmc_debugstr,
		    pmc_debugstr+strlen(pmc_debugstr));
#endif
	PMCDBG1(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);

	/* check kernel version */
	if (pmc_kernel_version != PMC_VERSION) {
		if (pmc_kernel_version == 0)
			printf("hwpmc: this kernel has not been compiled with "
			    "'options HWPMC_HOOKS'.\n");
		else
			printf("hwpmc: kernel version (0x%x) does not match "
			    "module version (0x%x).\n", pmc_kernel_version,
			    PMC_VERSION);
		return EPROGMISMATCH;
	}
	/*
	 * check sysctl parameters
	 */
	if (pmc_hashsize <= 0) {
		(void) printf("hwpmc: tunable \"hashsize\"=%d must be "
		    "greater than zero.\n", pmc_hashsize);
		pmc_hashsize = PMC_HASH_SIZE;
	}

	if (pmc_nsamples <= 0 || pmc_nsamples > 65535) {
		(void) printf("hwpmc: tunable \"nsamples\"=%d out of "
		    "range.\n", pmc_nsamples);
		pmc_nsamples = PMC_NSAMPLES;
	}
	pmc_sample_mask = pmc_nsamples - 1;
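	/*
	 * The producer and consumer indices of the per-CPU sample rings
	 * are reduced with this mask (e.g. ps_prodidx & pmc_sample_mask),
	 * which is only a correct modulo when pmc_nsamples is a power of
	 * two; the tunable is expected to honor that.
	 */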
	if (pmc_callchaindepth <= 0 ||
	    pmc_callchaindepth > PMC_CALLCHAIN_DEPTH_MAX) {
		(void) printf("hwpmc: tunable \"callchaindepth\"=%d out of "
		    "range - using %d.\n", pmc_callchaindepth,
		    PMC_CALLCHAIN_DEPTH_MAX);
		pmc_callchaindepth = PMC_CALLCHAIN_DEPTH_MAX;
	}
	md = pmc_md_initialize();
	if (md == NULL) {
		/* Default to generic CPU. */
		md = pmc_generic_cpu_initialize();
		if (md == NULL)
			return (ENOSYS);
	}

	KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1,
	    ("[pmc,%d] no classes or pmcs", __LINE__));
	/* Compute the map from row-indices to classdep pointers. */
	pmc_rowindex_to_classdep = malloc(sizeof(struct pmc_classdep *) *
	    md->pmd_npmc, M_PMC, M_WAITOK|M_ZERO);

	for (n = 0; n < md->pmd_npmc; n++)
		pmc_rowindex_to_classdep[n] = NULL;
	for (ri = c = 0; c < md->pmd_nclass; c++) {
		pcd = &md->pmd_classdep[c];
		for (n = 0; n < pcd->pcd_num; n++, ri++)
			pmc_rowindex_to_classdep[ri] = pcd;
	}

	KASSERT(ri == md->pmd_npmc,
	    ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__,
	    ri, md->pmd_npmc));
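	/*
	 * Row indices place every counter in one flat namespace: indices
	 * 0..pcd_num-1 belong to the first class, the next pcd_num to the
	 * second, and so on, which is why the nested loops above can fill
	 * the lookup table sequentially.
	 */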
	maxcpu = pmc_cpu_max();

	/* allocate space for the per-cpu array */
	pmc_pcpu = malloc(maxcpu * sizeof(struct pmc_cpu *), M_PMC,
	    M_WAITOK|M_ZERO);

	/* per-cpu 'saved values' for managing process-mode PMCs */
	pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc,
	    M_PMC, M_WAITOK);

	/* Perform CPU-dependent initialization. */
	pmc_save_cpu_binding(&pb);
	for (cpu = 0; error == 0 && cpu < maxcpu; cpu++) {
		if (!pmc_cpu_is_active(cpu))
			continue;
		pmc_select_cpu(cpu);
		pmc_pcpu[cpu] = malloc(sizeof(struct pmc_cpu) +
		    md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC,
		    M_WAITOK|M_ZERO);
		if (md->pmd_pcpu_init)
			error = md->pmd_pcpu_init(md, cpu);
		for (n = 0; error == 0 && n < md->pmd_nclass; n++)
			error = md->pmd_classdep[n].pcd_pcpu_init(md, cpu);
	}
	pmc_restore_cpu_binding(&pb);

	if (error)
		return (error);
	/* allocate space for the sample array */
	for (cpu = 0; cpu < maxcpu; cpu++) {
		if (!pmc_cpu_is_active(cpu))
			continue;
		pc = pcpu_find(cpu);
		domain = pc->pc_domain;
		sb = malloc_domainset(sizeof(struct pmc_samplebuffer) +
		    pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
		    DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);

		KASSERT(pmc_pcpu[cpu] != NULL,
		    ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));

		sb->ps_callchains = malloc_domainset(pmc_callchaindepth *
		    pmc_nsamples * sizeof(uintptr_t), M_PMC,
		    DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);

		for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
			ps->ps_pc = sb->ps_callchains +
			    (n * pmc_callchaindepth);

		pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb;

		sb = malloc_domainset(sizeof(struct pmc_samplebuffer) +
		    pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
		    DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);

		sb->ps_callchains = malloc_domainset(pmc_callchaindepth *
		    pmc_nsamples * sizeof(uintptr_t), M_PMC,
		    DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
		for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
			ps->ps_pc = sb->ps_callchains +
			    (n * pmc_callchaindepth);

		pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb;

		sb = malloc_domainset(sizeof(struct pmc_samplebuffer) +
		    pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
		    DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
		sb->ps_callchains = malloc_domainset(pmc_callchaindepth *
		    pmc_nsamples * sizeof(uintptr_t), M_PMC,
		    DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
		for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
			ps->ps_pc = sb->ps_callchains +
			    (n * pmc_callchaindepth);

		pmc_pcpu[cpu]->pc_sb[PMC_UR] = sb;
	}
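	/*
	 * Each active CPU now carries three sample rings: PMC_HR
	 * (hardware/NMI context), PMC_SR (software events), and PMC_UR
	 * (userret). Every ring entry points at its own
	 * pmc_callchaindepth-sized slice of a flat callchain arena
	 * allocated from the CPU's own NUMA domain.
	 */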
	/* allocate space for the row disposition array */
	pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
	    M_PMC, M_WAITOK|M_ZERO);

	/* mark all PMCs as available */
	for (n = 0; n < (int) md->pmd_npmc; n++)
		PMC_MARK_ROW_FREE(n);

	/* allocate thread hash tables */
	pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
	    &pmc_ownerhashmask);

	pmc_processhash = hashinit(pmc_hashsize, M_PMC,
	    &pmc_processhashmask);
	mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",
	    MTX_SPIN);

	CK_LIST_INIT(&pmc_ss_owners);
	pmc_ss_count = 0;

	/* allocate a pool of spin mutexes */
	pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size,
	    MTX_SPIN);

	PMCDBG4(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
	    "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
	    pmc_processhash, pmc_processhashmask);
	/* Initialize a spin mutex for the thread free list. */
	mtx_init(&pmc_threadfreelist_mtx, "pmc-threadfreelist", "pmc-leaf",
	    MTX_SPIN);

	/* Initialize the task to prune the thread free list. */
	TASK_INIT(&free_task, 0, pmc_thread_descriptor_pool_free_task, NULL);
	/* register process {exit,fork,exec} handlers */
	pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
	    pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
	pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
	    pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);

	/* register kld event handlers */
	pmc_kld_load_tag = EVENTHANDLER_REGISTER(kld_load, pmc_kld_load,
	    NULL, EVENTHANDLER_PRI_ANY);
	pmc_kld_unload_tag = EVENTHANDLER_REGISTER(kld_unload, pmc_kld_unload,
	    NULL, EVENTHANDLER_PRI_ANY);
	/* initialize logging */
	pmclog_initialize();

	/* set hook functions */
	pmc_intr = md->pmd_intr;
	wmb();
	pmc_hook = pmc_hook_handler;
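	/*
	 * Publication order matters here: pmc_hook doubles as the
	 * "module is open for business" flag (cleared first in
	 * pmc_cleanup()), so pmc_intr must be globally visible before
	 * pmc_hook_handler is installed.
	 */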
	if (error == 0) {
		printf(PMC_MODULE_NAME ":");
		for (n = 0; n < (int) md->pmd_nclass; n++) {
			pcd = &md->pmd_classdep[n];
			printf(" %s/%d/%d/0x%b",
			    pmc_name_of_pmcclass(pcd->pcd_class),
			    pcd->pcd_num,
			    pcd->pcd_width,
			    pcd->pcd_caps,
			    "\20"
			    "\1INT\2USR\3SYS\4EDG\5THR"
			    "\6REA\7WRI\10INV\11QUA\12PRC"
			    "\13TAG\14CSC");
		}
		printf("\n");
	}

	return (error);
}
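/*
 * The kernel's %b printf conversion decodes pcd_caps against the bit
 * names above (the leading "\20" selects hexadecimal output for the
 * raw value), so the boot message lists each class as
 * name/#pmcs/width/capabilities.
 */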
/* prepare to be unloaded */
static void
pmc_cleanup(void)
{
	int c, cpu;
	unsigned int maxcpu;
	struct pmc_ownerhash *ph;
	struct pmc_owner *po, *tmp;
	struct pmc_binding pb;
#ifdef	HWPMC_DEBUG
	struct pmc_processhash *prh;
#endif

	PMCDBG0(MOD,INI,0, "cleanup");
	/* switch off sampling */
	CPU_FOREACH(cpu)
		DPCPU_ID_SET(cpu, pmc_sampled, 0);
	pmc_intr = NULL;

	sx_xlock(&pmc_sx);
	if (pmc_hook == NULL) {	/* being unloaded already */
		sx_xunlock(&pmc_sx);
		return;
	}

	pmc_hook = NULL; /* prevent new threads from entering module */
	/* deregister event handlers */
	EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
	EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
	EVENTHANDLER_DEREGISTER(kld_load, pmc_kld_load_tag);
	EVENTHANDLER_DEREGISTER(kld_unload, pmc_kld_unload_tag);
	/* send SIGBUS to all owner threads, free up allocations */
	if (pmc_ownerhash)
		for (ph = pmc_ownerhash;
		     ph <= &pmc_ownerhash[pmc_ownerhashmask];
		     ph++) {
			LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
				pmc_remove_owner(po);

				/* send SIGBUS to owner processes */
				PMCDBG3(MOD,INI,2, "cleanup signal proc=%p "
				    "(%d, %s)", po->po_owner,
				    po->po_owner->p_pid,
				    po->po_owner->p_comm);

				PROC_LOCK(po->po_owner);
				kern_psignal(po->po_owner, SIGBUS);
				PROC_UNLOCK(po->po_owner);

				pmc_destroy_owner_descriptor(po);
			}
		}
	/* reclaim allocated data structures */
	taskqueue_drain(taskqueue_fast, &free_task);
	mtx_destroy(&pmc_threadfreelist_mtx);
	pmc_thread_descriptor_pool_drain();

	if (pmc_mtxpool)
		mtx_pool_destroy(&pmc_mtxpool);

	mtx_destroy(&pmc_processhash_mtx);
	if (pmc_processhash) {
#ifdef	HWPMC_DEBUG
		struct pmc_process *pp;

		PMCDBG0(MOD,INI,3, "destroy process hash");
		for (prh = pmc_processhash;
		     prh <= &pmc_processhash[pmc_processhashmask];
		     prh++)
			LIST_FOREACH(pp, prh, pp_next)
			    PMCDBG1(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);
#endif

		hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
		pmc_processhash = NULL;
	}
	if (pmc_ownerhash) {
		PMCDBG0(MOD,INI,3, "destroy owner hash");
		hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
		pmc_ownerhash = NULL;
	}

	KASSERT(CK_LIST_EMPTY(&pmc_ss_owners),
	    ("[pmc,%d] Global SS owner list not empty", __LINE__));
	KASSERT(pmc_ss_count == 0,
	    ("[pmc,%d] Global SS count not empty", __LINE__));
	/* do processor and pmc-class dependent cleanup */
	maxcpu = pmc_cpu_max();

	PMCDBG0(MOD,INI,3, "md cleanup");
	if (md) {
		pmc_save_cpu_binding(&pb);
		for (cpu = 0; cpu < maxcpu; cpu++) {
			PMCDBG2(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
			    cpu, pmc_pcpu[cpu]);
			if (!pmc_cpu_is_active(cpu) || pmc_pcpu[cpu] == NULL)
				continue;
			pmc_select_cpu(cpu);
			for (c = 0; c < md->pmd_nclass; c++)
				md->pmd_classdep[c].pcd_pcpu_fini(md, cpu);
			if (md->pmd_pcpu_fini)
				md->pmd_pcpu_fini(md, cpu);
		}

		if (md->pmd_cputype == PMC_CPU_GENERIC)
			pmc_generic_cpu_finalize(md);
		else
			pmc_md_finalize(md);
		md = NULL;
		pmc_restore_cpu_binding(&pb);
	}
	/* Free per-cpu descriptors. */
	for (cpu = 0; cpu < maxcpu; cpu++) {
		if (!pmc_cpu_is_active(cpu))
			continue;
		KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_HR] != NULL,
		    ("[pmc,%d] Null hw cpu sample buffer cpu=%d", __LINE__,
		    cpu));
		KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_SR] != NULL,
		    ("[pmc,%d] Null sw cpu sample buffer cpu=%d", __LINE__,
		    cpu));
		KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_UR] != NULL,
		    ("[pmc,%d] Null userret cpu sample buffer cpu=%d",
		    __LINE__, cpu));
		free_domain(pmc_pcpu[cpu]->pc_sb[PMC_HR]->ps_callchains, M_PMC);
		free_domain(pmc_pcpu[cpu]->pc_sb[PMC_HR], M_PMC);
		free_domain(pmc_pcpu[cpu]->pc_sb[PMC_SR]->ps_callchains, M_PMC);
		free_domain(pmc_pcpu[cpu]->pc_sb[PMC_SR], M_PMC);
		free_domain(pmc_pcpu[cpu]->pc_sb[PMC_UR]->ps_callchains, M_PMC);
		free_domain(pmc_pcpu[cpu]->pc_sb[PMC_UR], M_PMC);
		free_domain(pmc_pcpu[cpu], M_PMC);
	}

	free(pmc_pcpu, M_PMC);
	pmc_pcpu = NULL;
	free(pmc_pcpu_saved, M_PMC);
	pmc_pcpu_saved = NULL;

	if (pmc_pmcdisp) {
		free(pmc_pmcdisp, M_PMC);
		pmc_pmcdisp = NULL;
	}

	if (pmc_rowindex_to_classdep) {
		free(pmc_rowindex_to_classdep, M_PMC);
		pmc_rowindex_to_classdep = NULL;
	}
	pmclog_shutdown();
	counter_u64_free(pmc_stats.pm_intr_ignored);
	counter_u64_free(pmc_stats.pm_intr_processed);
	counter_u64_free(pmc_stats.pm_intr_bufferfull);
	counter_u64_free(pmc_stats.pm_syscalls);
	counter_u64_free(pmc_stats.pm_syscall_errors);
	counter_u64_free(pmc_stats.pm_buffer_requests);
	counter_u64_free(pmc_stats.pm_buffer_requests_failed);
	counter_u64_free(pmc_stats.pm_log_sweeps);
	counter_u64_free(pmc_stats.pm_merges);
	counter_u64_free(pmc_stats.pm_overwrites);
	sx_xunlock(&pmc_sx);	/* we are done */
}
/*
 * The function called at load/unload.
 */
static int
load (struct module *module __unused, int cmd, void *arg __unused)
{
	int error;

	error = 0;

	switch (cmd) {
	case MOD_LOAD :
		/* initialize the subsystem */
		error = pmc_initialize();
		if (error != 0)
			break;
		PMCDBG2(MOD,INI,1, "syscall=%d maxcpu=%d",
		    pmc_syscall_num, pmc_cpu_max());
		break;

	case MOD_UNLOAD :
	case MOD_SHUTDOWN:
		pmc_cleanup();
		PMCDBG0(MOD,INI,1, "unloaded");
		break;

	default :
		error = EINVAL;	/* XXX should panic(9) */
		break;
	}

	return (error);
}