/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * Copyright (c) 2018 Matthew Macy
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/pmckern.h>
#include <sys/pmclog.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/vnode.h>

#include <sys/linker.h>		/* needs to be after <sys/malloc.h> */

#include <machine/atomic.h>
#include <machine/md_var.h>

#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include "hwpmc_soft.h"

#ifdef NUMA
#define	NDOMAINS	vm_ndomains
#else
#define	NDOMAINS	1
#define	malloc_domain(size, type, domain, flags) malloc((size), (type), (flags))
#define	free_domain(addr, type) free(addr, type)
#endif

enum pmc_flags {
	PMC_FLAG_NONE	  = 0x00, /* do nothing */
	PMC_FLAG_REMOVE   = 0x01, /* atomically remove entry from hash */
	PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
	PMC_FLAG_NOWAIT   = 0x04, /* do not wait for mallocs */
};

/*
 * The offset in sysent where the syscall is allocated.
 */
static int pmc_syscall_num = NO_SYSCALL;

struct pmc_cpu		**pmc_pcpu;	   /* per-cpu state */
pmc_value_t		*pmc_pcpu_saved;   /* saved PMC values: CSW handling */

#define	PMC_PCPU_SAVED(C,R)	pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
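
/*
 * Illustrative sketch (not part of the driver): the macro above flattens
 * a (cpu, row-index) pair into the one-dimensional pmc_pcpu_saved array.
 * With a hypothetical md->pmd_npmc of 4, saving row 2 of CPU 1 accesses
 *
 *	PMC_PCPU_SAVED(1,2)	which expands to   pmc_pcpu_saved[2 + 4*1]
 *
 * i.e. element 6 of the array.
 */
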
struct mtx_pool		*pmc_mtxpool;
static int		*pmc_pmcdisp;	 /* PMC row dispositions */

#define	PMC_ROW_DISP_IS_FREE(R)		(pmc_pmcdisp[(R)] == 0)
#define	PMC_ROW_DISP_IS_THREAD(R)	(pmc_pmcdisp[(R)] > 0)
#define	PMC_ROW_DISP_IS_STANDALONE(R)	(pmc_pmcdisp[(R)] < 0)

#define	PMC_MARK_ROW_FREE(R) do {					  \
	pmc_pmcdisp[(R)] = 0;						  \
} while (0)

#define	PMC_MARK_ROW_STANDALONE(R) do {					  \
	KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
	    __LINE__));							  \
	atomic_add_int(&pmc_pmcdisp[(R)], -1);				  \
	KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()),		  \
	    ("[pmc,%d] row disposition error", __LINE__));		  \
} while (0)

#define	PMC_UNMARK_ROW_STANDALONE(R) do {				  \
	atomic_add_int(&pmc_pmcdisp[(R)], 1);				  \
	KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
	    __LINE__));							  \
} while (0)

#define	PMC_MARK_ROW_THREAD(R) do {					  \
	KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
	    __LINE__));							  \
	atomic_add_int(&pmc_pmcdisp[(R)], 1);				  \
} while (0)

#define	PMC_UNMARK_ROW_THREAD(R) do {					  \
	atomic_add_int(&pmc_pmcdisp[(R)], -1);				  \
	KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
	    __LINE__));							  \
} while (0)
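
/*
 * Illustrative sketch (not part of the driver): a row's disposition acts
 * as a signed reference count.  Standalone (system-wide) use drives it
 * negative, per-thread (process-virtual) use drives it positive, e.g.:
 *
 *	PMC_MARK_ROW_THREAD(3);		pmc_pmcdisp[3]: 0 -> 1
 *	PMC_MARK_ROW_THREAD(3);		pmc_pmcdisp[3]: 1 -> 2
 *	PMC_UNMARK_ROW_THREAD(3);	pmc_pmcdisp[3]: 2 -> 1
 *
 * at which point PMC_ROW_DISP_IS_THREAD(3) is true and a call to
 * PMC_MARK_ROW_STANDALONE(3) would trip its KASSERT.
 */
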
/* various event handlers */
static eventhandler_tag	pmc_exit_tag, pmc_fork_tag, pmc_kld_load_tag,
    pmc_kld_unload_tag;

/* Module statistics */
struct pmc_driverstats pmc_stats;

/* Machine/processor dependent operations */
static struct pmc_mdep  *md;

/*
 * Hash tables mapping owner processes and target threads to PMCs.
 */

struct mtx pmc_processhash_mtx;		/* spin mutex */
static u_long pmc_processhashmask;
static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;

/*
 * Hash table of PMC owner descriptors.  This table is protected by
 * the shared PMC "sx" lock.
 */
static u_long pmc_ownerhashmask;
static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;

/*
 * List of PMC owners with system-wide sampling PMCs.
 */
static CK_LIST_HEAD(, pmc_owner) pmc_ss_owners;

/*
 * List of free thread entries.  This is protected by the spin
 * mutex.
 */
static struct mtx pmc_threadfreelist_mtx;	/* spin mutex */
static LIST_HEAD(, pmc_thread) pmc_threadfreelist;
static int pmc_threadfreelist_entries = 0;
#define	THREADENTRY_SIZE						\
	(sizeof(struct pmc_thread) + (md->pmd_npmc * sizeof(struct pmc_threadpmcstate)))
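
/*
 * Illustrative sketch (not part of the driver): THREADENTRY_SIZE sizes a
 * variable-length allocation carrying one pmc_threadpmcstate per hardware
 * row.  With a hypothetical md->pmd_npmc of 4, an allocation looks like:
 *
 *	pt = malloc(sizeof(struct pmc_thread) +
 *	    4 * sizeof(struct pmc_threadpmcstate), M_PMC, M_WAITOK | M_ZERO);
 */
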
/*
 * Task to free thread descriptors
 */
static struct grouptask free_gtask;

/*
 * A map of row indices to classdep structures.
 */
static struct pmc_classdep **pmc_rowindex_to_classdep;

static int	pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
static int	pmc_debugflags_parse(char *newstr, char *fence);

static int	load(struct module *module, int cmd, void *arg);
static void	pmc_add_thread_descriptors_from_proc(struct proc *p,
    struct pmc_process *pp);
static int	pmc_attach_process(struct proc *p, struct pmc *pm);
static struct pmc *pmc_allocate_pmc_descriptor(void);
static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p);
static int	pmc_attach_one_process(struct proc *p, struct pmc *pm);
static int	pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
    int cpu);
static int	pmc_can_attach(struct pmc *pm, struct proc *p);
static void	pmc_capture_user_callchain(int cpu, int soft, struct trapframe *tf);
static void	pmc_cleanup(void);
static int	pmc_detach_process(struct proc *p, struct pmc *pm);
static int	pmc_detach_one_process(struct proc *p, struct pmc *pm,
    int flags);
static void	pmc_destroy_owner_descriptor(struct pmc_owner *po);
static void	pmc_destroy_pmc_descriptor(struct pmc *pm);
static void	pmc_destroy_process_descriptor(struct pmc_process *pp);
static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
static int	pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
    pmc_id_t pmcid);
static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
    uint32_t mode);
static struct pmc_thread *pmc_find_thread_descriptor(struct pmc_process *pp,
    struct thread *td, uint32_t mode);
static void	pmc_force_context_switch(void);
static void	pmc_link_target_process(struct pmc *pm,
    struct pmc_process *pp);
static void	pmc_log_all_process_mappings(struct pmc_owner *po);
static void	pmc_log_kernel_mappings(struct pmc *pm);
static void	pmc_log_process_mappings(struct pmc_owner *po, struct proc *p);
static void	pmc_maybe_remove_owner(struct pmc_owner *po);
static void	pmc_process_csw_in(struct thread *td);
static void	pmc_process_csw_out(struct thread *td);
static void	pmc_process_exit(void *arg, struct proc *p);
static void	pmc_process_fork(void *arg, struct proc *p1,
    struct proc *p2, int n);
static void	pmc_process_samples(int cpu, int soft);
static void	pmc_release_pmc_descriptor(struct pmc *pmc);
static void	pmc_process_thread_add(struct thread *td);
static void	pmc_process_thread_delete(struct thread *td);
static void	pmc_remove_owner(struct pmc_owner *po);
static void	pmc_remove_process_descriptor(struct pmc_process *pp);
static void	pmc_restore_cpu_binding(struct pmc_binding *pb);
static void	pmc_save_cpu_binding(struct pmc_binding *pb);
static void	pmc_select_cpu(int cpu);
static int	pmc_start(struct pmc *pm);
static int	pmc_stop(struct pmc *pm);
static int	pmc_syscall_handler(struct thread *td, void *syscall_args);
static struct pmc_thread *pmc_thread_descriptor_pool_alloc(void);
static void	pmc_thread_descriptor_pool_drain(void);
static void	pmc_thread_descriptor_pool_free(struct pmc_thread *pt);
static void	pmc_unlink_target_process(struct pmc *pmc,
    struct pmc_process *pp);
static int	generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp);
static int	generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp);
static struct pmc_mdep *pmc_generic_cpu_initialize(void);
static void	pmc_generic_cpu_finalize(struct pmc_mdep *md);

/*
 * Kernel tunables and sysctl(8) interface.
 */

SYSCTL_DECL(_kern_hwpmc);
SYSCTL_NODE(_kern_hwpmc, OID_AUTO, stats, CTLFLAG_RW, 0, "HWPMC stats");

SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_ignored, CTLFLAG_RW,
    &pmc_stats.pm_intr_ignored, "# of interrupts ignored");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_processed, CTLFLAG_RW,
    &pmc_stats.pm_intr_processed, "# of interrupts processed");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_bufferfull, CTLFLAG_RW,
    &pmc_stats.pm_intr_bufferfull, "# of interrupts where buffer was full");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscalls, CTLFLAG_RW,
    &pmc_stats.pm_syscalls, "# of syscalls");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscall_errors, CTLFLAG_RW,
    &pmc_stats.pm_syscall_errors, "# of syscall errors");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests, CTLFLAG_RW,
    &pmc_stats.pm_buffer_requests, "# of buffer requests");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests_failed, CTLFLAG_RW,
    &pmc_stats.pm_buffer_requests_failed, "# of buffer requests which failed");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, log_sweeps, CTLFLAG_RW,
    &pmc_stats.pm_log_sweeps, "# of log sweeps");

static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_RDTUN,
    &pmc_callchaindepth, 0, "depth of call chain records");

SYSCTL_STRING(_kern_hwpmc, OID_AUTO, cpuid, CTLFLAG_RD,
    pmc_cpuid, 0, "cpu version string");

struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
char	pmc_debugstr[PMC_DEBUG_STRSIZE];
TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
    sizeof(pmc_debugstr));
SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
    0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
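
/*
 * Example (illustrative only; the group and minor-flag names are those
 * accepted by pmc_debugflags_parse() below): enable all "pmc" minor
 * flags plus the exec and exit events of the "process" group with
 *
 *	# sysctl kern.hwpmc.debugflags="pmc=* process=exec,exit"
 */
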
/*
 * kern.hwpmc.hashsize -- determines the number of rows in the
 * hash table used to look up threads
 */
static int pmc_hashsize = PMC_HASH_SIZE;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_RDTUN,
    &pmc_hashsize, 0, "rows in hash tables");

/*
 * kern.hwpmc.nsamples --- number of PC samples/callchain stacks per CPU
 */
static int pmc_nsamples = PMC_NSAMPLES;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_RDTUN,
    &pmc_nsamples, 0, "number of PC samples per CPU");

/*
 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
 */
static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_RDTUN,
    &pmc_mtxpool_size, 0, "size of spin mutex pool");

/*
 * kern.hwpmc.threadfreelist_entries -- number of free entries
 */
SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_entries, CTLFLAG_RD,
    &pmc_threadfreelist_entries, 0, "number of available thread entries");

/*
 * kern.hwpmc.threadfreelist_max -- maximum number of free entries
 */
static int pmc_threadfreelist_max = PMC_THREADLIST_MAX;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_max, CTLFLAG_RW,
    &pmc_threadfreelist_max, 0,
    "maximum number of available thread entries before freeing some");

/*
 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
 * allocate system-wide PMCs.
 *
 * Allowing unprivileged processes to allocate system PMCs is convenient
 * if system-wide measurements need to be taken concurrently with other
 * per-process measurements.  This feature is turned off by default.
 */
static int pmc_unprivileged_syspmcs = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RWTUN,
    &pmc_unprivileged_syspmcs, 0,
    "allow unprivileged process to allocate system PMCs");

/*
 * Hash function.  Discard the lower 2 bits of the pointer since
 * these are always zero for our uses.  The hash multiplier is
 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
 */

#if	LONG_BIT == 64
#define	_PMC_HM		11400714819323198486u
#elif	LONG_BIT == 32
#define	_PMC_HM		2654435769u
#else
#error	Must know the size of 'long' to compile
#endif

#define	PMC_HASH_PTR(P,M)	((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
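
/*
 * Illustrative use (a sketch mirroring pmc_allocate_owner_descriptor()
 * below): hashing a process pointer into the owner hash table:
 *
 *	hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
 *	poh = &pmc_ownerhash[hindex];
 */
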
/* The `sysent' for the new syscall */
static struct sysent pmc_sysent = {
	.sy_call =	pmc_syscall_handler,
};

static struct syscall_module_data pmc_syscall_mod = {
	.chainevh =	load,
	.chainarg =	NULL,
	.offset =	&pmc_syscall_num,
	.new_sysent =	&pmc_sysent,
	.old_sysent =	{ .sy_narg = 0, .sy_call = NULL },
	.flags =	SY_THR_STATIC_KLD,
};

static moduledata_t pmc_mod = {
	.name =		PMC_MODULE_NAME,
	.evhand =	syscall_module_handler,
	.priv =		&pmc_syscall_mod,
};

#ifdef EARLY_AP_STARTUP
DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SYSCALLS, SI_ORDER_ANY);
#else
DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
#endif
MODULE_VERSION(pmc, PMC_VERSION);

enum pmc_dbgparse_state {
	PMCDS_WS,		/* in whitespace */
	PMCDS_MAJOR,		/* seen a major keyword */
	PMCDS_MINFLAGS		/* in minor flags */
};

static int
pmc_debugflags_parse(char *newstr, char *fence)
{
	char c, *p, *q;
	struct pmc_debugflags *tmpflags;
	int error, found, *newbits, tmp;
	size_t kwlen;

	tmpflags = malloc(sizeof(*tmpflags), M_PMC, M_WAITOK|M_ZERO);

	p = newstr;
	error = 0;

	for (; p < fence && (c = *p); p++) {

		/* skip white space */
		if (c == ' ' || c == '\t')
			continue;

		/* look for a keyword followed by "=" */
		for (q = p; p < fence && (c = *p) && c != '='; p++)
			;
		if (c != '=') {
			error = EINVAL;
			goto done;
		}

		kwlen = p - q;
		newbits = NULL;

		/* lookup flag group name */
#define	DBG_SET_FLAG_MAJ(S,F)						\
		if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0)	\
			newbits = &tmpflags->pdb_ ## F;

		DBG_SET_FLAG_MAJ("cpu",		CPU);
		DBG_SET_FLAG_MAJ("csw",		CSW);
		DBG_SET_FLAG_MAJ("logging",	LOG);
		DBG_SET_FLAG_MAJ("module",	MOD);
		DBG_SET_FLAG_MAJ("md",		MDP);
		DBG_SET_FLAG_MAJ("owner",	OWN);
		DBG_SET_FLAG_MAJ("pmc",		PMC);
		DBG_SET_FLAG_MAJ("process",	PRC);
		DBG_SET_FLAG_MAJ("sampling",	SAM);

		if (newbits == NULL) {
			error = EINVAL;
			goto done;
		}

		p++;		/* skip the '=' */

		/* Now parse the individual flags */
		tmp = 0;
	newflag:
		for (q = p; p < fence && (c = *p); p++)
			if (c == ' ' || c == '\t' || c == ',')
				break;

		/* p == fence or c == ws or c == "," or c == 0 */

		if ((kwlen = p - q) == 0) {
			*newbits = tmp;
			continue;
		}

		found = 0;

#define	DBG_SET_FLAG_MIN(S,F)						\
		if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0)	\
			tmp |= found = (1 << PMC_DEBUG_MIN_ ## F)

		/* a '*' denotes all possible flags in the group */
		if (kwlen == 1 && *q == '*')
			tmp = found = ~0;
		/* look for individual flag names */
		DBG_SET_FLAG_MIN("allocaterow", ALR);
		DBG_SET_FLAG_MIN("allocate",	ALL);
		DBG_SET_FLAG_MIN("attach",	ATT);
		DBG_SET_FLAG_MIN("bind",	BND);
		DBG_SET_FLAG_MIN("config",	CFG);
		DBG_SET_FLAG_MIN("exec",	EXC);
		DBG_SET_FLAG_MIN("exit",	EXT);
		DBG_SET_FLAG_MIN("find",	FND);
		DBG_SET_FLAG_MIN("flush",	FLS);
		DBG_SET_FLAG_MIN("fork",	FRK);
		DBG_SET_FLAG_MIN("getbuf",	GTB);
		DBG_SET_FLAG_MIN("hook",	PMH);
		DBG_SET_FLAG_MIN("init",	INI);
		DBG_SET_FLAG_MIN("intr",	INT);
		DBG_SET_FLAG_MIN("linktarget",	TLK);
		DBG_SET_FLAG_MIN("mayberemove", OMR);
		DBG_SET_FLAG_MIN("ops",		OPS);
		DBG_SET_FLAG_MIN("read",	REA);
		DBG_SET_FLAG_MIN("register",	REG);
		DBG_SET_FLAG_MIN("release",	REL);
		DBG_SET_FLAG_MIN("remove",	ORM);
		DBG_SET_FLAG_MIN("sample",	SAM);
		DBG_SET_FLAG_MIN("scheduleio",	SIO);
		DBG_SET_FLAG_MIN("select",	SEL);
		DBG_SET_FLAG_MIN("signal",	SIG);
		DBG_SET_FLAG_MIN("swi",		SWI);
		DBG_SET_FLAG_MIN("swo",		SWO);
		DBG_SET_FLAG_MIN("start",	STA);
		DBG_SET_FLAG_MIN("stop",	STO);
		DBG_SET_FLAG_MIN("syscall",	PMS);
		DBG_SET_FLAG_MIN("unlinktarget", TUL);
		DBG_SET_FLAG_MIN("write",	WRI);

		if (found == 0) {
			/* unrecognized flag name */
			error = EINVAL;
			goto done;
		}

		if (c == 0 || c == ' ' || c == '\t') {	/* end of flag group */
			*newbits = tmp;
			continue;
		}

		p++;
		goto newflag;
	}

	/* save the new flag set */
	bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags));

 done:
	free(tmpflags, M_PMC);
	return error;
}

static int
pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	char *fence, *newstr;
	int error;
	unsigned int n;

	(void) arg1; (void) arg2; /* unused parameters */

	n = sizeof(pmc_debugstr);
	newstr = malloc(n, M_PMC, M_WAITOK|M_ZERO);
	(void) strlcpy(newstr, pmc_debugstr, n);

	error = sysctl_handle_string(oidp, newstr, n, req);

	/* if there is a new string, parse and copy it */
	if (error == 0 && req->newptr != NULL) {
		fence = newstr + (n < req->newlen ? n : req->newlen + 1);
		if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
			(void) strlcpy(pmc_debugstr, newstr,
			    sizeof(pmc_debugstr));
	}

	free(newstr, M_PMC);

	return error;
}

/*
 * Map a row index to a classdep structure and return the adjusted row
 * index for the PMC class index.
 */
static struct pmc_classdep *
pmc_ri_to_classdep(struct pmc_mdep *md, int ri, int *adjri)
{
	struct pmc_classdep *pcd;

	KASSERT(ri >= 0 && ri < md->pmd_npmc,
	    ("[pmc,%d] illegal row-index %d", __LINE__, ri));

	pcd = pmc_rowindex_to_classdep[ri];

	KASSERT(pcd != NULL,
	    ("[pmc,%d] ri %d null pcd", __LINE__, ri));

	*adjri = ri - pcd->pcd_ri;

	KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num,
	    ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri));

	return (pcd);
}

/*
 * Concurrency Control
 *
 * The driver manages the following data structures:
 *
 *   - target process descriptors, one per target process
 *   - owner process descriptors (and attached lists), one per owner process
 *   - lookup hash tables for owner and target processes
 *   - PMC descriptors (and attached lists)
 *   - per-cpu hardware state
 *   - the 'hook' variable through which the kernel calls into
 *     this module
 *   - the machine hardware state (managed by the MD layer)
 *
 * These data structures are accessed from:
 *
 *   - thread context-switch code
 *   - interrupt handlers (possibly on multiple cpus)
 *   - kernel threads on multiple cpus running on behalf of user
 *     processes doing system calls
 *   - this driver's private kernel threads
 *
 * = Locks and Locking strategy =
 *
 * The driver uses four locking strategies for its operation:
 *
 * - The global SX lock "pmc_sx" is used to protect internal
 *   data structures.
 *
 *   Calls into the module by syscall() start with this lock being
 *   held in exclusive mode.  Depending on the requested operation,
 *   the lock may be downgraded to 'shared' mode to allow more
 *   concurrent readers into the module.  Calls into the module from
 *   other parts of the kernel acquire the lock in shared mode.
 *
 *   This SX lock is held in exclusive mode for any operations that
 *   modify the linkages between the driver's internal data structures.
 *
 *   The 'pmc_hook' function pointer is also protected by this lock.
 *   It is only examined with the sx lock held in exclusive mode.  The
 *   kernel module is allowed to be unloaded only with the sx lock held
 *   in exclusive mode.  In normal syscall handling, after acquiring the
 *   pmc_sx lock we first check that 'pmc_hook' is non-null before
 *   proceeding.  This prevents races between the thread unloading the
 *   module and other threads seeking to use the module.
 *
 * - Lookups of target process structures and owner process structures
 *   cannot use the global "pmc_sx" SX lock because these lookups need
 *   to happen during context switches and in other critical sections
 *   where sleeping is not allowed.  We protect these lookup tables
 *   with their own private spin-mutexes, "pmc_processhash_mtx" and
 *   "pmc_ownerhash_mtx".
 *
 * - Interrupt handlers work in a lock free manner.  At interrupt
 *   time, handlers look at the PMC pointer (phw->phw_pmc) configured
 *   when the PMC was started.  If this pointer is NULL, the interrupt
 *   is ignored after updating driver statistics.  We ensure that this
 *   pointer is set (using an atomic operation if necessary) before the
 *   PMC hardware is started.  Conversely, this pointer is unset atomically
 *   only after the PMC hardware is stopped.
 *
 *   We ensure that everything needed for the operation of an
 *   interrupt handler is available without it needing to acquire any
 *   locks.  We also ensure that a PMC's software state is destroyed only
 *   after the PMC is taken off hardware (on all CPUs).
 *
 * - Context-switch handling with process-private PMCs needs more
 *   care.
 *
 *   A given process may be the target of multiple PMCs.  For example,
 *   PMCATTACH and PMCDETACH may be requested by a process on one CPU
 *   while the target process is running on another.  A PMC could also
 *   be getting released because its owner is exiting.  We tackle
 *   these situations in the following manner:
 *
 *   - each target process structure 'pmc_process' has an array
 *     of 'struct pmc *' pointers, one for each hardware PMC.
 *
 *   - At context switch IN time, each "target" PMC in RUNNING state
 *     gets started on hardware and a pointer to each PMC is copied into
 *     the per-cpu phw array.  The 'runcount' for the PMC is
 *     incremented.
 *
 *   - At context switch OUT time, all process-virtual PMCs are stopped
 *     on hardware.  The saved value is added to the PMCs value field
 *     only if the PMC is in a non-deleted state (the PMCs state could
 *     have changed during the current time slice).
 *
 *     Note that since in-between a switch IN on a processor and a switch
 *     OUT, the PMC could have been released on another CPU.  Therefore
 *     context switch OUT always looks at the hardware state to turn
 *     OFF PMCs and will update a PMC's saved value only if reachable
 *     from the target process record.
 *
 *   - OP PMCRELEASE could be called on a PMC at any time (the PMC could
 *     be attached to many processes at the time of the call and could
 *     be active on multiple CPUs).
 *
 *     We prevent further scheduling of the PMC by marking it as in
 *     state 'DELETED'.  If the runcount of the PMC is non-zero then
 *     this PMC is currently running on a CPU somewhere.  The thread
 *     doing the PMCRELEASE operation waits by repeatedly doing a
 *     pause() till the runcount comes to zero.
 *
 * The contents of a PMC descriptor (struct pmc) are protected using
 * a spin-mutex.  In order to save space, we use a mutex pool.
 *
 * In terms of lock types used by witness(4), we use:
 * - Type "pmc-sx", used by the global SX lock.
 * - Type "pmc-sleep", for sleep mutexes used by logger threads.
 * - Type "pmc-per-proc", for protecting PMC owner descriptors.
 * - Type "pmc-leaf", used for all other spin mutexes.
 */
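
/*
 * A minimal sketch of the PMCRELEASE wait described above (illustrative
 * only; the authoritative logic lives in pmc_release_pmc_descriptor()):
 *
 *	pm->pm_state = PMC_STATE_DELETED;
 *	while (counter_u64_fetch(pm->pm_runcount) > 0)
 *		pmc_force_context_switch();	(pauses for one tick)
 */
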
/*
 * save the cpu binding of the current kthread
 */

static void
pmc_save_cpu_binding(struct pmc_binding *pb)
{
	PMCDBG0(CPU,BND,2, "save-cpu");
	thread_lock(curthread);
	pb->pb_bound = sched_is_bound(curthread);
	pb->pb_cpu   = curthread->td_oncpu;
	thread_unlock(curthread);
	PMCDBG1(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
}

/*
 * restore the cpu binding of the current thread
 */

static void
pmc_restore_cpu_binding(struct pmc_binding *pb)
{
	PMCDBG2(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
	    curthread->td_oncpu, pb->pb_cpu);
	thread_lock(curthread);
	if (pb->pb_bound)
		sched_bind(curthread, pb->pb_cpu);
	else
		sched_unbind(curthread);
	thread_unlock(curthread);
	PMCDBG0(CPU,BND,2, "restore-cpu done");
}

/*
 * move execution over to the specified cpu and bind it there.
 */

static void
pmc_select_cpu(int cpu)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[pmc,%d] bad cpu number %d", __LINE__, cpu));

	/* Never move to an inactive CPU. */
	KASSERT(pmc_cpu_is_active(cpu), ("[pmc,%d] selecting inactive "
	    "CPU %d", __LINE__, cpu));

	PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d", cpu);
	thread_lock(curthread);
	sched_bind(curthread, cpu);
	thread_unlock(curthread);

	KASSERT(curthread->td_oncpu == cpu,
	    ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
		cpu, curthread->td_oncpu));

	PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
}

/*
 * Force a context switch.
 *
 * We do this by pause'ing for 1 tick -- invoking mi_switch() is not
 * guaranteed to force a context switch.
 */

static void
pmc_force_context_switch(void)
{

	pause("pmcctx", 1);
}

/*
 * Get the file name for an executable.  This is a simple wrapper
 * around vn_fullpath(9).
 */

static void
pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
{

	*fullpath = "unknown";
	*freepath = NULL;
	vn_fullpath(curthread, v, fullpath, freepath);
}

/*
 * remove a process owning PMCs
 */

static void
pmc_remove_owner(struct pmc_owner *po)
{
	struct pmc *pm, *tmp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG1(OWN,ORM,1, "remove-owner po=%p", po);

	/* Remove descriptor from the owner hash table */
	LIST_REMOVE(po, po_next);

	/* release all owned PMC descriptors */
	LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
		PMCDBG1(OWN,ORM,2, "pmc=%p", pm);
		KASSERT(pm->pm_owner == po,
		    ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po));

		pmc_release_pmc_descriptor(pm);	/* will unlink from the list */
		pmc_destroy_pmc_descriptor(pm);
	}

	KASSERT(po->po_sscount == 0,
	    ("[pmc,%d] SS count not zero", __LINE__));
	KASSERT(LIST_EMPTY(&po->po_pmcs),
	    ("[pmc,%d] PMC list not empty", __LINE__));

	/* de-configure the log file if present */
	if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		pmclog_deconfigure_log(po);
}

/*
 * remove an owner process record if all conditions are met.
 */

static void
pmc_maybe_remove_owner(struct pmc_owner *po)
{

	PMCDBG1(OWN,OMR,1, "maybe-remove-owner po=%p", po);

	/*
	 * Remove owner record if
	 * - this process does not own any PMCs
	 * - this process has not allocated a system-wide sampling buffer
	 */

	if (LIST_EMPTY(&po->po_pmcs) &&
	    ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) {
		pmc_remove_owner(po);
		pmc_destroy_owner_descriptor(po);
	}
}

/*
 * Add an association between a target process and a PMC.
 */

static void
pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
{
	int ri;
	struct pmc_target *pt;
#ifdef INVARIANTS
	struct pmc_thread *pt_td;
#endif

	sx_assert(&pmc_sx, SX_XLOCKED);

	KASSERT(pm != NULL && pp != NULL,
	    ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
	KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
	    ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d",
		__LINE__, pm, pp->pp_proc->p_pid));
	KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= ((int) md->pmd_npmc - 1),
	    ("[pmc,%d] Illegal reference count %d for process record %p",
		__LINE__, pp->pp_refcnt, (void *) pp));

	ri = PMC_TO_ROWINDEX(pm);

	PMCDBG3(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
	    pm, ri, pp);

#ifdef HWPMC_DEBUG
	LIST_FOREACH(pt, &pm->pm_targets, pt_next)
	    if (pt->pt_process == pp)
		    KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
				__LINE__, pp, pm));
#endif

	pt = malloc(sizeof(struct pmc_target), M_PMC, M_WAITOK|M_ZERO);
	pt->pt_process = pp;

	LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);

	atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc,
	    (uintptr_t)pm);

	if (pm->pm_owner->po_owner == pp->pp_proc)
		pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;

	/*
	 * Initialize the per-process values at this row index.
	 */
	pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ?
	    pm->pm_sc.pm_reloadcount : 0;

	pp->pp_refcnt++;

#ifdef INVARIANTS
	/* Confirm that the per-thread values at this row index are cleared. */
	if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
		mtx_lock_spin(pp->pp_tdslock);
		LIST_FOREACH(pt_td, &pp->pp_tds, pt_next) {
			KASSERT(pt_td->pt_pmcs[ri].pt_pmcval == (pmc_value_t) 0,
			    ("[pmc,%d] pt_pmcval not cleared for pid=%d at "
			    "ri=%d", __LINE__, pp->pp_proc->p_pid, ri));
		}
		mtx_unlock_spin(pp->pp_tdslock);
	}
#endif
}

/*
 * Removes the association between a target process and a PMC.
 */

static void
pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
{
	int ri;
	struct proc *p;
	struct pmc_target *ptgt;
	struct pmc_thread *pt;

	sx_assert(&pmc_sx, SX_XLOCKED);

	KASSERT(pm != NULL && pp != NULL,
	    ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));

	KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc,
	    ("[pmc,%d] Illegal ref count %d on process record %p",
		__LINE__, pp->pp_refcnt, (void *) pp));

	ri = PMC_TO_ROWINDEX(pm);

	PMCDBG3(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
	    pm, ri, pp);

	KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
	    ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
		ri, pm, pp->pp_pmcs[ri].pp_pmc));

	pp->pp_pmcs[ri].pp_pmc = NULL;
	pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;

	/* Clear the per-thread values at this row index. */
	if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
		mtx_lock_spin(pp->pp_tdslock);
		LIST_FOREACH(pt, &pp->pp_tds, pt_next)
			pt->pt_pmcs[ri].pt_pmcval = (pmc_value_t) 0;
		mtx_unlock_spin(pp->pp_tdslock);
	}

	/* Remove owner-specific flags */
	if (pm->pm_owner->po_owner == pp->pp_proc) {
		pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
		pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
	}

	pp->pp_refcnt--;

	/* Remove the target process from the PMC structure */
	LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
		if (ptgt->pt_process == pp)
			break;

	KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
	    "in pmc %p", __LINE__, pp->pp_proc, pp, pm));

	LIST_REMOVE(ptgt, pt_next);
	free(ptgt, M_PMC);

	/* if the PMC now lacks targets, send the owner a SIGIO */
	if (LIST_EMPTY(&pm->pm_targets)) {
		p = pm->pm_owner->po_owner;
		PROC_LOCK(p);
		kern_psignal(p, SIGIO);
		PROC_UNLOCK(p);

		PMCDBG2(PRC,SIG,2, "signalling proc=%p signal=%d", p,
		    SIGIO);
	}
}

/*
 * Check if PMC 'pm' may be attached to target process 't'.
 */

static int
pmc_can_attach(struct pmc *pm, struct proc *t)
{
	struct proc *o;		/* pmc owner */
	struct ucred *oc, *tc;	/* owner, target credentials */
	int decline_attach, i;

	/*
	 * A PMC's owner can always attach that PMC to itself.
	 */

	if ((o = pm->pm_owner->po_owner) == t)
		return 0;

	PROC_LOCK(o);
	oc = o->p_ucred;
	crhold(oc);
	PROC_UNLOCK(o);

	PROC_LOCK(t);
	tc = t->p_ucred;
	crhold(tc);
	PROC_UNLOCK(t);

	/*
	 * The effective uid of the PMC owner should match at least one
	 * of the {effective,real,saved} uids of the target process.
	 */

	decline_attach = oc->cr_uid != tc->cr_uid &&
	    oc->cr_uid != tc->cr_svuid &&
	    oc->cr_uid != tc->cr_ruid;

	/*
	 * Every one of the target's group ids must be in the owner's
	 * group list.
	 */
	for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
		decline_attach = !groupmember(tc->cr_groups[i], oc);

	/* check the read and saved gids too */
	if (decline_attach == 0)
		decline_attach = !groupmember(tc->cr_rgid, oc) ||
		    !groupmember(tc->cr_svgid, oc);

	crfree(tc);
	crfree(oc);

	return (decline_attach ? EPERM : 0);
}

/*
 * Attach a process to a PMC.
 */

static int
pmc_attach_one_process(struct proc *p, struct pmc *pm)
{
	int ri, error;
	char *fullpath, *freepath;
	struct pmc_process	*pp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG5(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
	    PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

	/*
	 * Locate the process descriptor corresponding to process 'p',
	 * allocating space as needed.
	 *
	 * Verify that rowindex 'pm_rowindex' is free in the process
	 * descriptor.
	 *
	 * If not, allocate space for a descriptor and link the
	 * process descriptor and PMC.
	 */
	ri = PMC_TO_ROWINDEX(pm);

	/* mark process as using HWPMCs */
	PROC_LOCK(p);
	p->p_flag |= P_HWPMC;
	PROC_UNLOCK(p);

	if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL) {
		error = ENOMEM;
		goto fail;
	}

	if (pp->pp_pmcs[ri].pp_pmc == pm) {/* already present at slot [ri] */
		error = EEXIST;
		goto fail;
	}

	if (pp->pp_pmcs[ri].pp_pmc != NULL) {
		error = EBUSY;
		goto fail;
	}

	pmc_link_target_process(pm, pp);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) &&
	    (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0)
		pm->pm_flags |= PMC_F_NEEDS_LOGFILE;

	pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */

	/* issue an attach event to a configured log file */
	if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) {
		if (p->p_flag & P_KPROC) {
			fullpath = kernelname;
			freepath = NULL;
		} else {
			pmc_getfilename(p->p_textvp, &fullpath, &freepath);
			pmclog_process_pmcattach(pm, p->p_pid, fullpath);
		}
		free(freepath, M_TEMP);
		if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			pmc_log_process_mappings(pm->pm_owner, p);
	}

	return (0);
 fail:
	PROC_LOCK(p);
	p->p_flag &= ~P_HWPMC;
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Attach a process and optionally its children
 */

static int
pmc_attach_process(struct proc *p, struct pmc *pm)
{
	int error;
	struct proc *top;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG5(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
	    PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

	/*
	 * If this PMC successfully allowed a GETMSR operation
	 * in the past, disallow further ATTACHes.
	 */

	if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0)
		return EPERM;

	if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
		return pmc_attach_one_process(p, pm);

	/*
	 * Traverse all child processes, attaching them to this PMC
	 * (see the note on pre-order traversal after this function).
	 */

	sx_slock(&proctree_lock);

	top = p;

	for (;;) {
		if ((error = pmc_attach_one_process(p, pm)) != 0)
			break;
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				goto done;
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}

	if (error)
		(void) pmc_detach_process(top, pm);

 done:
	sx_sunlock(&proctree_lock);

	return error;
}
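
/*
 * Note on the traversal above (illustrative): the attach, detach and
 * mapping-logging routines all walk the process tree in pre-order while
 * holding proctree_lock shared: descend to LIST_FIRST(&p->p_children)
 * when a child exists, otherwise climb via p->p_pptr until a sibling
 * (LIST_NEXT(p, p_sibling)) is found or 'top' is reached again.
 */
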
/*
 * Detach a process from a PMC.  If there are no other PMCs tracking
 * this process, remove the process structure from its hash table.  If
 * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
 */

static int
pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
{
	int ri;
	struct pmc_process *pp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	KASSERT(pm != NULL,
	    ("[pmc,%d] null pm pointer", __LINE__));

	ri = PMC_TO_ROWINDEX(pm);

	PMCDBG6(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
	    pm, ri, p, p->p_pid, p->p_comm, flags);

	if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
		return ESRCH;

	if (pp->pp_pmcs[ri].pp_pmc != pm)
		return EINVAL;

	pmc_unlink_target_process(pm, pp);

	/* Issue a detach entry if a log file is configured */
	if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE)
		pmclog_process_pmcdetach(pm, p->p_pid);

	/*
	 * If there are no PMCs targeting this process, we remove its
	 * descriptor from the target hash table and unset the P_HWPMC
	 * flag in the struct proc.
	 */
	KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc,
	    ("[pmc,%d] Illegal refcnt %d for process struct %p",
		__LINE__, pp->pp_refcnt, pp));

	if (pp->pp_refcnt != 0)	/* still a target of some PMC */
		return 0;

	pmc_remove_process_descriptor(pp);

	if (flags & PMC_FLAG_REMOVE)
		pmc_destroy_process_descriptor(pp);

	PROC_LOCK(p);
	p->p_flag &= ~P_HWPMC;
	PROC_UNLOCK(p);

	return 0;
}

/*
 * Detach a process and optionally its descendants from a PMC.
 */

static int
pmc_detach_process(struct proc *p, struct pmc *pm)
{
	struct proc *top;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG5(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
	    PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

	if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
		return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);

	/*
	 * Traverse all children, detaching them from this PMC.  We
	 * ignore errors since we could be detaching a PMC from a
	 * partially attached proc tree.
	 */

	sx_slock(&proctree_lock);

	top = p;

	for (;;) {
		(void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);

		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				goto done;
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}

 done:
	sx_sunlock(&proctree_lock);

	if (LIST_EMPTY(&pm->pm_targets))
		pm->pm_flags &= ~PMC_F_ATTACH_DONE;

	return 0;
}

/*
 * Thread context switch IN
 */

static void
pmc_process_csw_in(struct thread *td)
{
	int cpu;
	unsigned int adjri, ri;
	struct pmc *pm;
	struct proc *p;
	struct pmc_cpu *pc;
	struct pmc_hw *phw;
	pmc_value_t newvalue;
	struct pmc_process *pp;
	struct pmc_thread *pt;
	struct pmc_classdep *pcd;

	p = td->td_proc;
	pt = NULL;
	if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
		return;

	KASSERT(pp->pp_proc == td->td_proc,
	    ("[pmc,%d] not my thread state", __LINE__));

	critical_enter(); /* no preemption from this point */

	cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */

	PMCDBG5(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
	    p->p_pid, p->p_comm, pp);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));

	pc = pmc_pcpu[cpu];

	for (ri = 0; ri < md->pmd_npmc; ri++) {

		if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
			continue;

		KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
		    ("[pmc,%d] Target PMC in non-virtual mode (%d)",
			__LINE__, PMC_TO_MODE(pm)));

		KASSERT(PMC_TO_ROWINDEX(pm) == ri,
		    ("[pmc,%d] Row index mismatch pmc %d != ri %d",
			__LINE__, PMC_TO_ROWINDEX(pm), ri));

		/*
		 * Only PMCs that are marked as 'RUNNING' need
		 * be placed on hardware.
		 */

		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		/* increment PMC runcount */
		counter_u64_add(pm->pm_runcount, 1);

		/* configure the HWPMC we are going to use. */
		pcd = pmc_ri_to_classdep(md, ri, &adjri);
		pcd->pcd_config_pmc(cpu, adjri, pm);

		phw = pc->pc_hwpmcs[ri];

		KASSERT(phw != NULL,
		    ("[pmc,%d] null hw pointer", __LINE__));

		KASSERT(phw->phw_pmc == pm,
		    ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
			phw->phw_pmc, pm));

		/*
		 * Write out saved value and start the PMC.
		 *
		 * Sampling PMCs use a per-thread value, while
		 * counting mode PMCs use a per-pmc value that is
		 * inherited across descendants.
		 */
		if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
			if (pt == NULL)
				pt = pmc_find_thread_descriptor(pp, td,
				    PMC_FLAG_NONE);

			KASSERT(pt != NULL,
			    ("[pmc,%d] No thread found for td=%p", __LINE__,
			    td));

			mtx_pool_lock_spin(pmc_mtxpool, pm);

			/*
			 * If we have a thread descriptor, use the per-thread
			 * counter in the descriptor. If not, we will use
			 * a per-process counter.
			 *
			 * TODO: Remove the per-process "safety net" once
			 * we have thoroughly tested that we don't hit the
			 * above assert.
			 */
			if (pt != NULL) {
				if (pt->pt_pmcs[ri].pt_pmcval > 0)
					newvalue = pt->pt_pmcs[ri].pt_pmcval;
				else
					newvalue = pm->pm_sc.pm_reloadcount;
			} else {
				/*
				 * Use the saved value calculated after the most
				 * recent time a thread using the shared counter
				 * switched out. Reset the saved count in case
				 * another thread from this process switches in
				 * before any threads switch out.
				 */

				newvalue = pp->pp_pmcs[ri].pp_pmcval;
				pp->pp_pmcs[ri].pp_pmcval =
				    pm->pm_sc.pm_reloadcount;
			}
			mtx_pool_unlock_spin(pmc_mtxpool, pm);
			KASSERT(newvalue > 0 && newvalue <=
			    pm->pm_sc.pm_reloadcount,
			    ("[pmc,%d] pmcval outside of expected range cpu=%d "
			    "ri=%d pmcval=%jx pm_reloadcount=%jx", __LINE__,
			    cpu, ri, newvalue, pm->pm_sc.pm_reloadcount));
		} else {
			KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
			    ("[pmc,%d] illegal mode=%d", __LINE__,
			    PMC_TO_MODE(pm)));
			mtx_pool_lock_spin(pmc_mtxpool, pm);
			newvalue = PMC_PCPU_SAVED(cpu, ri) =
			    pm->pm_gv.pm_savedvalue;
			mtx_pool_unlock_spin(pmc_mtxpool, pm);
		}

		PMCDBG3(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);

		pcd->pcd_write_pmc(cpu, adjri, newvalue);

		/* If a sampling mode PMC, reset stalled state. */
		if (PMC_TO_MODE(pm) == PMC_MODE_TS)
			pm->pm_pcpu_state[cpu].pps_stalled = 0;

		/* Indicate that we desire this to run. */
		pm->pm_pcpu_state[cpu].pps_cpustate = 1;

		/* Start the PMC. */
		pcd->pcd_start_pmc(cpu, adjri);
	}

	/*
	 * perform any other architecture/cpu dependent thread
	 * switch-in actions.
	 */

	(void) (*md->pmd_switch_in)(pc, pp);

	critical_exit();
}

/*
 * Thread context switch OUT.
 */

static void
pmc_process_csw_out(struct thread *td)
{
	int cpu;
	int64_t tmp;
	struct pmc *pm;
	struct proc *p;
	enum pmc_mode mode;
	struct pmc_cpu *pc;
	pmc_value_t newvalue;
	unsigned int adjri, ri;
	struct pmc_process *pp;
	struct pmc_thread *pt = NULL;
	struct pmc_classdep *pcd;

	/*
	 * Locate our process descriptor; this may be NULL if
	 * this process is exiting and we have already removed
	 * the process from the target process table.
	 *
	 * Note that due to kernel preemption, multiple
	 * context switches may happen while the process is
	 * exiting.
	 *
	 * Note also that if the target process cannot be
	 * found we still need to deconfigure any PMCs that
	 * are currently running on hardware.
	 */

	p = td->td_proc;
	pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);

	critical_enter();

	cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */

	PMCDBG5(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
	    p->p_pid, p->p_comm, pp);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));

	pc = pmc_pcpu[cpu];

	/*
	 * When a PMC gets unlinked from a target PMC, it will
	 * be removed from the target's pp_pmc[] array.
	 *
	 * However, on a MP system, the target could have been
	 * executing on another CPU at the time of the unlink.
	 * So, at context switch OUT time, we need to look at
	 * the hardware to determine if a PMC is scheduled on
	 * the current CPU.
	 */

	for (ri = 0; ri < md->pmd_npmc; ri++) {

		pcd = pmc_ri_to_classdep(md, ri, &adjri);
		pm  = NULL;
		(void) (*pcd->pcd_get_config)(cpu, adjri, &pm);

		if (pm == NULL)	/* nothing at this row index */
			continue;

		mode = PMC_TO_MODE(pm);
		if (!PMC_IS_VIRTUAL_MODE(mode))
			continue; /* not a process virtual PMC */

		KASSERT(PMC_TO_ROWINDEX(pm) == ri,
		    ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
			__LINE__, PMC_TO_ROWINDEX(pm), ri));

		/*
		 * Change desired state, and then stop if not stalled.
		 * This two-step dance should avoid race conditions where
		 * an interrupt re-enables the PMC after this code has
		 * already checked the pm_stalled flag.
		 */
		pm->pm_pcpu_state[cpu].pps_cpustate = 0;
		if (pm->pm_pcpu_state[cpu].pps_stalled == 0)
			pcd->pcd_stop_pmc(cpu, adjri);

		/* reduce this PMC's runcount */
		counter_u64_add(pm->pm_runcount, -1);

		/*
		 * If this PMC is associated with this process,
		 * save the reading.
		 */

		if (pm->pm_state != PMC_STATE_DELETED && pp != NULL &&
		    pp->pp_pmcs[ri].pp_pmc != NULL) {
			KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
			    ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__,
				pm, ri, pp->pp_pmcs[ri].pp_pmc));

			KASSERT(pp->pp_refcnt > 0,
			    ("[pmc,%d] pp refcnt = %d", __LINE__,
				pp->pp_refcnt));

			pcd->pcd_read_pmc(cpu, adjri, &newvalue);

			if (mode == PMC_MODE_TS) {
				PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d val=%jd (samp)",
				    cpu, ri, newvalue);

				if (pt == NULL)
					pt = pmc_find_thread_descriptor(pp, td,
					    PMC_FLAG_NONE);

				KASSERT(pt != NULL,
				    ("[pmc,%d] No thread found for td=%p",
				    __LINE__, td));

				mtx_pool_lock_spin(pmc_mtxpool, pm);

				/*
				 * If we have a thread descriptor, save the
				 * per-thread counter in the descriptor. If not,
				 * we will update the per-process counter.
				 *
				 * TODO: Remove the per-process "safety net"
				 * once we have thoroughly tested that we
				 * don't hit the above assert.
				 */
				if (pt != NULL)
					pt->pt_pmcs[ri].pt_pmcval = newvalue;
				else {
					/*
					 * For sampling process-virtual PMCs,
					 * newvalue is the number of events to
					 * be seen until the next sampling
					 * interrupt. We can just add the events
					 * left from this invocation to the
					 * counter, then adjust in case we
					 * overflow our range.
					 *
					 * (Recall that we reload the counter
					 * every time we use it.)
					 */
					pp->pp_pmcs[ri].pp_pmcval += newvalue;
					if (pp->pp_pmcs[ri].pp_pmcval >
					    pm->pm_sc.pm_reloadcount)
						pp->pp_pmcs[ri].pp_pmcval -=
						    pm->pm_sc.pm_reloadcount;
				}
				mtx_pool_unlock_spin(pmc_mtxpool, pm);
			} else {
				tmp = newvalue - PMC_PCPU_SAVED(cpu,ri);

				PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd (count)",
				    cpu, ri, tmp);

				/*
				 * For counting process-virtual PMCs,
				 * we expect the count to be
				 * increasing monotonically, modulo a 64
				 * bit wraparound.
				 */
				KASSERT(tmp >= 0,
				    ("[pmc,%d] negative increment cpu=%d "
				    "ri=%d newvalue=%jx saved=%jx "
				    "incr=%jx", __LINE__, cpu, ri,
				    newvalue, PMC_PCPU_SAVED(cpu,ri), tmp));

				mtx_pool_lock_spin(pmc_mtxpool, pm);
				pm->pm_gv.pm_savedvalue += tmp;
				pp->pp_pmcs[ri].pp_pmcval += tmp;
				mtx_pool_unlock_spin(pmc_mtxpool, pm);

				if (pm->pm_flags & PMC_F_LOG_PROCCSW)
					pmclog_process_proccsw(pm, pp, tmp);
			}
		}

		/* mark hardware as free */
		pcd->pcd_config_pmc(cpu, adjri, NULL);
	}

	/*
	 * perform any other architecture/cpu dependent thread
	 * switch out functions.
	 */

	(void) (*md->pmd_switch_out)(pc, pp);

	critical_exit();
}

/*
 * A new thread for a process.
 */
static void
pmc_process_thread_add(struct thread *td)
{
	struct pmc_process *pmc;

	pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE);
	if (pmc != NULL)
		pmc_find_thread_descriptor(pmc, td, PMC_FLAG_ALLOCATE);
}

/*
 * A thread delete for a process.
 */
static void
pmc_process_thread_delete(struct thread *td)
{
	struct pmc_process *pmc;

	pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE);
	if (pmc != NULL)
		pmc_thread_descriptor_pool_free(pmc_find_thread_descriptor(pmc,
		    td, PMC_FLAG_REMOVE));
}

/*
 * A mapping change for a process.
 */

static void
pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm)
{
	int ri;
	pid_t pid;
	char *fullpath, *freepath;
	const struct pmc *pm;
	struct pmc_owner *po;
	const struct pmc_process *pp;

	freepath = fullpath = NULL;
	epoch_exit_preempt(global_epoch_preempt);
	pmc_getfilename((struct vnode *) pkm->pm_file, &fullpath, &freepath);

	pid = td->td_proc->p_pid;

	epoch_enter_preempt(global_epoch_preempt);
	/* Inform owners of all system-wide sampling PMCs. */
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
	    if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		    pmclog_process_map_in(po, pid, pkm->pm_address, fullpath);

	if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
		goto done;

	/*
	 * Inform sampling PMC owners tracking this process.
	 */
	for (ri = 0; ri < md->pmd_npmc; ri++)
		if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
		    PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			pmclog_process_map_in(pm->pm_owner,
			    pid, pkm->pm_address, fullpath);

 done:
	if (freepath)
		free(freepath, M_TEMP);
}

/*
 * Log an munmap request.
 */

static void
pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm)
{
	int ri;
	pid_t pid;
	struct pmc_owner *po;
	const struct pmc *pm;
	const struct pmc_process *pp;

	pid = td->td_proc->p_pid;

	epoch_enter_preempt(global_epoch_preempt);
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
	    if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		    pmclog_process_map_out(po, pid, pkm->pm_address,
			pkm->pm_address + pkm->pm_size);
	epoch_exit_preempt(global_epoch_preempt);

	if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
		return;

	for (ri = 0; ri < md->pmd_npmc; ri++)
		if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
		    PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			pmclog_process_map_out(pm->pm_owner, pid,
			    pkm->pm_address, pkm->pm_address + pkm->pm_size);
}

/*
 * Log mapping information about the kernel.
 */

static void
pmc_log_kernel_mappings(struct pmc *pm)
{
	struct pmc_owner *po;
	struct pmckern_map_in *km, *kmbase;

	MPASS(in_epoch() || sx_xlocked(&pmc_sx));
	KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
	    ("[pmc,%d] non-sampling PMC (%p) desires mapping information",
		__LINE__, (void *) pm));

	po = pm->pm_owner;

	if (po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE)
		return;

	/*
	 * Log the current set of kernel modules.
	 */
	kmbase = linker_hwpmc_list_objects();
	for (km = kmbase; km->pm_file != NULL; km++) {
		PMCDBG2(LOG,REG,1,"%s %p", (char *) km->pm_file,
		    (void *) km->pm_address);
		pmclog_process_map_in(po, (pid_t) -1, km->pm_address,
		    (char *) km->pm_file);
	}
	free(kmbase, M_LINKER);

	po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE;
}

/*
 * Log the mappings for a single process.
 */

static void
pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
{
	vm_map_t map;
	struct vnode *vp;
	struct vmspace *vm;
	vm_map_entry_t entry;
	vm_offset_t last_end;
	u_int last_timestamp;
	struct vnode *last_vp;
	vm_offset_t start_addr;
	vm_object_t obj, lobj, tobj;
	char *fullpath, *freepath;

	last_vp = NULL;
	last_end = (vm_offset_t) 0;
	fullpath = freepath = NULL;

	if ((vm = vmspace_acquire_ref(p)) == NULL)
		return;

	map = &vm->vm_map;
	vm_map_lock_read(map);

	for (entry = map->header.next; entry != &map->header; entry = entry->next) {

		if (entry == NULL) {
			PMCDBG2(LOG,OPS,2, "hwpmc: vm_map entry unexpectedly "
			    "NULL! pid=%d vm_map=%p\n", p->p_pid, map);
			break;
		}

		/*
		 * We only care about executable map entries.
		 */
		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    !(entry->protection & VM_PROT_EXECUTE) ||
		    (entry->object.vm_object == NULL)) {
			continue;
		}

		obj = entry->object.vm_object;
		VM_OBJECT_RLOCK(obj);

		/*
		 * Walk the backing_object list to find the base
		 * (non-shadowed) vm_object.
		 */
		for (lobj = tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_RLOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_RUNLOCK(lobj);
			lobj = tobj;
		}

		/*
		 * At this point lobj is the base vm_object and it is locked.
		 */
		if (lobj == NULL) {
			PMCDBG3(LOG,OPS,2, "hwpmc: lobj unexpectedly NULL! pid=%d "
			    "vm_map=%p vm_obj=%p\n", p->p_pid, map, obj);
			VM_OBJECT_RUNLOCK(obj);
			continue;
		}

		vp = vm_object_vnode(lobj);
		if (vp == NULL) {
			if (lobj != obj)
				VM_OBJECT_RUNLOCK(lobj);
			VM_OBJECT_RUNLOCK(obj);
			continue;
		}

		/*
		 * Skip contiguous regions that point to the same
		 * vnode, so we don't emit redundant MAP-IN
		 * directives.
		 */
		if (entry->start == last_end && vp == last_vp) {
			last_end = entry->end;
			if (lobj != obj)
				VM_OBJECT_RUNLOCK(lobj);
			VM_OBJECT_RUNLOCK(obj);
			continue;
		}

		/*
		 * We don't want to keep the proc's vm_map or this
		 * vm_object locked while we walk the pathname, since
		 * vn_fullpath() can sleep.  However, if we drop the
		 * lock, it's possible for concurrent activity to
		 * modify the vm_map list.  To protect against this,
		 * we save the vm_map timestamp before we release the
		 * lock, and check it after we reacquire the lock
		 * below.
		 */
		start_addr = entry->start;
		last_end = entry->end;
		last_timestamp = map->timestamp;
		vm_map_unlock_read(map);

		vref(vp);
		if (lobj != obj)
			VM_OBJECT_RUNLOCK(lobj);
		VM_OBJECT_RUNLOCK(obj);

		freepath = NULL;
		pmc_getfilename(vp, &fullpath, &freepath);
		last_vp = vp;

		vrele(vp);
		vp = NULL;

		pmclog_process_map_in(po, p->p_pid, start_addr, fullpath);
		if (freepath)
			free(freepath, M_TEMP);

		vm_map_lock_read(map);

		/*
		 * If our saved timestamp doesn't match, this means
		 * that the vm_map was modified out from under us and
		 * we can't trust our current "entry" pointer.  Do a
		 * new lookup for this entry.  If there is no entry
		 * for this address range, vm_map_lookup_entry() will
		 * return the previous one, so we always want to go to
		 * entry->next on the next loop iteration.
		 *
		 * There is an edge condition here that can occur if
		 * there is no entry at or before this address.  In
		 * this situation, vm_map_lookup_entry returns
		 * &map->header, which would cause our loop to abort
		 * without processing the rest of the map.  However,
		 * in practice this will never happen for process
		 * vm_map.  This is because the executable's text
		 * segment is the first mapping in the proc's address
		 * space, and this mapping is never removed until the
		 * process exits, so there will always be a non-header
		 * entry at or before the requested address for
		 * vm_map_lookup_entry to return.
		 */
		if (map->timestamp != last_timestamp)
			vm_map_lookup_entry(map, last_end - 1, &entry);
	}

	vm_map_unlock_read(map);
	vmspace_free(vm);
}

/*
 * Log mappings for all processes in the system.
 */

static void
pmc_log_all_process_mappings(struct pmc_owner *po)
{
	struct proc *p, *top;

	sx_assert(&pmc_sx, SX_XLOCKED);

	if ((p = pfind(1)) == NULL)
		panic("[pmc,%d] Cannot find init", __LINE__);

	PROC_UNLOCK(p);

	sx_slock(&proctree_lock);

	top = p;

	for (;;) {
		pmc_log_process_mappings(po, p);
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				goto done;
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}

 done:
	sx_sunlock(&proctree_lock);
}

/*
 * The 'hook' invoked from the kernel proper
 */

#ifdef HWPMC_DEBUG
const char *pmc_hooknames[] = {
	/* these strings correspond to PMC_FN_* in <sys/pmckern.h> */
	"",
	"EXEC",
	"CSW-IN",
	"CSW-OUT",
	"SAMPLE",
	"UNUSED1",
	"UNUSED2",
	"MMAP",
	"MUNMAP",
	"CALLCHAIN-NMI",
	"CALLCHAIN-SOFT",
	"SOFTSAMPLING",
	"THR-CREATE",
	"THR-EXIT",
};
#endif

static int
pmc_hook_handler(struct thread *td, int function, void *arg)
{
	int cpu;

	PMCDBG4(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
	    pmc_hooknames[function], arg);

	switch (function)
	{
	/*
	 * Process exec()
	 */

	case PMC_FN_PROCESS_EXEC:
	{
		char *fullpath, *freepath;
		unsigned int ri;
		int is_using_hwpmcs;
		struct pmc *pm;
		struct proc *p;
		struct pmc_owner *po;
		struct pmc_process *pp;
		struct pmckern_procexec *pk;

		sx_assert(&pmc_sx, SX_XLOCKED);

		p = td->td_proc;
		pmc_getfilename(p->p_textvp, &fullpath, &freepath);

		pk = (struct pmckern_procexec *) arg;

		epoch_enter_preempt(global_epoch_preempt);
		/* Inform owners of SS mode PMCs of the exec event. */
		CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
		    if (po->po_flags & PMC_PO_OWNS_LOGFILE)
			    pmclog_process_procexec(po, PMC_ID_INVALID,
				p->p_pid, pk->pm_entryaddr, fullpath);
		epoch_exit_preempt(global_epoch_preempt);

		PROC_LOCK(p);
		is_using_hwpmcs = p->p_flag & P_HWPMC;
		PROC_UNLOCK(p);

		if (!is_using_hwpmcs) {
			if (freepath)
				free(freepath, M_TEMP);
			break;
		}

		/*
		 * PMCs are not inherited across an exec(): remove any
		 * PMCs that this process is the owner of.
		 */

		if ((po = pmc_find_owner_descriptor(p)) != NULL) {
			pmc_remove_owner(po);
			pmc_destroy_owner_descriptor(po);
		}

		/*
		 * If the process being exec'ed is not the target of any
		 * PMC, we are done.
		 */
		if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) {
			if (freepath)
				free(freepath, M_TEMP);
			break;
		}

		/*
		 * Log the exec event to all monitoring owners.  Skip
		 * owners who have already received the event because
		 * they had system sampling PMCs active.
		 */
		for (ri = 0; ri < md->pmd_npmc; ri++)
			if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
				po = pm->pm_owner;
				if (po->po_sscount == 0 &&
				    po->po_flags & PMC_PO_OWNS_LOGFILE)
					pmclog_process_procexec(po, pm->pm_id,
					    p->p_pid, pk->pm_entryaddr,
					    fullpath);
			}

		if (freepath)
			free(freepath, M_TEMP);

		PMCDBG4(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
		    p, p->p_pid, p->p_comm, pk->pm_credentialschanged);

		if (pk->pm_credentialschanged == 0) /* no change */
			break;

		/*
		 * If the newly exec()'ed process has a different credential
		 * than before, allow it to be the target of a PMC only if
		 * the PMC's owner has sufficient privilege.
		 */

		for (ri = 0; ri < md->pmd_npmc; ri++)
			if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL)
				if (pmc_can_attach(pm, td->td_proc) != 0)
					pmc_detach_one_process(td->td_proc,
					    pm, PMC_FLAG_NONE);

		KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc,
		    ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__,
			pp->pp_refcnt, pp));

		/*
		 * If this process is no longer the target of any
		 * PMCs, we can remove the process entry and free
		 * up space.
		 */

		if (pp->pp_refcnt == 0) {
			pmc_remove_process_descriptor(pp);
			pmc_destroy_process_descriptor(pp);
			break;
		}

	}
	break;

	case PMC_FN_CSW_IN:
		pmc_process_csw_in(td);
		break;
	case PMC_FN_CSW_OUT:
		pmc_process_csw_out(td);
		break;

	/*
	 * Process accumulated PC samples.
	 *
	 * This function is expected to be called by hardclock() for
	 * each CPU that has accumulated PC samples.
	 *
	 * This function is to be executed on the CPU whose samples
	 * are being processed.
	 */
	case PMC_FN_DO_SAMPLES:

		/*
		 * Clear the cpu specific bit in the CPU mask before
		 * doing the rest of the processing.  If the NMI handler
		 * gets invoked after the "atomic_clear_int()" call
		 * below but before "pmc_process_samples()" gets
		 * around to processing the interrupt, then we will
		 * come back here at the next hardclock() tick (and
		 * may find nothing to do if "pmc_process_samples()"
		 * had already processed the interrupt).  We don't
		 * lose the interrupt sample.
		 */
		DPCPU_SET(pmc_sampled, 0);
		cpu = PCPU_GET(cpuid);
		pmc_process_samples(cpu, PMC_HR);
		pmc_process_samples(cpu, PMC_SR);
		break;

	case PMC_FN_MMAP:
		MPASS(in_epoch() || sx_xlocked(&pmc_sx));
		pmc_process_mmap(td, (struct pmckern_map_in *) arg);
		break;

	case PMC_FN_MUNMAP:
		MPASS(in_epoch() || sx_xlocked(&pmc_sx));
		pmc_process_munmap(td, (struct pmckern_map_out *) arg);
		break;

	case PMC_FN_USER_CALLCHAIN:
		/*
		 * Record a call chain.
		 */
		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
		    __LINE__));

		pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_HR,
		    (struct trapframe *) arg);
		td->td_pflags &= ~TDP_CALLCHAIN;
		break;

	case PMC_FN_USER_CALLCHAIN_SOFT:
		/*
		 * Record a call chain.
		 */
		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
		    __LINE__));
		pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_SR,
		    (struct trapframe *) arg);
		td->td_pflags &= ~TDP_CALLCHAIN;
		break;

	case PMC_FN_SOFT_SAMPLING:
		/*
		 * Call soft PMC sampling intr.
		 */
		pmc_soft_intr((struct pmckern_soft *) arg);
		break;

	case PMC_FN_THR_CREATE:
		pmc_process_thread_add(td);
		break;

	case PMC_FN_THR_EXIT:
		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
		    __LINE__));
		pmc_process_thread_delete(td);
		break;

	default:
#ifdef HWPMC_DEBUG
		KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
#endif
		break;
	}

	return 0;
}

/*
 * allocate a 'struct pmc_owner' descriptor in the owner hash table.
 */

static struct pmc_owner *
pmc_allocate_owner_descriptor(struct proc *p)
{
	uint32_t hindex;
	struct pmc_owner *po;
	struct pmc_ownerhash *poh;

	hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
	poh = &pmc_ownerhash[hindex];

	/* allocate space for N pointers and one descriptor struct */
	po = malloc(sizeof(struct pmc_owner), M_PMC, M_WAITOK|M_ZERO);
	po->po_owner = p;
	LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */

	TAILQ_INIT(&po->po_logbuffers);
	mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN);

	PMCDBG4(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
	    p, p->p_pid, p->p_comm, po);

	return po;
}

static void
pmc_destroy_owner_descriptor(struct pmc_owner *po)
{

	PMCDBG4(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)",
	    po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm);

	mtx_destroy(&po->po_mtx);
	free(po, M_PMC);
}

2299 * Allocate a thread descriptor from the free pool.
2301 * NOTE: This *can* return NULL.
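 * Callers must handle that; pmc_find_thread_descriptor(), for example,
 * falls back to malloc(9) when this pool comes up empty.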
2303 static struct pmc_thread *
2304 pmc_thread_descriptor_pool_alloc(void)
2306 struct pmc_thread *pt;
2308 mtx_lock_spin(&pmc_threadfreelist_mtx);
2309 if ((pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) {
2310 LIST_REMOVE(pt, pt_next);
2311 pmc_threadfreelist_entries--;
2313 mtx_unlock_spin(&pmc_threadfreelist_mtx);
2319 * Add a thread descriptor to the free pool. We use this instead of free()
2320 * to maintain a cache of free entries. Additionally, we can safely call
2321 * this function when we cannot call free(), such as in a critical section.
2325 pmc_thread_descriptor_pool_free(struct pmc_thread *pt)
2328 if (pt == NULL)
2329 return;
2331 memset(pt, 0, THREADENTRY_SIZE);
2332 mtx_lock_spin(&pmc_threadfreelist_mtx);
2333 LIST_INSERT_HEAD(&pmc_threadfreelist, pt, pt_next);
2334 pmc_threadfreelist_entries++;
2335 if (pmc_threadfreelist_entries > pmc_threadfreelist_max)
2336 GROUPTASK_ENQUEUE(&free_gtask);
2337 mtx_unlock_spin(&pmc_threadfreelist_mtx);
2341 * An asynchronous task to trim the thread descriptor free list.
2344 pmc_thread_descriptor_pool_free_task(void *arg __unused)
2346 struct pmc_thread *pt;
2347 LIST_HEAD(, pmc_thread) tmplist;
2350 LIST_INIT(&tmplist);
2351 /* Determine what changes, if any, we need to make. */
2352 mtx_lock_spin(&pmc_threadfreelist_mtx);
2353 delta = pmc_threadfreelist_entries - pmc_threadfreelist_max;
2355 pt = LIST_FIRST(&pmc_threadfreelist);
2357 LIST_REMOVE(pt, pt_next);
2358 LIST_INSERT_HEAD(&tmplist, pt, pt_next);
2360 mtx_unlock_spin(&pmc_threadfreelist_mtx);
2362 /* If there are entries to free, free them. */
2363 while (!LIST_EMPTY(&tmplist)) {
2364 pt = LIST_FIRST(&tmplist);
2365 LIST_REMOVE(pt, pt_next);
2366 free(pt, M_PMC);
2371 * Drain the thread free pool, freeing all allocations.
2374 pmc_thread_descriptor_pool_drain(void)
2376 struct pmc_thread *pt, *next;
2378 LIST_FOREACH_SAFE(pt, &pmc_threadfreelist, pt_next, next) {
2379 LIST_REMOVE(pt, pt_next);
2380 free(pt, M_PMC);
2385 * find the descriptor corresponding to thread 'td', adding or removing it
2386 * as specified by 'mode'.
2388 * Note that, in addition to the mode flags supported by
2389 * pmc_find_process_descriptor(), this also supports:
2390 * PMC_FLAG_NOWAIT: Causes the function to not wait for mallocs.
2391 * This makes it safe to call while holding certain other locks.
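 *
 * Illustrative usage (a sketch of the pattern used by
 * pmc_add_thread_descriptors_from_proc() below):
 *
 *	pt = pmc_find_thread_descriptor(pp, td,
 *	    PMC_FLAG_ALLOCATE | PMC_FLAG_NOWAIT);
 *	if (pt == NULL)
 *		retry later, after dropping the locks currently held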
2394 static struct pmc_thread *
2395 pmc_find_thread_descriptor(struct pmc_process *pp, struct thread *td,
2398 struct pmc_thread *pt = NULL, *ptnew = NULL;
2401 KASSERT(td != NULL, ("[pmc,%d] called to add NULL td", __LINE__));
2404 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case prior to
2405 * acquiring the lock.
2407 if (mode & PMC_FLAG_ALLOCATE) {
2408 if ((ptnew = pmc_thread_descriptor_pool_alloc()) == NULL) {
2409 wait_flag = (mode & PMC_FLAG_NOWAIT) ? M_NOWAIT : M_WAITOK;
2411 ptnew = malloc(THREADENTRY_SIZE, M_PMC, wait_flag | M_ZERO);
2416 mtx_lock_spin(pp->pp_tdslock);
2418 LIST_FOREACH(pt, &pp->pp_tds, pt_next)
2419 if (pt->pt_td == td)
2422 if ((mode & PMC_FLAG_REMOVE) && pt != NULL)
2423 LIST_REMOVE(pt, pt_next);
2425 if ((mode & PMC_FLAG_ALLOCATE) && pt == NULL && ptnew != NULL) {
2429 LIST_INSERT_HEAD(&pp->pp_tds, pt, pt_next);
2432 mtx_unlock_spin(pp->pp_tdslock);
2434 if (ptnew != NULL) {
2442 * Try to add thread descriptors for each thread in a process.
2446 pmc_add_thread_descriptors_from_proc(struct proc *p, struct pmc_process *pp)
2448 struct thread *curtd;
2449 struct pmc_thread **tdlist;
2450 int i, tdcnt, tdlistsz;
2452 KASSERT(!PROC_LOCKED(p), ("[pmc,%d] proc unexpectedly locked",
2456 tdlistsz = roundup2(tdcnt, 32);
2459 tdlist = malloc(sizeof(struct pmc_thread *) * tdlistsz, M_TEMP, M_WAITOK);
2462 FOREACH_THREAD_IN_PROC(p, curtd)
2464 if (tdcnt >= tdlistsz) {
2466 free(tdlist, M_TEMP);
2470 * Try to add each thread to the list without sleeping. If unable,
2471 * add to a queue to retry after dropping the process lock.
2474 FOREACH_THREAD_IN_PROC(p, curtd) {
2475 tdlist[tdcnt] = pmc_find_thread_descriptor(pp, curtd,
2476 PMC_FLAG_ALLOCATE|PMC_FLAG_NOWAIT);
2477 if (tdlist[tdcnt] == NULL) {
2479 for (i = 0; i <= tdcnt; i++)
2480 pmc_thread_descriptor_pool_free(tdlist[i]);
2481 free(tdlist, M_TEMP);
2487 free(tdlist, M_TEMP);
2491 * find the descriptor corresponding to process 'p', adding or removing it
2492 * as specified by 'mode'.
2495 static struct pmc_process *
2496 pmc_find_process_descriptor(struct proc *p, uint32_t mode)
2499 struct pmc_process *pp, *ppnew;
2500 struct pmc_processhash *pph;
2502 hindex = PMC_HASH_PTR(p, pmc_processhashmask);
2503 pph = &pmc_processhash[hindex];
2508 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we
2509 * cannot call malloc(9) once we hold a spin lock.
2511 if (mode & PMC_FLAG_ALLOCATE)
2512 ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc *
2513 sizeof(struct pmc_targetstate), M_PMC, M_WAITOK|M_ZERO);
2515 mtx_lock_spin(&pmc_processhash_mtx);
2516 LIST_FOREACH(pp, pph, pp_next)
2517 if (pp->pp_proc == p)
2520 if ((mode & PMC_FLAG_REMOVE) && pp != NULL)
2521 LIST_REMOVE(pp, pp_next);
2523 if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL && ppnew != NULL) {
2526 LIST_INIT(&ppnew->pp_tds);
2527 ppnew->pp_tdslock = mtx_pool_find(pmc_mtxpool, ppnew);
2528 LIST_INSERT_HEAD(pph, ppnew, pp_next);
2529 mtx_unlock_spin(&pmc_processhash_mtx);
2533 /* Add thread descriptors for this process' current threads. */
2534 pmc_add_thread_descriptors_from_proc(p, pp);
2537 mtx_unlock_spin(&pmc_processhash_mtx);
2546 * remove a process descriptor from the process hash table.
2550 pmc_remove_process_descriptor(struct pmc_process *pp)
2552 KASSERT(pp->pp_refcnt == 0,
2553 ("[pmc,%d] Removing process descriptor %p with count %d",
2554 __LINE__, pp, pp->pp_refcnt));
2556 mtx_lock_spin(&pmc_processhash_mtx);
2557 LIST_REMOVE(pp, pp_next);
2558 mtx_unlock_spin(&pmc_processhash_mtx);
2562 * destroy a process descriptor.
2566 pmc_destroy_process_descriptor(struct pmc_process *pp)
2568 struct pmc_thread *pmc_td;
2570 while ((pmc_td = LIST_FIRST(&pp->pp_tds)) != NULL) {
2571 LIST_REMOVE(pmc_td, pt_next);
2572 pmc_thread_descriptor_pool_free(pmc_td);
2579 * find an owner descriptor corresponding to proc 'p'
2582 static struct pmc_owner *
2583 pmc_find_owner_descriptor(struct proc *p)
2586 struct pmc_owner *po;
2587 struct pmc_ownerhash *poh;
2589 hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2590 poh = &pmc_ownerhash[hindex];
2593 LIST_FOREACH(po, poh, po_next)
2594 if (po->po_owner == p)
2597 PMCDBG5(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
2598 "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);
2604 * pmc_allocate_pmc_descriptor
2606 * Allocate a pmc descriptor and initialize its fields.
2611 pmc_allocate_pmc_descriptor(void)
2615 pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK|M_ZERO);
2616 pmc->pm_runcount = counter_u64_alloc(M_WAITOK);
2617 pmc->pm_pcpu_state = malloc(sizeof(struct pmc_pcpu_state) * mp_ncpus, M_PMC, M_WAITOK | M_ZERO);
2618 PMCDBG1(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
2624 * Destroy a pmc descriptor.
2628 pmc_destroy_pmc_descriptor(struct pmc *pm)
2631 KASSERT(pm->pm_state == PMC_STATE_DELETED ||
2632 pm->pm_state == PMC_STATE_FREE,
2633 ("[pmc,%d] destroying non-deleted PMC", __LINE__));
2634 KASSERT(LIST_EMPTY(&pm->pm_targets),
2635 ("[pmc,%d] destroying pmc with targets", __LINE__));
2636 KASSERT(pm->pm_owner == NULL,
2637 ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
2638 KASSERT(counter_u64_fetch(pm->pm_runcount) == 0,
2639 ("[pmc,%d] pmc has non-zero run count %ld", __LINE__,
2640 (unsigned long)counter_u64_fetch(pm->pm_runcount)));
2642 counter_u64_free(pm->pm_runcount);
2643 free(pm->pm_pcpu_state, M_PMC);
2648 pmc_wait_for_pmc_idle(struct pmc *pm)
2651 volatile int maxloop;
2653 maxloop = 100 * pmc_cpu_max();
2656 * Loop (with a forced context switch) until the PMC's runcount
2657 * comes down to zero.
2659 while (counter_u64_fetch(pm->pm_runcount) > 0) {
2662 KASSERT(maxloop > 0,
2663 ("[pmc,%d] (ri%d, rc%ld) waiting too long for "
2664 "pmc to be free", __LINE__,
2665 PMC_TO_ROWINDEX(pm), (unsigned long)counter_u64_fetch(pm->pm_runcount)));
2667 pmc_force_context_switch();
2672 * This function does the following things:
2674 * - detaches the PMC from hardware
2675 * - unlinks all target threads that were attached to it
2676 * - removes the PMC from its owner's list
2677 * - destroys the PMC private mutex
2679 * Once this function completes, the given pmc pointer can be freed by
2680 * calling pmc_destroy_pmc_descriptor().
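 *
 * The expected teardown order is thus (a sketch, mirroring the
 * PMC_OP_PMCRELEASE handler below):
 *
 *	pmc_release_pmc_descriptor(pm);	-- off hardware, targets unlinked
 *	pmc_destroy_pmc_descriptor(pm);	-- free the descriptor itself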
2684 pmc_release_pmc_descriptor(struct pmc *pm)
2688 u_int adjri, ri, cpu;
2689 struct pmc_owner *po;
2690 struct pmc_binding pb;
2691 struct pmc_process *pp;
2692 struct pmc_classdep *pcd;
2693 struct pmc_target *ptgt, *tmp;
2695 sx_assert(&pmc_sx, SX_XLOCKED);
2697 KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));
2699 ri = PMC_TO_ROWINDEX(pm);
2700 pcd = pmc_ri_to_classdep(md, ri, &adjri);
2701 mode = PMC_TO_MODE(pm);
2703 PMCDBG3(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
2707 * First, we take the PMC off hardware.
2710 if (PMC_IS_SYSTEM_MODE(mode)) {
2713 * A system mode PMC runs on a specific CPU. Switch
2714 * to this CPU and turn hardware off.
2716 pmc_save_cpu_binding(&pb);
2718 cpu = PMC_TO_CPU(pm);
2720 pmc_select_cpu(cpu);
2722 /* switch off non-stalled CPUs */
2723 pm->pm_pcpu_state[cpu].pps_cpustate = 0;
2724 if (pm->pm_state == PMC_STATE_RUNNING &&
2725 pm->pm_pcpu_state[cpu].pps_stalled == 0) {
2727 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
2729 KASSERT(phw->phw_pmc == pm,
2730 ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
2731 __LINE__, ri, phw->phw_pmc, pm));
2732 PMCDBG2(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);
2735 pcd->pcd_stop_pmc(cpu, adjri);
2739 PMCDBG2(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);
2742 pcd->pcd_config_pmc(cpu, adjri, NULL);
2745 /* adjust the global and process count of SS mode PMCs */
2746 if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) {
2749 if (po->po_sscount == 0) {
2750 atomic_subtract_rel_int(&pmc_ss_count, 1);
2751 CK_LIST_REMOVE(po, po_ssnext);
2752 epoch_wait_preempt(global_epoch_preempt);
2756 pm->pm_state = PMC_STATE_DELETED;
2758 pmc_restore_cpu_binding(&pb);
2761 * We could have references to this PMC structure in
2762 * the per-cpu sample queues. Wait for the queue to drain.
2765 pmc_wait_for_pmc_idle(pm);
2767 } else if (PMC_IS_VIRTUAL_MODE(mode)) {
2770 * A virtual PMC could be running on multiple CPUs at a given instant.
2773 * By marking its state as DELETED, we ensure that
2774 * this PMC is never further scheduled on hardware.
2776 * Then we wait until all CPUs are done with this PMC.
2778 pm->pm_state = PMC_STATE_DELETED;
2781 /* Wait for the PMC's runcount to come to zero. */
2782 pmc_wait_for_pmc_idle(pm);
2785 * At this point the PMC is off all CPUs and cannot be
2786 * freshly scheduled onto a CPU. It is now safe to
2787 * unlink all targets from this PMC. If a
2788 * process-record's refcount falls to zero, we remove
2789 * it from the hash table. The module-wide SX lock
2790 * protects us from races.
2792 LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
2793 pp = ptgt->pt_process;
2794 pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */
2796 PMCDBG1(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);
2799 * If the target process record shows that no
2800 * PMCs are attached to it, reclaim its space.
2803 if (pp->pp_refcnt == 0) {
2804 pmc_remove_process_descriptor(pp);
2805 pmc_destroy_process_descriptor(pp);
2809 cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */
2814 * Release any MD resources
2816 (void) pcd->pcd_release_pmc(cpu, adjri, pm);
2819 * Update row disposition
2822 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm)))
2823 PMC_UNMARK_ROW_STANDALONE(ri);
2825 PMC_UNMARK_ROW_THREAD(ri);
2827 /* unlink from the owner's list */
2829 LIST_REMOVE(pm, pm_next);
2830 pm->pm_owner = NULL;
2835 * Register an owner and a pmc.
2839 pmc_register_owner(struct proc *p, struct pmc *pmc)
2841 struct pmc_owner *po;
2843 sx_assert(&pmc_sx, SX_XLOCKED);
2845 if ((po = pmc_find_owner_descriptor(p)) == NULL)
2846 if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
2849 KASSERT(pmc->pm_owner == NULL,
2850 ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
2853 LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next);
2856 p->p_flag |= P_HWPMC;
2859 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
2860 pmclog_process_pmcallocate(pmc);
2862 PMCDBG2(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p",
2869 * Return the current row disposition:
2870 * == 0 => FREE
2871 * > 0 => PROCESS MODE
2872 * < 0 => SYSTEM MODE
2876 pmc_getrowdisp(int ri)
2878 return pmc_pmcdisp[ri];
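/*
 * For example (a sketch of the convention): two process-mode PMCs
 * allocated at row 'ri' leave pmc_pmcdisp[ri] at 2, while a single
 * system-wide PMC there drives it to -1; the row reads as FREE again
 * only once the count returns to 0.
 */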
2882 * Check if a PMC at row index 'ri' can be allocated to the current process.
2885 * Allocation can fail if:
2886 * - the current process is already being profiled by a PMC at index 'ri',
2887 * attached to it via OP_PMCATTACH.
2888 * - the current process has already allocated a PMC at index 'ri'
2893 pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu)
2897 struct pmc_owner *po;
2898 struct pmc_process *pp;
2900 PMCDBG5(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d "
2901 "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu);
2904 * We shouldn't have already allocated a process-mode PMC at
2905 * row index 'ri'.
2907 * We shouldn't have allocated a system-wide PMC on the same
2908 * CPU and the same row index.
2910 if ((po = pmc_find_owner_descriptor(p)) != NULL)
2911 LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
2912 if (PMC_TO_ROWINDEX(pm) == ri) {
2913 mode = PMC_TO_MODE(pm);
2914 if (PMC_IS_VIRTUAL_MODE(mode))
2916 if (PMC_IS_SYSTEM_MODE(mode) &&
2917 (int) PMC_TO_CPU(pm) == cpu)
2923 * We also shouldn't be the target of any PMC at this index
2924 * since otherwise a PMC_ATTACH to ourselves will fail.
2926 if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
2927 if (pp->pp_pmcs[ri].pp_pmc)
2930 PMCDBG4(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
2931 p, p->p_pid, p->p_comm, ri);
2937 * Check if a given PMC at row index 'ri' can be currently used in mode 'mode'.
2942 pmc_can_allocate_row(int ri, enum pmc_mode mode)
2946 sx_assert(&pmc_sx, SX_XLOCKED);
2948 PMCDBG2(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
2950 if (PMC_IS_SYSTEM_MODE(mode))
2951 disp = PMC_DISP_STANDALONE;
2953 disp = PMC_DISP_THREAD;
2956 * check disposition for PMC row 'ri':
2958 * Expected disposition Row-disposition Result
2960 * STANDALONE STANDALONE or FREE proceed
2961 * STANDALONE THREAD fail
2962 * THREAD THREAD or FREE proceed
2963 * THREAD STANDALONE fail
2966 if (!PMC_ROW_DISP_IS_FREE(ri) &&
2967 !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
2968 !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
2975 PMCDBG2(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
2982 * Find a PMC descriptor with user handle 'pmcid' for thread 'td'.
2986 pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
2990 KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
2991 ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
2992 PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));
2994 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
2995 if (pm->pm_id == pmcid)
3002 pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
3005 struct pmc *pm, *opm;
3006 struct pmc_owner *po;
3007 struct pmc_process *pp;
3009 PMCDBG1(PMC,FND,1, "find-pmc id=%d", pmcid);
3010 if (PMC_ID_TO_ROWINDEX(pmcid) >= md->pmd_npmc)
3013 if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL) {
3015 * In the case of PMC_F_DESCENDANTS child processes, we will not find
3016 * the current process in the owners hash list. Find the owner
3017 * process first and from there look up the po.
3019 if ((pp = pmc_find_process_descriptor(curthread->td_proc,
3020 PMC_FLAG_NONE)) == NULL) {
3023 opm = pp->pp_pmcs[PMC_ID_TO_ROWINDEX(pmcid)].pp_pmc;
3026 if ((opm->pm_flags & (PMC_F_ATTACHED_TO_OWNER|
3027 PMC_F_DESCENDANTS)) != (PMC_F_ATTACHED_TO_OWNER|
3034 if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
3037 PMCDBG2(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
3048 pmc_start(struct pmc *pm)
3051 struct pmc_owner *po;
3052 struct pmc_binding pb;
3053 struct pmc_classdep *pcd;
3054 int adjri, error, cpu, ri;
3057 ("[pmc,%d] null pm", __LINE__));
3059 mode = PMC_TO_MODE(pm);
3060 ri = PMC_TO_ROWINDEX(pm);
3061 pcd = pmc_ri_to_classdep(md, ri, &adjri);
3065 PMCDBG3(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);
3070 * Disallow PMCSTART if a logfile is required but has not been configured.
3073 if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
3074 (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
3075 return (EDOOFUS); /* programming error */
3078 * If this is a sampling mode PMC, log mapping information for
3079 * the kernel modules that are currently loaded.
3081 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3082 pmc_log_kernel_mappings(pm);
3084 if (PMC_IS_VIRTUAL_MODE(mode)) {
3087 * If a PMCATTACH has never been done on this PMC,
3088 * attach it to its owner process.
3091 if (LIST_EMPTY(&pm->pm_targets))
3092 error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH :
3093 pmc_attach_process(po->po_owner, pm);
3096 * If the PMC is attached to its owner, then force a context
3097 * switch to ensure that the MD state gets set correctly.
3101 pm->pm_state = PMC_STATE_RUNNING;
3102 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER)
3103 pmc_force_context_switch();
3111 * A system-wide PMC.
3113 * Add the owner to the global list if this is a system-wide
3117 if (mode == PMC_MODE_SS) {
3119 * Log mapping information for all existing processes in the
3120 * system. Subsequent mappings are logged as they happen;
3121 * see pmc_process_mmap().
3123 if (po->po_logprocmaps == 0) {
3124 pmc_log_all_process_mappings(po);
3125 po->po_logprocmaps = 1;
3128 if (po->po_sscount == 1) {
3129 atomic_add_rel_int(&pmc_ss_count, 1);
3130 CK_LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
3131 PMCDBG1(PMC,OPS,1, "po=%p in global list", po);
3136 * Move to the CPU associated with this
3137 * PMC, and start the hardware.
3140 pmc_save_cpu_binding(&pb);
3142 cpu = PMC_TO_CPU(pm);
3144 if (!pmc_cpu_is_active(cpu))
3147 pmc_select_cpu(cpu);
3150 * global PMCs are configured at allocation time
3151 * so write out the initial value and start the PMC.
3154 pm->pm_state = PMC_STATE_RUNNING;
3157 if ((error = pcd->pcd_write_pmc(cpu, adjri,
3158 PMC_IS_SAMPLING_MODE(mode) ?
3159 pm->pm_sc.pm_reloadcount :
3160 pm->pm_sc.pm_initial)) == 0) {
3161 /* If a sampling mode PMC, reset stalled state. */
3162 if (PMC_IS_SAMPLING_MODE(mode))
3163 pm->pm_pcpu_state[cpu].pps_stalled = 0;
3165 /* Indicate that we desire this to run. Start it. */
3166 pm->pm_pcpu_state[cpu].pps_cpustate = 1;
3167 error = pcd->pcd_start_pmc(cpu, adjri);
3171 pmc_restore_cpu_binding(&pb);
3181 pmc_stop(struct pmc *pm)
3183 struct pmc_owner *po;
3184 struct pmc_binding pb;
3185 struct pmc_classdep *pcd;
3186 int adjri, cpu, error, ri;
3188 KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
3190 PMCDBG3(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm,
3191 PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm));
3193 pm->pm_state = PMC_STATE_STOPPED;
3196 * If the PMC is a virtual mode one, changing the state to
3197 * non-RUNNING is enough to ensure that the PMC never gets
3198 * scheduled.
3200 * If this PMC is currently running on a CPU, then it will be
3201 * handled correctly at the time its target process is context
3202 * switched out.
3205 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
3209 * A system-mode PMC. Move to the CPU associated with
3210 * this PMC, and stop the hardware. We update the
3211 * 'initial count' so that a subsequent PMCSTART will
3212 * resume counting from the current hardware count.
3215 pmc_save_cpu_binding(&pb);
3217 cpu = PMC_TO_CPU(pm);
3219 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
3220 ("[pmc,%d] illegal cpu=%d", __LINE__, cpu));
3222 if (!pmc_cpu_is_active(cpu))
3225 pmc_select_cpu(cpu);
3227 ri = PMC_TO_ROWINDEX(pm);
3228 pcd = pmc_ri_to_classdep(md, ri, &adjri);
3230 pm->pm_pcpu_state[cpu].pps_cpustate = 0;
3232 if ((error = pcd->pcd_stop_pmc(cpu, adjri)) == 0)
3233 error = pcd->pcd_read_pmc(cpu, adjri, &pm->pm_sc.pm_initial);
3236 pmc_restore_cpu_binding(&pb);
3240 /* remove this owner from the global list of SS PMC owners */
3241 if (PMC_TO_MODE(pm) == PMC_MODE_SS) {
3243 if (po->po_sscount == 0) {
3244 atomic_subtract_rel_int(&pmc_ss_count, 1);
3245 CK_LIST_REMOVE(po, po_ssnext);
3246 epoch_wait_preempt(global_epoch_preempt);
3247 PMCDBG1(PMC,OPS,2,"po=%p removed from global list", po);
3256 static const char *pmc_op_to_name[] = {
3258 #define __PMC_OP(N, D) #N ,
3265 * The syscall interface
3268 #define PMC_GET_SX_XLOCK(...) do { \
3269 sx_xlock(&pmc_sx); \
3270 if (pmc_hook == NULL) { \
3271 sx_xunlock(&pmc_sx); \
3272 return __VA_ARGS__; \
3276 #define PMC_DOWNGRADE_SX() do { \
3277 sx_downgrade(&pmc_sx); \
3278 is_sx_downgraded = 1; \
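/*
 * Illustrative usage of the two macros above (a sketch of the pattern
 * the syscall handler follows): take the exclusive lock, bail out if
 * the module is not yet hooked up, and downgrade to a shared lock for
 * operations that only read module state:
 *
 *	PMC_GET_SX_XLOCK(ENOSYS);
 *	...
 *	PMC_DOWNGRADE_SX();
 */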
3282 pmc_syscall_handler(struct thread *td, void *syscall_args)
3284 int error, is_sx_downgraded, op;
3285 struct pmc_syscall_args *c;
3286 void *pmclog_proc_handle;
3289 c = (struct pmc_syscall_args *)syscall_args;
3292 /* PMC isn't set up yet */
3293 if (pmc_hook == NULL)
3295 if (op == PMC_OP_CONFIGURELOG) {
3297 * We cannot create the logging process inside
3298 * pmclog_configure_log() because there is a LOR
3299 * between pmc_sx and process structure locks.
3300 * Instead, pre-create the process and ignite the loop
3301 * if everything is fine, otherwise direct the process to exit.
3304 error = pmclog_proc_create(td, &pmclog_proc_handle);
3309 PMC_GET_SX_XLOCK(ENOSYS);
3310 is_sx_downgraded = 0;
3311 PMCDBG3(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
3312 pmc_op_to_name[op], arg);
3315 counter_u64_add(pmc_stats.pm_syscalls, 1);
3321 * Configure a log file.
3323 * XXX This OP will be reworked.
3326 case PMC_OP_CONFIGURELOG:
3330 struct pmc_owner *po;
3331 struct pmc_op_configurelog cl;
3333 if ((error = copyin(arg, &cl, sizeof(cl))) != 0) {
3334 pmclog_proc_ignite(pmclog_proc_handle, NULL);
3338 /* mark this process as owning a log file */
3340 if ((po = pmc_find_owner_descriptor(p)) == NULL)
3341 if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
3342 pmclog_proc_ignite(pmclog_proc_handle, NULL);
3348 * If a valid fd was passed in, try to configure that,
3349 * otherwise if 'fd' was less than zero and there was
3350 * a log file configured, flush its buffers and de-configure it.
3353 if (cl.pm_logfd >= 0) {
3354 error = pmclog_configure_log(md, po, cl.pm_logfd);
3355 pmclog_proc_ignite(pmclog_proc_handle, error == 0 ?
3357 } else if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
3358 pmclog_proc_ignite(pmclog_proc_handle, NULL);
3359 error = pmclog_close(po);
3361 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
3362 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
3363 pm->pm_state == PMC_STATE_RUNNING)
3365 error = pmclog_deconfigure_log(po);
3368 pmclog_proc_ignite(pmclog_proc_handle, NULL);
3378 case PMC_OP_FLUSHLOG:
3380 struct pmc_owner *po;
3382 sx_assert(&pmc_sx, SX_XLOCKED);
3384 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3389 error = pmclog_flush(po);
3397 case PMC_OP_CLOSELOG:
3399 struct pmc_owner *po;
3401 sx_assert(&pmc_sx, SX_XLOCKED);
3403 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3408 error = pmclog_close(po);
3413 * Retrieve hardware configuration.
3416 case PMC_OP_GETCPUINFO: /* CPU information */
3418 struct pmc_op_getcpuinfo gci;
3419 struct pmc_classinfo *pci;
3420 struct pmc_classdep *pcd;
3423 gci.pm_cputype = md->pmd_cputype;
3424 gci.pm_ncpu = pmc_cpu_max();
3425 gci.pm_npmc = md->pmd_npmc;
3426 gci.pm_nclass = md->pmd_nclass;
3427 pci = gci.pm_classes;
3428 pcd = md->pmd_classdep;
3429 for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) {
3430 pci->pm_caps = pcd->pcd_caps;
3431 pci->pm_class = pcd->pcd_class;
3432 pci->pm_width = pcd->pcd_width;
3433 pci->pm_num = pcd->pcd_num;
3435 error = copyout(&gci, arg, sizeof(gci));
3440 * Retrieve soft events list.
3442 case PMC_OP_GETDYNEVENTINFO:
3446 struct pmc_op_getdyneventinfo *gei;
3447 struct pmc_dyn_event_descr dev;
3448 struct pmc_soft *ps;
3451 sx_assert(&pmc_sx, SX_LOCKED);
3453 gei = (struct pmc_op_getdyneventinfo *) arg;
3455 if ((error = copyin(&gei->pm_class, &cl, sizeof(cl))) != 0)
3458 /* Only SOFT class is dynamic. */
3459 if (cl != PMC_CLASS_SOFT) {
3465 for (ev = PMC_EV_SOFT_FIRST; (int)ev <= PMC_EV_SOFT_LAST; ev++) {
3466 ps = pmc_soft_ev_acquire(ev);
3469 bcopy(&ps->ps_ev, &dev, sizeof(dev));
3470 pmc_soft_ev_release(ps);
3472 error = copyout(&dev,
3473 &gei->pm_events[nevent],
3474 sizeof(struct pmc_dyn_event_descr));
3482 error = copyout(&nevent, &gei->pm_nevent,
3488 * Get module statistics
3491 case PMC_OP_GETDRIVERSTATS:
3493 struct pmc_op_getdriverstats gms;
3494 #define CFETCH(a, b, field) a.field = counter_u64_fetch(b.field)
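/*
 * For example, CFETCH(gms, pmc_stats, pm_syscalls) expands to:
 *	gms.pm_syscalls = counter_u64_fetch(pmc_stats.pm_syscalls);
 */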
3495 CFETCH(gms, pmc_stats, pm_intr_ignored);
3496 CFETCH(gms, pmc_stats, pm_intr_processed);
3497 CFETCH(gms, pmc_stats, pm_intr_bufferfull);
3498 CFETCH(gms, pmc_stats, pm_syscalls);
3499 CFETCH(gms, pmc_stats, pm_syscall_errors);
3500 CFETCH(gms, pmc_stats, pm_buffer_requests);
3501 CFETCH(gms, pmc_stats, pm_buffer_requests_failed);
3502 CFETCH(gms, pmc_stats, pm_log_sweeps);
3504 error = copyout(&gms, arg, sizeof(gms));
3510 * Retrieve module version number
3513 case PMC_OP_GETMODULEVERSION:
3517 /* retrieve the client's idea of the ABI version */
3518 if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0)
3520 /* don't service clients newer than our driver */
3522 if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) {
3523 error = EPROGMISMATCH;
3526 error = copyout(&modv, arg, sizeof(int));
3532 * Retrieve the state of all the PMCs on a given CPU.
3536 case PMC_OP_GETPMCINFO:
3540 size_t pmcinfo_size;
3541 uint32_t cpu, n, npmc;
3542 struct pmc_owner *po;
3543 struct pmc_binding pb;
3544 struct pmc_classdep *pcd;
3545 struct pmc_info *p, *pmcinfo;
3546 struct pmc_op_getpmcinfo *gpi;
3550 gpi = (struct pmc_op_getpmcinfo *) arg;
3552 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
3555 if (cpu >= pmc_cpu_max()) {
3560 if (!pmc_cpu_is_active(cpu)) {
3565 /* switch to CPU 'cpu' */
3566 pmc_save_cpu_binding(&pb);
3567 pmc_select_cpu(cpu);
3569 npmc = md->pmd_npmc;
3571 pmcinfo_size = npmc * sizeof(struct pmc_info);
3572 pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK);
3576 for (n = 0; n < md->pmd_npmc; n++, p++) {
3578 pcd = pmc_ri_to_classdep(md, n, &ari);
3580 KASSERT(pcd != NULL,
3581 ("[pmc,%d] null pcd ri=%d", __LINE__, n));
3583 if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0)
3586 if (PMC_ROW_DISP_IS_STANDALONE(n))
3587 p->pm_rowdisp = PMC_DISP_STANDALONE;
3588 else if (PMC_ROW_DISP_IS_THREAD(n))
3589 p->pm_rowdisp = PMC_DISP_THREAD;
3591 p->pm_rowdisp = PMC_DISP_FREE;
3593 p->pm_ownerpid = -1;
3595 if (pm == NULL) /* no PMC associated */
3600 KASSERT(po->po_owner != NULL,
3601 ("[pmc,%d] pmc_owner had a null proc pointer",
3604 p->pm_ownerpid = po->po_owner->p_pid;
3605 p->pm_mode = PMC_TO_MODE(pm);
3606 p->pm_event = pm->pm_event;
3607 p->pm_flags = pm->pm_flags;
3609 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3611 pm->pm_sc.pm_reloadcount;
3614 pmc_restore_cpu_binding(&pb);
3616 /* now copy out the PMC info collected */
3618 error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
3620 free(pmcinfo, M_PMC);
3626 * Set the administrative state of a PMC, i.e., whether
3627 * the PMC is to be used or not.
3630 case PMC_OP_PMCADMIN:
3633 enum pmc_state request;
3636 struct pmc_op_pmcadmin pma;
3637 struct pmc_binding pb;
3639 sx_assert(&pmc_sx, SX_XLOCKED);
3641 KASSERT(td == curthread,
3642 ("[pmc,%d] td != curthread", __LINE__));
3644 error = priv_check(td, PRIV_PMC_MANAGE);
3648 if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
3653 if (cpu < 0 || cpu >= (int) pmc_cpu_max()) {
3658 if (!pmc_cpu_is_active(cpu)) {
3663 request = pma.pm_state;
3665 if (request != PMC_STATE_DISABLED &&
3666 request != PMC_STATE_FREE) {
3671 ri = pma.pm_pmc; /* pmc id == row index */
3672 if (ri < 0 || ri >= (int) md->pmd_npmc) {
3678 * We can't disable a PMC with a row-index allocated
3679 * for process virtual PMCs.
3682 if (PMC_ROW_DISP_IS_THREAD(ri) &&
3683 request == PMC_STATE_DISABLED) {
3689 * otherwise, this PMC on this CPU is either free or
3690 * in system-wide mode.
3693 pmc_save_cpu_binding(&pb);
3694 pmc_select_cpu(cpu);
3697 phw = pc->pc_hwpmcs[ri];
3700 * XXX do we need some kind of 'forced' disable?
3703 if (phw->phw_pmc == NULL) {
3704 if (request == PMC_STATE_DISABLED &&
3705 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
3706 phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
3707 PMC_MARK_ROW_STANDALONE(ri);
3708 } else if (request == PMC_STATE_FREE &&
3709 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
3710 phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
3711 PMC_UNMARK_ROW_STANDALONE(ri);
3713 /* other cases are a no-op */
3717 pmc_restore_cpu_binding(&pb);
3726 case PMC_OP_PMCALLOCATE:
3734 struct pmc_binding pb;
3735 struct pmc_classdep *pcd;
3736 struct pmc_op_pmcallocate pa;
3738 if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
3745 if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC &&
3746 mode != PMC_MODE_TS && mode != PMC_MODE_TC) ||
3747 (cpu != (u_int) PMC_CPU_ANY && cpu >= pmc_cpu_max())) {
3753 * Virtual PMCs should only ask for a default CPU.
3754 * System mode PMCs need to specify a non-default CPU.
3757 if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
3758 (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
3764 * Check that an inactive CPU is not being asked for.
3767 if (PMC_IS_SYSTEM_MODE(mode) && !pmc_cpu_is_active(cpu)) {
3773 * Refuse an allocation for a system-wide PMC if this
3774 * process has been jailed, or if this process lacks
3775 * super-user credentials and the sysctl tunable
3776 * 'security.bsd.unprivileged_syspmcs' is zero.
3779 if (PMC_IS_SYSTEM_MODE(mode)) {
3780 if (jailed(curthread->td_ucred)) {
3784 if (!pmc_unprivileged_syspmcs) {
3785 error = priv_check(curthread,
3793 * Look for valid values for 'pm_flags'
3796 if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW |
3797 PMC_F_LOG_PROCEXIT | PMC_F_CALLCHAIN)) != 0) {
3802 /* process logging options are not allowed for system PMCs */
3803 if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags &
3804 (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) {
3810 * All sampling mode PMCs need to be able to interrupt the CPU.
3813 if (PMC_IS_SAMPLING_MODE(mode))
3814 caps |= PMC_CAP_INTERRUPT;
3816 /* A valid class specifier should have been passed in. */
3817 for (n = 0; n < md->pmd_nclass; n++)
3818 if (md->pmd_classdep[n].pcd_class == pa.pm_class)
3820 if (n == md->pmd_nclass) {
3825 /* The requested PMC capabilities should be feasible. */
3826 if ((md->pmd_classdep[n].pcd_caps & caps) != caps) {
3831 PMCDBG4(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
3832 pa.pm_ev, caps, mode, cpu);
3834 pmc = pmc_allocate_pmc_descriptor();
3835 pmc->pm_id = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class,
3837 pmc->pm_event = pa.pm_ev;
3838 pmc->pm_state = PMC_STATE_FREE;
3839 pmc->pm_caps = caps;
3840 pmc->pm_flags = pa.pm_flags;
3842 /* switch thread to CPU 'cpu' */
3843 pmc_save_cpu_binding(&pb);
3845 #define PMC_IS_SHAREABLE_PMC(cpu, n) \
3846 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \
3847 PMC_PHW_FLAG_IS_SHAREABLE)
3848 #define PMC_IS_UNALLOCATED(cpu, n) \
3849 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
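/*
 * (A sketch of the test below: a hardware row on 'cpu' is usable for a
 * system-wide allocation only if its slot is currently empty or has
 * been explicitly marked shareable.)
 */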
3851 if (PMC_IS_SYSTEM_MODE(mode)) {
3852 pmc_select_cpu(cpu);
3853 for (n = 0; n < (int) md->pmd_npmc; n++) {
3854 pcd = pmc_ri_to_classdep(md, n, &adjri);
3855 if (pmc_can_allocate_row(n, mode) == 0 &&
3856 pmc_can_allocate_rowindex(
3857 curthread->td_proc, n, cpu) == 0 &&
3858 (PMC_IS_UNALLOCATED(cpu, n) ||
3859 PMC_IS_SHAREABLE_PMC(cpu, n)) &&
3860 pcd->pcd_allocate_pmc(cpu, adjri, pmc,
3865 /* Process virtual mode */
3866 for (n = 0; n < (int) md->pmd_npmc; n++) {
3867 pcd = pmc_ri_to_classdep(md, n, &adjri);
3868 if (pmc_can_allocate_row(n, mode) == 0 &&
3869 pmc_can_allocate_rowindex(
3870 curthread->td_proc, n,
3871 PMC_CPU_ANY) == 0 &&
3872 pcd->pcd_allocate_pmc(curthread->td_oncpu,
3873 adjri, pmc, &pa) == 0)
3878 #undef PMC_IS_UNALLOCATED
3879 #undef PMC_IS_SHAREABLE_PMC
3881 pmc_restore_cpu_binding(&pb);
3883 if (n == (int) md->pmd_npmc) {
3884 pmc_destroy_pmc_descriptor(pmc);
3890 /* Fill in the correct value in the ID field */
3891 pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n);
3893 PMCDBG5(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x",
3894 pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id);
3896 /* Process mode PMCs with logging enabled need log files */
3897 if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW))
3898 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
3900 /* All system mode sampling PMCs require a log file */
3901 if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode))
3902 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
3905 * Configure global PMCs immediately.
3908 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) {
3910 pmc_save_cpu_binding(&pb);
3911 pmc_select_cpu(cpu);
3913 phw = pmc_pcpu[cpu]->pc_hwpmcs[n];
3914 pcd = pmc_ri_to_classdep(md, n, &adjri);
3916 if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 ||
3917 (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) {
3918 (void) pcd->pcd_release_pmc(cpu, adjri, pmc);
3919 pmc_destroy_pmc_descriptor(pmc);
3921 pmc_restore_cpu_binding(&pb);
3926 pmc_restore_cpu_binding(&pb);
3929 pmc->pm_state = PMC_STATE_ALLOCATED;
3932 * mark row disposition
3935 if (PMC_IS_SYSTEM_MODE(mode))
3936 PMC_MARK_ROW_STANDALONE(n);
3938 PMC_MARK_ROW_THREAD(n);
3941 * Register this PMC with the current thread as its owner.
3945 pmc_register_owner(curthread->td_proc, pmc)) != 0) {
3946 pmc_release_pmc_descriptor(pmc);
3947 pmc_destroy_pmc_descriptor(pmc);
3953 * Return the allocated index.
3956 pa.pm_pmcid = pmc->pm_id;
3958 error = copyout(&pa, arg, sizeof(pa));
3964 * Attach a PMC to a process.
3967 case PMC_OP_PMCATTACH:
3971 struct pmc_op_pmcattach a;
3973 sx_assert(&pmc_sx, SX_XLOCKED);
3975 if ((error = copyin(arg, &a, sizeof(a))) != 0)
3981 } else if (a.pm_pid == 0)
3982 a.pm_pid = td->td_proc->p_pid;
3984 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
3987 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
3992 /* PMCs may be (re)attached only when allocated or stopped */
3993 if (pm->pm_state == PMC_STATE_RUNNING) {
3996 } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
3997 pm->pm_state != PMC_STATE_STOPPED) {
4003 if ((p = pfind(a.pm_pid)) == NULL) {
4009 * Ignore processes that are exiting.
4011 if (p->p_flag & P_WEXIT) {
4013 PROC_UNLOCK(p); /* pfind() returns a locked process */
4018 * We are allowed to attach a PMC to a process if we can debug it.
4021 error = p_candebug(curthread, p);
4026 error = pmc_attach_process(p, pm);
4032 * Detach an attached PMC from a process.
4035 case PMC_OP_PMCDETACH:
4039 struct pmc_op_pmcattach a;
4041 if ((error = copyin(arg, &a, sizeof(a))) != 0)
4047 } else if (a.pm_pid == 0)
4048 a.pm_pid = td->td_proc->p_pid;
4050 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
4053 if ((p = pfind(a.pm_pid)) == NULL) {
4059 * Treat processes that are in the process of exiting
4060 * as if they were not present.
4063 if (p->p_flag & P_WEXIT)
4066 PROC_UNLOCK(p); /* pfind() returns a locked process */
4069 error = pmc_detach_process(p, pm);
4075 * Retrieve the MSR number associated with the counter
4076 * 'pmc_id'. This allows processes to directly use RDPMC
4077 * instructions to read their PMCs, without the overhead of a system call.
4081 case PMC_OP_PMCGETMSR:
4085 struct pmc_target *pt;
4086 struct pmc_op_getmsr gm;
4087 struct pmc_classdep *pcd;
4091 if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
4094 if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
4098 * The allocated PMC has to be a process virtual PMC,
4099 * i.e., of type MODE_T[CS]. Global PMCs can only be
4100 * read using the PMCREAD operation since they may be
4101 * allocated on a different CPU than the one we could
4102 * be running on at the time of the RDPMC instruction.
4104 * The GETMSR operation is not allowed for PMCs that
4105 * are inherited across processes.
4108 if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) ||
4109 (pm->pm_flags & PMC_F_DESCENDANTS)) {
4115 * It only makes sense to use a RDPMC (or its
4116 * equivalent instruction on non-x86 architectures) on
4117 * a process that has allocated and attached a PMC to
4118 * itself. Conversely the PMC is only allowed to have
4119 * one process attached to it -- its owner.
4122 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL ||
4123 LIST_NEXT(pt, pt_next) != NULL ||
4124 pt->pt_process->pp_proc != pm->pm_owner->po_owner) {
4129 ri = PMC_TO_ROWINDEX(pm);
4130 pcd = pmc_ri_to_classdep(md, ri, &adjri);
4132 /* PMC class has no 'GETMSR' support */
4133 if (pcd->pcd_get_msr == NULL) {
4138 if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) < 0)
4141 if ((error = copyout(&gm, arg, sizeof(gm))) < 0)
4145 * Mark our process as using MSRs. Update machine
4146 * state using a forced context switch.
4149 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS;
4150 pmc_force_context_switch();
4156 * Release an allocated PMC
4159 case PMC_OP_PMCRELEASE:
4163 struct pmc_owner *po;
4164 struct pmc_op_simple sp;
4167 * Find PMC pointer for the named PMC.
4169 * Use pmc_release_pmc_descriptor() to switch off the
4170 * PMC, remove all its target threads, and remove the
4171 * PMC from its owner's list.
4173 * Remove the owner record if this is the last PMC owned by this process.
4179 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4182 pmcid = sp.pm_pmcid;
4184 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4188 pmc_release_pmc_descriptor(pm);
4189 pmc_maybe_remove_owner(po);
4190 pmc_destroy_pmc_descriptor(pm);
4196 * Read and/or write a PMC.
4204 pmc_value_t oldvalue;
4205 struct pmc_binding pb;
4206 struct pmc_op_pmcrw prw;
4207 struct pmc_classdep *pcd;
4208 struct pmc_op_pmcrw *pprw;
4212 if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
4216 PMCDBG2(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
4219 /* must have at least one flag set */
4220 if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
4225 /* locate pmc descriptor */
4226 if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
4229 /* Can't read a PMC that hasn't been started. */
4230 if (pm->pm_state != PMC_STATE_ALLOCATED &&
4231 pm->pm_state != PMC_STATE_STOPPED &&
4232 pm->pm_state != PMC_STATE_RUNNING) {
4237 /* writing a new value is allowed only for 'STOPPED' PMCs */
4238 if (pm->pm_state == PMC_STATE_RUNNING &&
4239 (prw.pm_flags & PMC_F_NEWVALUE)) {
4244 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
4247 * If this PMC is attached to its owner (i.e.,
4248 * the process requesting this operation) and
4249 * is running, then attempt to get an
4250 * up-to-date reading from hardware for a READ.
4251 * Writes are only allowed when the PMC is
4252 * stopped, so only update the saved value
4253 * field.
4255 * If the PMC is not running, or is not
4256 * attached to its owner, read/write to the
4257 * saved value field.
4260 ri = PMC_TO_ROWINDEX(pm);
4261 pcd = pmc_ri_to_classdep(md, ri, &adjri);
4263 mtx_pool_lock_spin(pmc_mtxpool, pm);
4264 cpu = curthread->td_oncpu;
4266 if (prw.pm_flags & PMC_F_OLDVALUE) {
4267 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
4268 (pm->pm_state == PMC_STATE_RUNNING))
4269 error = (*pcd->pcd_read_pmc)(cpu, adjri,
4272 oldvalue = pm->pm_gv.pm_savedvalue;
4274 if (prw.pm_flags & PMC_F_NEWVALUE)
4275 pm->pm_gv.pm_savedvalue = prw.pm_value;
4277 mtx_pool_unlock_spin(pmc_mtxpool, pm);
4279 } else { /* System mode PMCs */
4280 cpu = PMC_TO_CPU(pm);
4281 ri = PMC_TO_ROWINDEX(pm);
4282 pcd = pmc_ri_to_classdep(md, ri, &adjri);
4284 if (!pmc_cpu_is_active(cpu)) {
4289 /* move this thread to CPU 'cpu' */
4290 pmc_save_cpu_binding(&pb);
4291 pmc_select_cpu(cpu);
4294 /* save old value */
4295 if (prw.pm_flags & PMC_F_OLDVALUE)
4296 if ((error = (*pcd->pcd_read_pmc)(cpu, adjri,
4299 /* write out new value */
4300 if (prw.pm_flags & PMC_F_NEWVALUE)
4301 error = (*pcd->pcd_write_pmc)(cpu, adjri,
4305 pmc_restore_cpu_binding(&pb);
4310 pprw = (struct pmc_op_pmcrw *) arg;
4313 if (prw.pm_flags & PMC_F_NEWVALUE)
4314 PMCDBG3(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
4315 ri, prw.pm_value, oldvalue);
4316 else if (prw.pm_flags & PMC_F_OLDVALUE)
4317 PMCDBG2(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue);
4320 /* return old value if requested */
4321 if (prw.pm_flags & PMC_F_OLDVALUE)
4322 if ((error = copyout(&oldvalue, &pprw->pm_value,
4323 sizeof(prw.pm_value))))
4331 * Set the sampling rate for a sampling mode PMC and the
4332 * initial count for a counting mode PMC.
4335 case PMC_OP_PMCSETCOUNT:
4338 struct pmc_op_pmcsetcount sc;
4342 if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
4345 if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
4348 if (pm->pm_state == PMC_STATE_RUNNING) {
4353 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
4354 pm->pm_sc.pm_reloadcount = sc.pm_count;
4356 pm->pm_sc.pm_initial = sc.pm_count;
4365 case PMC_OP_PMCSTART:
4369 struct pmc_op_simple sp;
4371 sx_assert(&pmc_sx, SX_XLOCKED);
4373 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4376 pmcid = sp.pm_pmcid;
4378 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4381 KASSERT(pmcid == pm->pm_id,
4382 ("[pmc,%d] pmcid %x != id %x", __LINE__,
4385 if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
4387 else if (pm->pm_state != PMC_STATE_STOPPED &&
4388 pm->pm_state != PMC_STATE_ALLOCATED) {
4393 error = pmc_start(pm);
4402 case PMC_OP_PMCSTOP:
4406 struct pmc_op_simple sp;
4410 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4413 pmcid = sp.pm_pmcid;
4416 * Mark the PMC as inactive and invoke the MD stop
4417 * routines if needed.
4420 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4423 KASSERT(pmcid == pm->pm_id,
4424 ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
4427 if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
4429 else if (pm->pm_state != PMC_STATE_RUNNING) {
4434 error = pmc_stop(pm);
4440 * Write a user-supplied value to the log file.
4443 case PMC_OP_WRITELOG:
4445 struct pmc_op_writelog wl;
4446 struct pmc_owner *po;
4450 if ((error = copyin(arg, &wl, sizeof(wl))) != 0)
4453 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
4458 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
4463 error = pmclog_process_userlog(po, &wl);
4473 if (is_sx_downgraded)
4474 sx_sunlock(&pmc_sx);
4476 sx_xunlock(&pmc_sx);
4479 counter_u64_add(pmc_stats.pm_syscall_errors, 1);
4490 * Mark the thread as needing callchain capture and post an AST. The
4491 * actual callchain capture will be done in a context where it is safe
4492 * to take page faults.
4496 pmc_post_callchain_callback(void)
4503 * If there are multiple PMCs for the same interrupt, ignore the new post.
4505 if (td->td_pflags & TDP_CALLCHAIN)
4509 * Mark this thread as needing callchain capture.
4510 * `td->td_pflags' will be safe to touch because this thread
4511 * was in user space when it was interrupted.
4513 td->td_pflags |= TDP_CALLCHAIN;
4516 * Don't let this thread migrate between CPUs until callchain
4517 * capture completes.
4525 * Interrupt processing.
4527 * Find a free slot in the per-cpu array of samples and capture the
4528 * current callchain there. If a sample was successfully added, a bit
4529 * is set in mask 'pmc_cpumask' denoting that the DO_SAMPLES hook
4530 * needs to be invoked from the clock handler.
4532 * This function is meant to be called from an NMI handler. It cannot
4533 * use any of the locking primitives supplied by the OS.
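 *
 * The overall flow, sketched: NMI -> pmc_process_interrupt() queues a
 * sample and sets the per-CPU 'pmc_sampled' flag -> hardclock() notices
 * the flag and raises the PMC_FN_DO_SAMPLES hook -> pmc_process_samples()
 * drains the ring in normal (non-NMI) context.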
4537 pmc_process_interrupt(int cpu, int ring, struct pmc *pm, struct trapframe *tf,
4540 int error, callchaindepth;
4542 struct pmc_sample *ps;
4543 struct pmc_samplebuffer *psb;
4548 * Locate the per-cpu sample ring and its current write slot.
4550 psb = pmc_pcpu[cpu]->pc_sb[ring];
4553 if (ps->ps_nsamples) { /* in use, reader hasn't caught up */
4554 pm->pm_pcpu_state[cpu].pps_stalled = 1;
4555 counter_u64_add(pmc_stats.pm_intr_bufferfull, 1);
4556 PMCDBG6(SAM,INT,1,"(spc) cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d",
4557 cpu, pm, (void *) tf, inuserspace,
4558 (int) (psb->ps_write - psb->ps_samples),
4559 (int) (psb->ps_read - psb->ps_samples));
4566 /* Fill in entry. */
4567 PMCDBG6(SAM,INT,1,"cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm,
4568 (void *) tf, inuserspace,
4569 (int) (psb->ps_write - psb->ps_samples),
4570 (int) (psb->ps_read - psb->ps_samples));
4572 KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0,
4573 ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
4574 (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4576 counter_u64_add(pm->pm_runcount, 1); /* hold onto PMC */
4579 if ((td = curthread) && td->td_proc)
4580 ps->ps_pid = td->td_proc->p_pid;
4585 ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0;
4587 callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ?
4588 pmc_callchaindepth : 1;
4590 if (callchaindepth == 1)
4591 ps->ps_pc[0] = PMC_TRAPFRAME_TO_PC(tf);
4594 * Kernel stack traversals can be done immediately,
4595 * while we defer to an AST for user space traversals.
4599 pmc_save_kernel_callchain(ps->ps_pc,
4600 callchaindepth, tf);
4602 pmc_post_callchain_callback();
4603 callchaindepth = PMC_SAMPLE_INUSE;
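/*
 * A sketch of the deferred-capture protocol: PMC_SAMPLE_INUSE in
 * ps_nsamples marks the slot as claimed but unfilled; the AST
 * handler, pmc_capture_user_callchain(), overwrites it with the
 * real depth, and pmc_process_samples() skips INUSE entries until
 * it does.
 */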
4607 ps->ps_nsamples = callchaindepth; /* mark entry as in use */
4609 /* increment write pointer, modulo ring buffer size */
4611 if (ps == psb->ps_fence)
4612 psb->ps_write = psb->ps_samples;
4617 /* mark CPU as needing processing */
4618 if (callchaindepth != PMC_SAMPLE_INUSE)
4619 DPCPU_SET(pmc_sampled, 1);
4625 * Capture a user call chain. This function will be called from ast()
4626 * before control returns to userland and before the process gets rescheduled.
4631 pmc_capture_user_callchain(int cpu, int ring, struct trapframe *tf)
4635 struct pmc_sample *ps, *ps_end;
4636 struct pmc_samplebuffer *psb;
4642 psb = pmc_pcpu[cpu]->pc_sb[ring];
4645 KASSERT(td->td_pflags & TDP_CALLCHAIN,
4646 ("[pmc,%d] Retrieving callchain for thread that doesn't want it",
4655 * Iterate through all deferred callchain requests.
4656 * Walk from the current read pointer to the current write pointer.
4661 ps_end = psb->ps_write;
4664 if ((ps->ps_pmc == NULL) ||
4665 (ps->ps_pmc->pm_state != PMC_STATE_RUNNING))
4668 if (ps->ps_nsamples != PMC_SAMPLE_INUSE)
4670 if (ps->ps_td != td)
4673 KASSERT(ps->ps_cpu == cpu,
4674 ("[pmc,%d] cpu mismatch ps_cpu=%d pcpu=%d", __LINE__,
4675 ps->ps_cpu, PCPU_GET(cpuid)));
4679 KASSERT(pm->pm_flags & PMC_F_CALLCHAIN,
4680 ("[pmc,%d] Retrieving callchain for PMC that doesn't "
4681 "want it", __LINE__));
4683 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
4684 ("[pmc,%d] runcount %ld", __LINE__, (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4687 * Retrieve the callchain and mark the sample buffer
4688 * as 'processable' by the timer tick sweep code.
4690 ps->ps_nsamples = pmc_save_user_callchain(ps->ps_pc,
4691 pmc_callchaindepth, tf);
4698 /* increment the pointer, modulo sample ring size */
4699 if (++ps == psb->ps_fence)
4700 ps = psb->ps_samples;
4701 } while (ps != ps_end);
4704 KASSERT(ncallchains > 0 || nfree > 0,
4705 ("[pmc,%d] cpu %d didn't find a sample to collect", __LINE__,
4709 KASSERT(td->td_pinned == 1,
4710 ("[pmc,%d] invalid td_pinned value", __LINE__));
4711 sched_unpin(); /* Can migrate safely now. */
4713 /* mark CPU as needing processing */
4714 DPCPU_SET(pmc_sampled, 1);
4718 * Process saved PC samples.
4722 pmc_process_samples(int cpu, int ring)
4727 struct pmc_owner *po;
4728 struct pmc_sample *ps;
4729 struct pmc_classdep *pcd;
4730 struct pmc_samplebuffer *psb;
4732 KASSERT(PCPU_GET(cpuid) == cpu,
4733 ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__,
4734 PCPU_GET(cpuid), cpu));
4736 psb = pmc_pcpu[cpu]->pc_sb[ring];
4738 for (n = 0; n < pmc_nsamples; n++) { /* bound on #iterations */
4741 if (ps->ps_nsamples == PMC_SAMPLE_FREE)
4746 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
4747 ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
4748 (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4752 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
4753 ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__,
4754 pm, PMC_TO_MODE(pm)));
4756 /* Ignore PMCs that have been switched off */
4757 if (pm->pm_state != PMC_STATE_RUNNING)
4760 /* If there is a pending AST, wait for completion */
4761 if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
4762 /* Need a rescan at a later time. */
4763 DPCPU_SET(pmc_sampled, 1);
4767 PMCDBG6(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu,
4768 pm, ps->ps_nsamples, ps->ps_flags,
4769 (int) (psb->ps_write - psb->ps_samples),
4770 (int) (psb->ps_read - psb->ps_samples));
4773 * If this is a process-mode PMC that is attached to
4774 * its owner, and if the PC is in user mode, update
4775 * profiling statistics as timer-based profiling would have done.
4778 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
4779 if (ps->ps_flags & PMC_CC_F_USERSPACE) {
4780 td = FIRST_THREAD_IN_PROC(po->po_owner);
4781 addupc_intr(td, ps->ps_pc[0], 1);
4787 * Otherwise, this is either a sampling mode PMC that
4788 * is attached to a different process than its owner,
4789 * or a system-wide sampling PMC. Dispatch a log
4790 * entry to the PMC's owner process.
4792 pmclog_process_callchain(pm, ps);
4795 ps->ps_nsamples = 0; /* mark entry as free */
4796 counter_u64_add(pm->pm_runcount, -1);
4798 /* increment read pointer, modulo sample size */
4799 if (++ps == psb->ps_fence)
4800 psb->ps_read = psb->ps_samples;
4805 counter_u64_add(pmc_stats.pm_log_sweeps, 1);
4807 /* Do not re-enable stalled PMCs if we failed to process any samples */
4812 * Restart any stalled sampling PMCs on this CPU.
4814 * If the NMI handler sets the pm_stalled field of a PMC after
4815 * the check below, we'll end up processing the stalled PMC at
4816 * the next hardclock tick.
4818 for (n = 0; n < md->pmd_npmc; n++) {
4819 pcd = pmc_ri_to_classdep(md, n, &adjri);
4820 KASSERT(pcd != NULL,
4821 ("[pmc,%d] null pcd ri=%d", __LINE__, n));
4822 (void) (*pcd->pcd_get_config)(cpu,adjri,&pm);
4824 if (pm == NULL || /* !cfg'ed */
4825 pm->pm_state != PMC_STATE_RUNNING || /* !active */
4826 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */
4827 !pm->pm_pcpu_state[cpu].pps_cpustate || /* !desired */
4828 !pm->pm_pcpu_state[cpu].pps_stalled) /* !stalled */
4831 pm->pm_pcpu_state[cpu].pps_stalled = 0;
4832 (*pcd->pcd_start_pmc)(cpu, adjri);
4841 * Handle a process exit.
4843 * Remove this process from all hash tables. If this process
4844 * owned any PMCs, turn off those PMCs and deallocate them,
4845 * removing any associations with target processes.
4847 * This function will be called by the last 'thread' of a process.
4850 * XXX This eventhandler gets called early in the exit process.
4851 * Consider using a 'hook' invocation from thread_exit() or an
4852 * equivalent spot. Another negative is that kse_exit doesn't seem
4853 * to call exit1().
4858 pmc_process_exit(void *arg __unused, struct proc *p)
4863 int is_using_hwpmcs;
4864 struct pmc_owner *po;
4865 struct pmc_process *pp;
4866 struct pmc_classdep *pcd;
4867 pmc_value_t newvalue, tmp;
4870 is_using_hwpmcs = p->p_flag & P_HWPMC;
4874 * Log a sysexit event to all SS PMC owners.
4876 epoch_enter_preempt(global_epoch_preempt);
4877 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
4878 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
4879 pmclog_process_sysexit(po, p->p_pid);
4880 epoch_exit_preempt(global_epoch_preempt);
4882 if (!is_using_hwpmcs)
4886 PMCDBG3(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
4890 * Since this code is invoked by the last thread in an exiting
4891 * process, we would have context switched IN at some prior
4892 * point. However, with PREEMPTION, kernel mode context
4893 * switches may happen any time, so we want to disable a
4894 * context switch OUT till we get any PMCs targeting this
4895 * process off the hardware.
4897 * We also need to atomically remove this process'
4898 * entry from our target process hash table, using PMC_FLAG_REMOVE.
4904 critical_enter(); /* no preemption */
4906 cpu = curthread->td_oncpu;
4908 if ((pp = pmc_find_process_descriptor(p,
4909 PMC_FLAG_REMOVE)) != NULL) {
4912 "process-exit proc=%p pmc-process=%p", p, pp);
4915 * The exiting process could be the target of
4916 * some PMCs which will be running on the
4917 * currently executing CPU.
4919 * We need to turn these PMCs off like we
4920 * would do at context switch OUT time.
4922 for (ri = 0; ri < md->pmd_npmc; ri++) {
4925 * Pick up the pmc pointer from hardware
4926 * state similar to the CSW_OUT code.
4930 pcd = pmc_ri_to_classdep(md, ri, &adjri);
4932 (void) (*pcd->pcd_get_config)(cpu, adjri, &pm);
4934 PMCDBG2(PRC,EXT,2, "ri=%d pm=%p", ri, pm);
4937 !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
4940 PMCDBG4(PRC,EXT,2, "ppmcs[%d]=%p pm=%p "
4941 "state=%d", ri, pp->pp_pmcs[ri].pp_pmc,
4944 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
4945 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
4946 __LINE__, PMC_TO_ROWINDEX(pm), ri));
4948 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
4949 ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
4950 __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc));
4952 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
4953 ("[pmc,%d] bad runcount ri %d rc %ld",
4954 __LINE__, ri, (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4957 * Change desired state, and then stop if not
4958 * stalled. This two-step dance should avoid
4959 * race conditions where an interrupt re-enables
4960 * the PMC after this code has already checked
4961 * the pm_stalled flag.
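 *
 * A sketch of the flag protocol: pps_cpustate means "software
 * wants this PMC running"; pps_stalled means "the interrupt
 * path had to stop it".  Clearing pps_cpustate first ensures
 * that a late NMI consulting both flags will not restart the
 * counter (compare the restart loop in pmc_process_samples()).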
4963 if (pm->pm_pcpu_state[cpu].pps_cpustate) {
4964 pm->pm_pcpu_state[cpu].pps_cpustate = 0;
4965 if (!pm->pm_pcpu_state[cpu].pps_stalled) {
4966 (void) pcd->pcd_stop_pmc(cpu, adjri);
4968 if (PMC_TO_MODE(pm) == PMC_MODE_TC) {
4969 pcd->pcd_read_pmc(cpu, adjri,
4972 PMC_PCPU_SAVED(cpu,ri);
4974 mtx_pool_lock_spin(pmc_mtxpool,
4976 pm->pm_gv.pm_savedvalue += tmp;
4977 pp->pp_pmcs[ri].pp_pmcval +=
4979 mtx_pool_unlock_spin(
4985 counter_u64_add(pm->pm_runcount, -1);
4987 KASSERT((int) counter_u64_fetch(pm->pm_runcount) >= 0,
4988 ("[pmc,%d] runcount is %d", __LINE__, ri));
4990 (void) pcd->pcd_config_pmc(cpu, adjri, NULL);
4994 * Inform the MD layer of this pseudo "context switch out".
4997 (void) md->pmd_switch_out(pmc_pcpu[cpu], pp);
4999 critical_exit(); /* ok to be pre-empted now */
5002 * Unlink this process from the PMCs that are
5003 * targeting it. This will send a signal to
5004 * all PMC owners whose PMCs are orphaned.
5006 * Log PMC value at exit time if requested.
5008 for (ri = 0; ri < md->pmd_npmc; ri++)
5009 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
5010 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
5011 PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)))
5012 pmclog_process_procexit(pm, pp);
5013 pmc_unlink_target_process(pm, pp);
5018 critical_exit(); /* pp == NULL */
5022 * If the process owned PMCs, free them up and release its owner descriptor.
5025 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
5026 pmc_remove_owner(po);
5027 pmc_destroy_owner_descriptor(po);
5030 sx_xunlock(&pmc_sx);
5034 * Handle a process fork.
5036 * If the parent process 'p1' is under HWPMC monitoring, then copy
5037 * over any attached PMCs that have 'do_descendants' semantics.
5041 pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc,
5044 int is_using_hwpmcs;
5046 uint32_t do_descendants;
5048 struct pmc_owner *po;
5049 struct pmc_process *ppnew, *ppold;
5051 (void) flags; /* unused parameter */
5054 is_using_hwpmcs = p1->p_flag & P_HWPMC;
5058 * If there are system-wide sampling PMCs active, we need to
5059 * log all fork events to their owners' logs.
5061 epoch_enter_preempt(global_epoch_preempt);
5062 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5063 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5064 pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);
5065 epoch_exit_preempt(global_epoch_preempt);
5067 if (!is_using_hwpmcs)
5071 PMCDBG4(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1,
5072 p1->p_pid, p1->p_comm, newproc);
5075 * If the parent process (curthread->td_proc) is a
5076 * target of any PMCs, look for PMCs that are to be
5077 * inherited, and link these into the new process descriptor.
5080 if ((ppold = pmc_find_process_descriptor(curthread->td_proc,
5081 PMC_FLAG_NONE)) == NULL)
5082 goto done; /* nothing to do */
5085 for (ri = 0; ri < md->pmd_npmc; ri++)
5086 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL)
5087 do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS;
5088 if (do_descendants == 0) /* nothing to do */
5092 * Now mark the new process as being tracked by this driver.
5095 newproc->p_flag |= P_HWPMC;
5096 PROC_UNLOCK(newproc);
5098 /* allocate a descriptor for the new process */
5099 if ((ppnew = pmc_find_process_descriptor(newproc,
5100 PMC_FLAG_ALLOCATE)) == NULL)
5104 * Run through all PMCs that were targeting the old process
5105 * and which specified F_DESCENDANTS, and attach them to the new process.
5108 * Log the fork event to all owners of PMCs attached to this
5109 * process, if not already logged.
5111 for (ri = 0; ri < md->pmd_npmc; ri++)
5112 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
5113 (pm->pm_flags & PMC_F_DESCENDANTS)) {
5114 pmc_link_target_process(pm, ppnew);
5116 if (po->po_sscount == 0 &&
5117 po->po_flags & PMC_PO_OWNS_LOGFILE)
5118 pmclog_process_procfork(po, p1->p_pid,
5123 sx_xunlock(&pmc_sx);
static void
pmc_kld_load(void *arg __unused, linker_file_t lf)
{
	struct pmc_owner *po;

	/*
	 * Notify owners of system sampling PMCs about KLD operations.
	 */
	epoch_enter_preempt(global_epoch_preempt);
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
		if (po->po_flags & PMC_PO_OWNS_LOGFILE)
			pmclog_process_map_in(po, (pid_t) -1,
			    (uintfptr_t) lf->address, lf->filename);
	epoch_exit_preempt(global_epoch_preempt);

	/*
	 * TODO: Notify owners of (all) process-sampling PMCs too.
	 */
}
static void
pmc_kld_unload(void *arg __unused, const char *filename __unused,
    caddr_t address, size_t size)
{
	struct pmc_owner *po;

	epoch_enter_preempt(global_epoch_preempt);
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
		if (po->po_flags & PMC_PO_OWNS_LOGFILE)
			pmclog_process_map_out(po, (pid_t) -1,
			    (uintfptr_t) address, (uintfptr_t) address + size);
	epoch_exit_preempt(global_epoch_preempt);

	/*
	 * TODO: Notify owners of process-sampling PMCs.
	 */
}
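
/*
 * Editor's sketch (not part of the driver, not compiled): the
 * map-in/map-out records emitted above are what lets a consumer such
 * as pmcstat(8) attribute samples to KLD text ranges.  Field names
 * are assumptions from our reading of <pmclog.h>.
 */
#if 0
#include <pmclog.h>
#include <stdio.h>

static void
show_kld_maps(void *cookie)	/* cookie from pmclog_open() */
{
	struct pmclog_ev ev;

	while (pmclog_read(cookie, &ev) == 0 && ev.pl_state == PMCLOG_OK) {
		if (ev.pl_type == PMCLOG_TYPE_MAP_IN &&
		    ev.pl_u.pl_mi.pl_pid == (pid_t) -1)
			printf("kld map-in  %#jx %s\n",
			    (uintmax_t) ev.pl_u.pl_mi.pl_start,
			    ev.pl_u.pl_mi.pl_pathname);
		else if (ev.pl_type == PMCLOG_TYPE_MAP_OUT &&
		    ev.pl_u.pl_mo.pl_pid == (pid_t) -1)
			printf("kld map-out %#jx-%#jx\n",
			    (uintmax_t) ev.pl_u.pl_mo.pl_start,
			    (uintmax_t) ev.pl_u.pl_mo.pl_end);
	}
}
#endif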
static const char *
pmc_name_of_pmcclass(enum pmc_class class)
{
	switch (class) {
#undef	__PMC_CLASS
#define	__PMC_CLASS(S,V,D)	case PMC_CLASS_##S: return (#S);
	__PMC_CLASSES();
	default:
		return ("<unknown>");
	}
}
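
/*
 * Editor's note: the function above relies on the X-macro list
 * __PMC_CLASSES() from <sys/pmc.h>; redefining __PMC_CLASS() turns
 * the one list into switch cases.  A minimal standalone demonstration
 * of the same technique (hypothetical names, not compiled):
 */
#if 0
#include <stdio.h>

#define	COLORS()	COLOR(RED) COLOR(GREEN) COLOR(BLUE)

enum color {
#define	COLOR(S)	COLOR_##S,
	COLORS()
#undef	COLOR
};

static const char *
color_name(enum color c)
{
	switch (c) {
#define	COLOR(S)	case COLOR_##S: return (#S);
	COLORS()
#undef	COLOR
	}
	return ("<unknown>");
}

int
main(void)
{
	printf("%s\n", color_name(COLOR_GREEN));	/* prints GREEN */
	return (0);
}
#endif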
/*
 * Base class initializer: allocate structure and set default classes.
 */
struct pmc_mdep *
pmc_mdep_alloc(int nclasses)
{
	struct pmc_mdep *md;
	int n;

	/* SOFT + md classes */
	n = 1 + nclasses;
	md = malloc(sizeof(struct pmc_mdep) + n *
	    sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO);
	md->pmd_nclass = n;

	/* Add base class. */
	pmc_soft_initialize(md);
	return (md);
}

void
pmc_mdep_free(struct pmc_mdep *md)
{
	pmc_soft_finalize(md);
	free(md, M_PMC);
}
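
/*
 * Editor's sketch (not compiled): a machine-dependent backend builds
 * on pmc_mdep_alloc() along these lines.  Everything named
 * "example_*" is hypothetical; real backends live in hwpmc_<arch>.c.
 */
#if 0
static struct pmc_mdep *
example_md_initialize(void)
{
	struct pmc_mdep *md;
	struct pmc_classdep *pcd;

	md = pmc_mdep_alloc(1);		/* SOFT plus one hardware class */
	md->pmd_cputype = PMC_CPU_GENERIC;	/* real code: detected type */

	/* Slot 0 is SOFT, filled in by pmc_soft_initialize(). */
	pcd = &md->pmd_classdep[1];
	pcd->pcd_class = PMC_CLASS_TSC;	/* example class */
	pcd->pcd_caps = PMC_CAP_READ | PMC_CAP_WRITE;
	pcd->pcd_num = 1;
	pcd->pcd_width = 64;
	/* ... install the pcd_* and pmd_* method pointers here ... */
	return (md);
}
#endif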
static int
generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
{
	(void) pc; (void) pp;

	return (0);
}

static int
generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
{
	(void) pc; (void) pp;

	return (0);
}

static struct pmc_mdep *
pmc_generic_cpu_initialize(void)
{
	struct pmc_mdep *md;

	md = pmc_mdep_alloc(0);

	md->pmd_cputype = PMC_CPU_GENERIC;

	md->pmd_pcpu_init = NULL;
	md->pmd_pcpu_fini = NULL;
	md->pmd_switch_in = generic_switch_in;
	md->pmd_switch_out = generic_switch_out;

	return (md);
}

static void
pmc_generic_cpu_finalize(struct pmc_mdep *md)
{
	(void) md;
}
static int
pmc_initialize(void)
{
	int c, cpu, error, n, ri;
	unsigned int maxcpu, domain;
	struct pcpu *pc;
	struct pmc_binding pb;
	struct pmc_sample *ps;
	struct pmc_classdep *pcd;
	struct pmc_samplebuffer *sb;

	md = NULL;
	error = 0;

	pmc_stats.pm_intr_ignored = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_intr_processed = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_intr_bufferfull = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_syscalls = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_syscall_errors = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_buffer_requests = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_buffer_requests_failed = counter_u64_alloc(M_WAITOK);
	pmc_stats.pm_log_sweeps = counter_u64_alloc(M_WAITOK);
	/* parse debug flags first */
	if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
	    pmc_debugstr, sizeof(pmc_debugstr)))
		pmc_debugflags_parse(pmc_debugstr,
		    pmc_debugstr + strlen(pmc_debugstr));

	PMCDBG1(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);

	/* check kernel version */
	if (pmc_kernel_version != PMC_VERSION) {
		if (pmc_kernel_version == 0)
			printf("hwpmc: this kernel has not been compiled with "
			    "'options HWPMC_HOOKS'.\n");
		else
			printf("hwpmc: kernel version (0x%x) does not match "
			    "module version (0x%x).\n", pmc_kernel_version,
			    PMC_VERSION);
		return (EPROGMISMATCH);
	}
	/*
	 * Check sysctl parameters.
	 */
	if (pmc_hashsize <= 0) {
		(void) printf("hwpmc: tunable \"hashsize\"=%d must be "
		    "greater than zero.\n", pmc_hashsize);
		pmc_hashsize = PMC_HASH_SIZE;
	}

	if (pmc_nsamples <= 0 || pmc_nsamples > 65535) {
		(void) printf("hwpmc: tunable \"nsamples\"=%d out of "
		    "range.\n", pmc_nsamples);
		pmc_nsamples = PMC_NSAMPLES;
	}

	if (pmc_callchaindepth <= 0 ||
	    pmc_callchaindepth > PMC_CALLCHAIN_DEPTH_MAX) {
		(void) printf("hwpmc: tunable \"callchaindepth\"=%d out of "
		    "range - using %d.\n", pmc_callchaindepth,
		    PMC_CALLCHAIN_DEPTH_MAX);
		pmc_callchaindepth = PMC_CALLCHAIN_DEPTH_MAX;
	}
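
	/*
	 * Editor's note: the three tunables validated above are set
	 * from loader.conf(5) with the PMC_SYSCTL_NAME_PREFIX
	 * ("kern.hwpmc.") names, e.g.:
	 *
	 *	kern.hwpmc.hashsize=256
	 *	kern.hwpmc.nsamples=1024
	 *	kern.hwpmc.callchaindepth=32
	 *
	 * (Values are illustrative; see hwpmc(4) for defaults and limits.)
	 */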
	md = pmc_md_initialize();
	if (md == NULL) {
		/* Default to generic CPU. */
		md = pmc_generic_cpu_initialize();
		if (md == NULL)
			return (ENOSYS);
	}

	KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1,
	    ("[pmc,%d] no classes or pmcs", __LINE__));

	/* Compute the map from row-indices to classdep pointers. */
	pmc_rowindex_to_classdep = malloc(sizeof(struct pmc_classdep *) *
	    md->pmd_npmc, M_PMC, M_WAITOK|M_ZERO);

	for (n = 0; n < md->pmd_npmc; n++)
		pmc_rowindex_to_classdep[n] = NULL;
	for (ri = c = 0; c < md->pmd_nclass; c++) {
		pcd = &md->pmd_classdep[c];
		for (n = 0; n < pcd->pcd_num; n++, ri++)
			pmc_rowindex_to_classdep[ri] = pcd;
	}

	KASSERT(ri == md->pmd_npmc,
	    ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__,
	    ri, md->pmd_npmc));
	maxcpu = pmc_cpu_max();

	/* allocate space for the per-cpu array */
	pmc_pcpu = malloc(maxcpu * sizeof(struct pmc_cpu *), M_PMC,
	    M_WAITOK|M_ZERO);

	/* per-cpu 'saved values' for managing process-mode PMCs */
	pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc,
	    M_PMC, M_WAITOK);

	/* Perform CPU-dependent initialization. */
	pmc_save_cpu_binding(&pb);
	for (cpu = 0; error == 0 && cpu < maxcpu; cpu++) {
		if (!pmc_cpu_is_active(cpu))
			continue;
		pmc_select_cpu(cpu);
		pmc_pcpu[cpu] = malloc(sizeof(struct pmc_cpu) +
		    md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC,
		    M_WAITOK|M_ZERO);
		if (md->pmd_pcpu_init)
			error = md->pmd_pcpu_init(md, cpu);
		for (n = 0; error == 0 && n < md->pmd_nclass; n++)
			error = md->pmd_classdep[n].pcd_pcpu_init(md, cpu);
	}
	pmc_restore_cpu_binding(&pb);

	if (error != 0)
		return (error);
	/* allocate space for the sample array */
	for (cpu = 0; cpu < maxcpu; cpu++) {
		if (!pmc_cpu_is_active(cpu))
			continue;
		pc = pcpu_find(cpu);
		domain = pc->pc_domain;
		sb = malloc_domain(sizeof(struct pmc_samplebuffer) +
		    pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain,
		    M_WAITOK|M_ZERO);
		sb->ps_read = sb->ps_write = sb->ps_samples;
		sb->ps_fence = sb->ps_samples + pmc_nsamples;

		KASSERT(pmc_pcpu[cpu] != NULL,
		    ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));

		sb->ps_callchains = malloc_domain(pmc_callchaindepth *
		    pmc_nsamples * sizeof(uintptr_t), M_PMC, domain,
		    M_WAITOK|M_ZERO);

		for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
			ps->ps_pc = sb->ps_callchains +
			    (n * pmc_callchaindepth);

		pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb;

		sb = malloc_domain(sizeof(struct pmc_samplebuffer) +
		    pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain,
		    M_WAITOK|M_ZERO);
		sb->ps_read = sb->ps_write = sb->ps_samples;
		sb->ps_fence = sb->ps_samples + pmc_nsamples;

		KASSERT(pmc_pcpu[cpu] != NULL,
		    ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));

		sb->ps_callchains = malloc_domain(pmc_callchaindepth *
		    pmc_nsamples * sizeof(uintptr_t), M_PMC, domain,
		    M_WAITOK|M_ZERO);

		for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
			ps->ps_pc = sb->ps_callchains +
			    (n * pmc_callchaindepth);

		pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb;
	}
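
	/*
	 * Editor's sketch of the layout built above: each sample buffer
	 * is a ring of pmc_nsamples entries delimited by
	 * [ps_samples, ps_fence), with ps_read/ps_write chasing each
	 * other, and sample n owning a private callchain slice of
	 * pmc_callchaindepth slots:
	 */
#if 0
	/* Hypothetical helper showing the callchain slice arithmetic. */
	static uintptr_t *
	sample_callchain_slot(struct pmc_samplebuffer *sb, int n)
	{
		return (sb->ps_callchains + (size_t) n * pmc_callchaindepth);
	}
#endif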
	/* allocate space for the row disposition array */
	pmc_pmcdisp = malloc(sizeof(*pmc_pmcdisp) * md->pmd_npmc,
	    M_PMC, M_WAITOK|M_ZERO);

	/* mark all PMCs as available */
	for (n = 0; n < (int) md->pmd_npmc; n++)
		PMC_MARK_ROW_FREE(n);

	/* allocate thread hash tables */
	pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
	    &pmc_ownerhashmask);

	pmc_processhash = hashinit(pmc_hashsize, M_PMC,
	    &pmc_processhashmask);
	mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",
	    MTX_SPIN);

	CK_LIST_INIT(&pmc_ss_owners);

	/* allocate a pool of spin mutexes */
	pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size,
	    MTX_SPIN);

	PMCDBG4(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
	    "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
	    pmc_processhash, pmc_processhashmask);
	/* Initialize a spin mutex for the thread free list. */
	mtx_init(&pmc_threadfreelist_mtx, "pmc-threadfreelist", "pmc-leaf",
	    MTX_SPIN);

	/*
	 * Initialize the task to prune the thread free list.
	 * This task will also handle the initial population of the list.
	 */
	taskqgroup_config_gtask_init(NULL, &free_gtask,
	    pmc_thread_descriptor_pool_free_task,
	    "thread descriptor pool free task");

	/* register process {exit,fork,exec} handlers */
	pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
	    pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
	pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
	    pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);

	/* register kld event handlers */
	pmc_kld_load_tag = EVENTHANDLER_REGISTER(kld_load, pmc_kld_load,
	    NULL, EVENTHANDLER_PRI_ANY);
	pmc_kld_unload_tag = EVENTHANDLER_REGISTER(kld_unload, pmc_kld_unload,
	    NULL, EVENTHANDLER_PRI_ANY);

	/* initialize logging */
	pmclog_initialize();

	/* set hook functions */
	pmc_intr = md->pmd_intr;
	wmb();
	pmc_hook = pmc_hook_handler;
	printf(PMC_MODULE_NAME ":");
	for (n = 0; n < (int) md->pmd_nclass; n++) {
		pcd = &md->pmd_classdep[n];
		printf(" %s/%d/%d/0x%b",
		    pmc_name_of_pmcclass(pcd->pcd_class),
		    pcd->pcd_num,
		    pcd->pcd_width,
		    pcd->pcd_caps,
		    "\20"
		    "\1INT\2USR\3SYS\4EDG\5THR"
		    "\6REA\7WRI\10INV\11QUA\12PRC"
		    "\13TAG\14CSC");
	}
	printf("\n");

	return (error);
}
/* prepare to be unloaded */
static void
pmc_cleanup(void)
{
	int c, cpu;
	unsigned int maxcpu;
	struct pmc_ownerhash *ph;
	struct pmc_owner *po, *tmp;
	struct pmc_binding pb;
#ifdef HWPMC_DEBUG
	struct pmc_processhash *prh;
#endif

	PMCDBG0(MOD,INI,0, "cleanup");
	/* switch off sampling */
	CPU_FOREACH(cpu)
		DPCPU_ID_SET(cpu, pmc_sampled, 0);
	pmc_intr = NULL;

	sx_xlock(&pmc_sx);
	if (pmc_hook == NULL) {	/* being unloaded already */
		sx_xunlock(&pmc_sx);
		return;
	}

	pmc_hook = NULL; /* prevent new threads from entering module */

	/* deregister event handlers */
	EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
	EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
	EVENTHANDLER_DEREGISTER(kld_load, pmc_kld_load_tag);
	EVENTHANDLER_DEREGISTER(kld_unload, pmc_kld_unload_tag);
	/* send SIGBUS to all owner threads, free up allocations */
	if (pmc_ownerhash)
		for (ph = pmc_ownerhash;
		     ph <= &pmc_ownerhash[pmc_ownerhashmask];
		     ph++) {
			LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
				pmc_remove_owner(po);

				/* send SIGBUS to owner processes */
				PMCDBG3(MOD,INI,2, "cleanup signal proc=%p "
				    "(%d, %s)", po->po_owner,
				    po->po_owner->p_pid,
				    po->po_owner->p_comm);

				PROC_LOCK(po->po_owner);
				kern_psignal(po->po_owner, SIGBUS);
				PROC_UNLOCK(po->po_owner);

				pmc_destroy_owner_descriptor(po);
			}
		}
	/* reclaim allocated data structures */
	mtx_destroy(&pmc_threadfreelist_mtx);
	pmc_thread_descriptor_pool_drain();

	if (pmc_mtxpool)
		mtx_pool_destroy(&pmc_mtxpool);

	mtx_destroy(&pmc_processhash_mtx);
	if (pmc_processhash) {
#ifdef HWPMC_DEBUG
		struct pmc_process *pp;

		PMCDBG0(MOD,INI,3, "destroy process hash");
		for (prh = pmc_processhash;
		     prh <= &pmc_processhash[pmc_processhashmask];
		     prh++)
			LIST_FOREACH(pp, prh, pp_next)
				PMCDBG1(MOD,INI,3, "pid=%d",
				    pp->pp_proc->p_pid);
#endif

		hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
		pmc_processhash = NULL;
	}

	if (pmc_ownerhash) {
		PMCDBG0(MOD,INI,3, "destroy owner hash");
		hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
		pmc_ownerhash = NULL;
	}

	KASSERT(CK_LIST_EMPTY(&pmc_ss_owners),
	    ("[pmc,%d] Global SS owner list not empty", __LINE__));
	KASSERT(pmc_ss_count == 0,
	    ("[pmc,%d] Global SS count not zero", __LINE__));
	/* do processor and pmc-class dependent cleanup */
	maxcpu = pmc_cpu_max();

	PMCDBG0(MOD,INI,3, "md cleanup");
	if (md) {
		pmc_save_cpu_binding(&pb);
		for (cpu = 0; cpu < maxcpu; cpu++) {
			PMCDBG2(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
			    cpu, pmc_pcpu[cpu]);
			if (!pmc_cpu_is_active(cpu) || pmc_pcpu[cpu] == NULL)
				continue;
			pmc_select_cpu(cpu);
			for (c = 0; c < md->pmd_nclass; c++)
				md->pmd_classdep[c].pcd_pcpu_fini(md, cpu);
			if (md->pmd_pcpu_fini)
				md->pmd_pcpu_fini(md, cpu);
		}

		if (md->pmd_cputype == PMC_CPU_GENERIC)
			pmc_generic_cpu_finalize(md);
		else
			pmc_md_finalize(md);

		pmc_mdep_free(md);
		md = NULL;
		pmc_restore_cpu_binding(&pb);
	}
	/* Free per-cpu descriptors. */
	for (cpu = 0; cpu < maxcpu; cpu++) {
		if (!pmc_cpu_is_active(cpu))
			continue;
		KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_HR] != NULL,
		    ("[pmc,%d] Null hw cpu sample buffer cpu=%d", __LINE__,
		    cpu));
		KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_SR] != NULL,
		    ("[pmc,%d] Null sw cpu sample buffer cpu=%d", __LINE__,
		    cpu));
		free_domain(pmc_pcpu[cpu]->pc_sb[PMC_HR]->ps_callchains, M_PMC);
		free_domain(pmc_pcpu[cpu]->pc_sb[PMC_HR], M_PMC);
		free_domain(pmc_pcpu[cpu]->pc_sb[PMC_SR]->ps_callchains, M_PMC);
		free_domain(pmc_pcpu[cpu]->pc_sb[PMC_SR], M_PMC);
		free_domain(pmc_pcpu[cpu], M_PMC);
	}

	free(pmc_pcpu, M_PMC);
	pmc_pcpu = NULL;

	free(pmc_pcpu_saved, M_PMC);
	pmc_pcpu_saved = NULL;

	if (pmc_pmcdisp) {
		free(pmc_pmcdisp, M_PMC);
		pmc_pmcdisp = NULL;
	}

	if (pmc_rowindex_to_classdep) {
		free(pmc_rowindex_to_classdep, M_PMC);
		pmc_rowindex_to_classdep = NULL;
	}
	pmclog_shutdown();
	counter_u64_free(pmc_stats.pm_intr_ignored);
	counter_u64_free(pmc_stats.pm_intr_processed);
	counter_u64_free(pmc_stats.pm_intr_bufferfull);
	counter_u64_free(pmc_stats.pm_syscalls);
	counter_u64_free(pmc_stats.pm_syscall_errors);
	counter_u64_free(pmc_stats.pm_buffer_requests);
	counter_u64_free(pmc_stats.pm_buffer_requests_failed);
	counter_u64_free(pmc_stats.pm_log_sweeps);
	sx_xunlock(&pmc_sx);	/* we are done */
}
/*
 * The function called at load/unload.
 */
static int
load(struct module *module __unused, int cmd, void *arg __unused)
{
	int error;

	error = 0;
	switch (cmd) {
	case MOD_LOAD:
		/* initialize the subsystem */
		error = pmc_initialize();
		if (error != 0)
			break;
		PMCDBG2(MOD,INI,1, "syscall=%d maxcpu=%d",
		    pmc_syscall_num, pmc_cpu_max());
		break;
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		pmc_cleanup();
		PMCDBG0(MOD,INI,1, "unloaded");
		break;
	default:
		error = EINVAL;	/* XXX should panic(9) */
		break;
	}
	return (error);
}