2 * Copyright (c) 2003-2007 Joseph Koshy
3 * Copyright (c) 2007 The FreeBSD Foundation
6 * Portions of this software were developed by A. Joseph Koshy under
7 * sponsorship from the FreeBSD Foundation and Google, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/eventhandler.h>
38 #include <sys/kernel.h>
39 #include <sys/kthread.h>
40 #include <sys/limits.h>
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/mutex.h>
46 #include <sys/pmckern.h>
47 #include <sys/pmclog.h>
50 #include <sys/queue.h>
51 #include <sys/resourcevar.h>
52 #include <sys/sched.h>
53 #include <sys/signalvar.h>
56 #include <sys/sysctl.h>
57 #include <sys/sysent.h>
58 #include <sys/systm.h>
59 #include <sys/vnode.h>
61 #include <sys/linker.h> /* needs to be after <sys/malloc.h> */
63 #include <machine/atomic.h>
64 #include <machine/md_var.h>
71 PMC_FLAG_NONE = 0x00, /* do nothing */
72 PMC_FLAG_REMOVE = 0x01, /* atomically remove entry from hash */
73 PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
77 * The offset in sysent where the syscall is allocated.
80 static int pmc_syscall_num = NO_SYSCALL;
81 struct pmc_cpu **pmc_pcpu; /* per-cpu state */
82 pmc_value_t *pmc_pcpu_saved; /* saved PMC values: CSW handling */
84 #define PMC_PCPU_SAVED(C,R) pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
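/*
 * Illustrative note (not part of the driver logic): pmc_pcpu_saved is a
 * single flat array with md->pmd_npmc entries per CPU, so the saved
 * value for row index 'ri' on CPU 'cpu' would be read as:
 *
 *	pmc_value_t v = PMC_PCPU_SAVED(cpu, ri);
 *
 * which expands to pmc_pcpu_saved[ri + md->pmd_npmc * cpu].
 */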
86 struct mtx_pool *pmc_mtxpool;
87 static int *pmc_pmcdisp; /* PMC row dispositions */
89 #define PMC_ROW_DISP_IS_FREE(R) (pmc_pmcdisp[(R)] == 0)
90 #define PMC_ROW_DISP_IS_THREAD(R) (pmc_pmcdisp[(R)] > 0)
91 #define PMC_ROW_DISP_IS_STANDALONE(R) (pmc_pmcdisp[(R)] < 0)
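/*
 * Example (illustrative only): the row disposition counter encodes how a
 * row is being used.  Claiming row 'ri' for a system-wide ("standalone")
 * PMC with the PMC_MARK_ROW_STANDALONE() macro below moves the counter
 * negative; releasing it with PMC_UNMARK_ROW_STANDALONE() returns the
 * row to the free state:
 *
 *	PMC_MARK_ROW_STANDALONE(ri);	(pmc_pmcdisp[ri]: 0 -> -1)
 *	...
 *	PMC_UNMARK_ROW_STANDALONE(ri);	(pmc_pmcdisp[ri]: -1 -> 0, free)
 */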
93 #define PMC_MARK_ROW_FREE(R) do { \
94 pmc_pmcdisp[(R)] = 0; \
97 #define PMC_MARK_ROW_STANDALONE(R) do { \
98 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
100 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
101 KASSERT(pmc_pmcdisp[(R)] >= (-mp_ncpus), ("[pmc,%d] row " \
102 "disposition error", __LINE__)); \
105 #define PMC_UNMARK_ROW_STANDALONE(R) do { \
106 atomic_add_int(&pmc_pmcdisp[(R)], 1); \
107 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
111 #define PMC_MARK_ROW_THREAD(R) do { \
112 KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
114 atomic_add_int(&pmc_pmcdisp[(R)], 1); \
117 #define PMC_UNMARK_ROW_THREAD(R) do { \
118 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
119 KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
124 /* various event handlers */
125 static eventhandler_tag pmc_exit_tag, pmc_fork_tag;
127 /* Module statistics */
128 struct pmc_op_getdriverstats pmc_stats;
130 /* Machine/processor dependent operations */
134 * Hash tables mapping owner processes and target processes to PMCs.
137 struct mtx pmc_processhash_mtx; /* spin mutex */
138 static u_long pmc_processhashmask;
139 static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;
142 * Hash table of PMC owner descriptors. This table is protected by
143 * the shared PMC "sx" lock.
146 static u_long pmc_ownerhashmask;
147 static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;
150 * List of PMC owners with system-wide sampling PMCs.
153 static LIST_HEAD(, pmc_owner) pmc_ss_owners;
161 static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
162 static int pmc_debugflags_parse(char *newstr, char *fence);
165 static int load(struct module *module, int cmd, void *arg);
166 static int pmc_attach_process(struct proc *p, struct pmc *pm);
167 static struct pmc *pmc_allocate_pmc_descriptor(void);
168 static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p);
169 static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
170 static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
172 static int pmc_can_attach(struct pmc *pm, struct proc *p);
173 static void pmc_capture_user_callchain(int cpu, struct trapframe *tf);
174 static void pmc_cleanup(void);
175 static int pmc_detach_process(struct proc *p, struct pmc *pm);
176 static int pmc_detach_one_process(struct proc *p, struct pmc *pm,
178 static void pmc_destroy_owner_descriptor(struct pmc_owner *po);
179 static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
180 static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
181 static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
183 static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
185 static void pmc_force_context_switch(void);
186 static void pmc_link_target_process(struct pmc *pm,
187 struct pmc_process *pp);
188 static void pmc_log_all_process_mappings(struct pmc_owner *po);
189 static void pmc_log_kernel_mappings(struct pmc *pm);
190 static void pmc_log_process_mappings(struct pmc_owner *po, struct proc *p);
191 static void pmc_maybe_remove_owner(struct pmc_owner *po);
192 static void pmc_process_csw_in(struct thread *td);
193 static void pmc_process_csw_out(struct thread *td);
194 static void pmc_process_exit(void *arg, struct proc *p);
195 static void pmc_process_fork(void *arg, struct proc *p1,
196 struct proc *p2, int n);
197 static void pmc_process_samples(int cpu);
198 static void pmc_release_pmc_descriptor(struct pmc *pmc);
199 static void pmc_remove_owner(struct pmc_owner *po);
200 static void pmc_remove_process_descriptor(struct pmc_process *pp);
201 static void pmc_restore_cpu_binding(struct pmc_binding *pb);
202 static void pmc_save_cpu_binding(struct pmc_binding *pb);
203 static void pmc_select_cpu(int cpu);
204 static int pmc_start(struct pmc *pm);
205 static int pmc_stop(struct pmc *pm);
206 static int pmc_syscall_handler(struct thread *td, void *syscall_args);
207 static void pmc_unlink_target_process(struct pmc *pmc,
208 struct pmc_process *pp);
211 * Kernel tunables and sysctl(8) interface.
214 SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters");
216 static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
217 TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "callchaindepth", &pmc_callchaindepth);
218 SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_TUN|CTLFLAG_RD,
219 &pmc_callchaindepth, 0, "depth of call chain records");
222 struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
223 char pmc_debugstr[PMC_DEBUG_STRSIZE];
224 TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
225 sizeof(pmc_debugstr));
226 SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
227 CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN,
228 0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
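/*
 * Example (assumed syntax, derived from pmc_debugflags_parse() below):
 * flag groups are whitespace separated and each group has the form
 * "<group>=<flag>[,<flag>...]", where "*" selects every flag in the
 * group.  Assuming PMC_SYSCTL_NAME_PREFIX is "kern.hwpmc.", a
 * loader.conf(5) entry could look like:
 *
 *	kern.hwpmc.debugflags="process=exec,fork pmc=*"
 */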
232 * kern.hwpmc.hashsize -- determines the number of rows in the
233 * hash tables used to look up target and owner processes.
236 static int pmc_hashsize = PMC_HASH_SIZE;
237 TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize);
238 SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
239 &pmc_hashsize, 0, "rows in hash tables");
242 * kern.hwpmc.nsamples -- number of PC samples/callchain stacks per CPU
245 static int pmc_nsamples = PMC_NSAMPLES;
246 TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nsamples", &pmc_nsamples);
247 SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_TUN|CTLFLAG_RD,
248 &pmc_nsamples, 0, "number of PC samples per CPU");
252 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
255 static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
256 TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size);
257 SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
258 &pmc_mtxpool_size, 0, "size of spin mutex pool");
262 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
263 * allocate system-wide PMCs.
265 * Allowing unprivileged processes to allocate system PMCs is convenient
266 * if system-wide measurements need to be taken concurrently with other
267 * per-process measurements. This feature is turned off by default.
270 static int pmc_unprivileged_syspmcs = 0;
271 TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
272 SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
273 &pmc_unprivileged_syspmcs, 0,
274 "allow unprivileged process to allocate system PMCs");
277 * Hash function. Discard the lower 2 bits of the pointer since
278 * these are always zero for our uses. The hash multiplier is
279 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
283 #define _PMC_HM 11400714819323198486u
285 #define _PMC_HM 2654435769u
287 #error Must know the size of 'long' to compile
290 #define PMC_HASH_PTR(P,M) ((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
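/*
 * Example (illustrative; mirrors the lookups done later in this file):
 * finding the owner hash bucket for a process pointer 'p'.
 *
 *	u_long hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
 *	struct pmc_ownerhash *poh = &pmc_ownerhash[hindex];
 */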
296 /* The `sysent' for the new syscall */
297 static struct sysent pmc_sysent = {
299 pmc_syscall_handler /* sy_call */
302 static struct syscall_module_data pmc_syscall_mod = {
310 static moduledata_t pmc_mod = {
312 syscall_module_handler,
316 DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
317 MODULE_VERSION(pmc, PMC_VERSION);
320 enum pmc_dbgparse_state {
321 PMCDS_WS, /* in whitespace */
322 PMCDS_MAJOR, /* seen a major keyword */
327 pmc_debugflags_parse(char *newstr, char *fence)
330 struct pmc_debugflags *tmpflags;
331 int error, found, *newbits, tmp;
334 MALLOC(tmpflags, struct pmc_debugflags *, sizeof(*tmpflags),
335 M_PMC, M_WAITOK|M_ZERO);
340 for (; p < fence && (c = *p); p++) {
342 /* skip white space */
343 if (c == ' ' || c == '\t')
346 /* look for a keyword followed by "=" */
347 for (q = p; p < fence && (c = *p) && c != '='; p++)
357 /* lookup flag group name */
358 #define DBG_SET_FLAG_MAJ(S,F) \
359 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
360 newbits = &tmpflags->pdb_ ## F;
362 DBG_SET_FLAG_MAJ("cpu", CPU);
363 DBG_SET_FLAG_MAJ("csw", CSW);
364 DBG_SET_FLAG_MAJ("logging", LOG);
365 DBG_SET_FLAG_MAJ("module", MOD);
366 DBG_SET_FLAG_MAJ("md", MDP);
367 DBG_SET_FLAG_MAJ("owner", OWN);
368 DBG_SET_FLAG_MAJ("pmc", PMC);
369 DBG_SET_FLAG_MAJ("process", PRC);
370 DBG_SET_FLAG_MAJ("sampling", SAM);
372 if (newbits == NULL) {
377 p++; /* skip the '=' */
379 /* Now parse the individual flags */
382 for (q = p; p < fence && (c = *p); p++)
383 if (c == ' ' || c == '\t' || c == ',')
386 /* p == fence or c == ws or c == "," or c == 0 */
388 if ((kwlen = p - q) == 0) {
394 #define DBG_SET_FLAG_MIN(S,F) \
395 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
396 tmp |= found = (1 << PMC_DEBUG_MIN_ ## F)
398 /* a '*' denotes all possible flags in the group */
399 if (kwlen == 1 && *q == '*')
401 /* look for individual flag names */
402 DBG_SET_FLAG_MIN("allocaterow", ALR);
403 DBG_SET_FLAG_MIN("allocate", ALL);
404 DBG_SET_FLAG_MIN("attach", ATT);
405 DBG_SET_FLAG_MIN("bind", BND);
406 DBG_SET_FLAG_MIN("config", CFG);
407 DBG_SET_FLAG_MIN("exec", EXC);
408 DBG_SET_FLAG_MIN("exit", EXT);
409 DBG_SET_FLAG_MIN("find", FND);
410 DBG_SET_FLAG_MIN("flush", FLS);
411 DBG_SET_FLAG_MIN("fork", FRK);
412 DBG_SET_FLAG_MIN("getbuf", GTB);
413 DBG_SET_FLAG_MIN("hook", PMH);
414 DBG_SET_FLAG_MIN("init", INI);
415 DBG_SET_FLAG_MIN("intr", INT);
416 DBG_SET_FLAG_MIN("linktarget", TLK);
417 DBG_SET_FLAG_MIN("mayberemove", OMR);
418 DBG_SET_FLAG_MIN("ops", OPS);
419 DBG_SET_FLAG_MIN("read", REA);
420 DBG_SET_FLAG_MIN("register", REG);
421 DBG_SET_FLAG_MIN("release", REL);
422 DBG_SET_FLAG_MIN("remove", ORM);
423 DBG_SET_FLAG_MIN("sample", SAM);
424 DBG_SET_FLAG_MIN("scheduleio", SIO);
425 DBG_SET_FLAG_MIN("select", SEL);
426 DBG_SET_FLAG_MIN("signal", SIG);
427 DBG_SET_FLAG_MIN("swi", SWI);
428 DBG_SET_FLAG_MIN("swo", SWO);
429 DBG_SET_FLAG_MIN("start", STA);
430 DBG_SET_FLAG_MIN("stop", STO);
431 DBG_SET_FLAG_MIN("syscall", PMS);
432 DBG_SET_FLAG_MIN("unlinktarget", TUL);
433 DBG_SET_FLAG_MIN("write", WRI);
435 /* unrecognized flag name */
440 if (c == 0 || c == ' ' || c == '\t') { /* end of flag group */
449 /* save the new flag set */
450 bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags));
453 FREE(tmpflags, M_PMC);
458 pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
460 char *fence, *newstr;
464 (void) arg1; (void) arg2; /* unused parameters */
466 n = sizeof(pmc_debugstr);
467 MALLOC(newstr, char *, n, M_PMC, M_ZERO|M_WAITOK);
468 (void) strlcpy(newstr, pmc_debugstr, n);
470 error = sysctl_handle_string(oidp, newstr, n, req);
472 /* if there is a new string, parse and copy it */
473 if (error == 0 && req->newptr != NULL) {
474 fence = newstr + (n < req->newlen ? n : req->newlen + 1);
475 if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
476 (void) strlcpy(pmc_debugstr, newstr,
477 sizeof(pmc_debugstr));
487 * Concurrency Control
489 * The driver manages the following data structures:
491 * - target process descriptors, one per target process
492 * - owner process descriptors (and attached lists), one per owner process
493 * - lookup hash tables for owner and target processes
494 * - PMC descriptors (and attached lists)
495 * - per-cpu hardware state
496 * - the 'hook' variable through which the kernel calls into
498 * - the machine hardware state (managed by the MD layer)
500 * These data structures are accessed from:
502 * - thread context-switch code
503 * - interrupt handlers (possibly on multiple cpus)
504 * - kernel threads on multiple cpus running on behalf of user
505 * processes doing system calls
506 * - this driver's private kernel threads
508 * = Locks and Locking strategy =
510 * The driver uses four locking strategies for its operation:
512 * - The global SX lock "pmc_sx" is used to protect internal
515 * Calls into the module by syscall() start with this lock being
516 * held in exclusive mode. Depending on the requested operation,
517 * the lock may be downgraded to 'shared' mode to allow more
518 * concurrent readers into the module. Calls into the module from
519 * other parts of the kernel acquire the lock in shared mode.
521 * This SX lock is held in exclusive mode for any operations that
522 * modify the linkages between the driver's internal data structures.
524 * The 'pmc_hook' function pointer is also protected by this lock.
525 * It is only examined with the sx lock held in exclusive mode. The
526 * kernel module is allowed to be unloaded only with the sx lock held
527 * in exclusive mode. In normal syscall handling, after acquiring the
528 * pmc_sx lock we first check that 'pmc_hook' is non-null before
529 * proceeding. This prevents races between the thread unloading the module
530 * and other threads seeking to use the module.
532 * - Lookups of target process structures and owner process structures
533 * cannot use the global "pmc_sx" SX lock because these lookups need
534 * to happen during context switches and in other critical sections
535 * where sleeping is not allowed. We protect these lookup tables
536 * with their own private spin-mutexes, "pmc_processhash_mtx" and
537 * "pmc_ownerhash_mtx".
539 * - Interrupt handlers work in a lock-free manner. At interrupt
540 * time, handlers look at the PMC pointer (phw->phw_pmc) configured
541 * when the PMC was started. If this pointer is NULL, the interrupt
542 * is ignored after updating driver statistics. We ensure that this
543 * pointer is set (using an atomic operation if necessary) before the
544 * PMC hardware is started. Conversely, this pointer is unset atomically
545 * only after the PMC hardware is stopped.
547 * We ensure that everything needed for the operation of an
548 * interrupt handler is available without it needing to acquire any
549 * locks. We also ensure that a PMC's software state is destroyed only
550 * after the PMC is taken off hardware (on all CPUs).
552 * - Context-switch handling with process-private PMCs needs more
555 * A given process may be the target of multiple PMCs. For example,
556 * PMCATTACH and PMCDETACH may be requested by a process on one CPU
557 * while the target process is running on another. A PMC could also
558 * be getting released because its owner is exiting. We tackle
559 * these situations in the following manner:
561 * - each target process structure 'pmc_process' has an array
562 * of 'struct pmc *' pointers, one for each hardware PMC.
564 * - At context switch IN time, each "target" PMC in RUNNING state
565 * gets started on hardware and a pointer to each PMC is copied into
566 * the per-cpu phw array. The 'runcount' for the PMC is
569 * - At context switch OUT time, all process-virtual PMCs are stopped
570 * on hardware. The saved value is added to the PMCs value field
571 * only if the PMC is in a non-deleted state (the PMCs state could
572 * have changed during the current time slice).
574 * Note that in-between a switch IN on a processor and a switch
575 * OUT, the PMC could have been released on another CPU. Therefore
576 * context switch OUT always looks at the hardware state to turn
577 * OFF PMCs and will update a PMC's saved value only if reachable
578 * from the target process record.
580 * - OP PMCRELEASE could be called on a PMC at any time (the PMC could
581 * be attached to many processes at the time of the call and could
582 * be active on multiple CPUs).
584 * We prevent further scheduling of the PMC by marking it as in
585 * state 'DELETED'. If the runcount of the PMC is non-zero then
586 * this PMC is currently running on a CPU somewhere. The thread
587 * doing the PMCRELEASE operation waits by repeatedly doing a
588 * pause() till the runcount comes to zero.
590 * The contents of a PMC descriptor (struct pmc) are protected using
591 * a spin-mutex. In order to save space, we use a mutex pool.
593 * In terms of lock types used by witness(4), we use:
594 * - Type "pmc-sx", used by the global SX lock.
595 * - Type "pmc-sleep", for sleep mutexes used by logger threads.
596 * - Type "pmc-per-proc", for protecting PMC owner descriptors.
597 * - Type "pmc-leaf", used for all other spin mutexes.
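/*
 * Sketch (illustrative only) of the syscall entry pattern described
 * above: take "pmc_sx" in exclusive mode and verify that the module is
 * still hooked into the kernel (pmc_hook != NULL) before touching any
 * driver state.  The PMC_GET_SX_XLOCK() macro defined later in this
 * file encapsulates exactly this check.
 *
 *	sx_xlock(&pmc_sx);
 *	if (pmc_hook == NULL) {
 *		sx_xunlock(&pmc_sx);
 *		return (ENOSYS);
 *	}
 *	... operate on driver data structures ...
 *	sx_xunlock(&pmc_sx);
 */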
601 * save the cpu binding of the current kthread
605 pmc_save_cpu_binding(struct pmc_binding *pb)
607 PMCDBG(CPU,BND,2, "%s", "save-cpu");
608 thread_lock(curthread);
609 pb->pb_bound = sched_is_bound(curthread);
610 pb->pb_cpu = curthread->td_oncpu;
611 thread_unlock(curthread);
612 PMCDBG(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
616 * restore the cpu binding of the current thread
620 pmc_restore_cpu_binding(struct pmc_binding *pb)
622 PMCDBG(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
623 curthread->td_oncpu, pb->pb_cpu);
624 thread_lock(curthread);
626 sched_bind(curthread, pb->pb_cpu);
628 sched_unbind(curthread);
629 thread_unlock(curthread);
630 PMCDBG(CPU,BND,2, "%s", "restore-cpu done");
634 * move execution to the specified cpu and bind it there.
638 pmc_select_cpu(int cpu)
640 KASSERT(cpu >= 0 && cpu < mp_ncpus,
641 ("[pmc,%d] bad cpu number %d", __LINE__, cpu));
643 /* never move to a disabled CPU */
644 KASSERT(pmc_cpu_is_disabled(cpu) == 0, ("[pmc,%d] selecting "
645 "disabled CPU %d", __LINE__, cpu));
647 PMCDBG(CPU,SEL,2, "select-cpu cpu=%d", cpu);
648 thread_lock(curthread);
649 sched_bind(curthread, cpu);
650 thread_unlock(curthread);
652 KASSERT(curthread->td_oncpu == cpu,
653 ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
654 cpu, curthread->td_oncpu));
656 PMCDBG(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
660 * Force a context switch.
662 * We do this by pausing for one tick -- invoking mi_switch() is not
663 * guaranteed to force a context switch.
667 pmc_force_context_switch(void)
674 * Get the file name for an executable. This is a simple wrapper
675 * around vn_fullpath(9).
679 pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
684 *fullpath = "unknown";
686 vn_lock(v, LK_CANRECURSE | LK_EXCLUSIVE | LK_RETRY, td);
687 vn_fullpath(td, v, fullpath, freepath);
688 VOP_UNLOCK(v, 0, td);
692 * remove a process owning PMCs
696 pmc_remove_owner(struct pmc_owner *po)
698 struct pmc *pm, *tmp;
700 sx_assert(&pmc_sx, SX_XLOCKED);
702 PMCDBG(OWN,ORM,1, "remove-owner po=%p", po);
704 /* Remove descriptor from the owner hash table */
705 LIST_REMOVE(po, po_next);
707 /* release all owned PMC descriptors */
708 LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
709 PMCDBG(OWN,ORM,2, "pmc=%p", pm);
710 KASSERT(pm->pm_owner == po,
711 ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po));
713 pmc_release_pmc_descriptor(pm); /* will unlink from the list */
716 KASSERT(po->po_sscount == 0,
717 ("[pmc,%d] SS count not zero", __LINE__));
718 KASSERT(LIST_EMPTY(&po->po_pmcs),
719 ("[pmc,%d] PMC list not empty", __LINE__));
721 /* de-configure the log file if present */
722 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
723 pmclog_deconfigure_log(po);
727 * remove an owner process record if all conditions are met.
731 pmc_maybe_remove_owner(struct pmc_owner *po)
734 PMCDBG(OWN,OMR,1, "maybe-remove-owner po=%p", po);
737 * Remove owner record if
738 * - this process does not own any PMCs
739 * - this process has not allocated a system-wide sampling buffer
742 if (LIST_EMPTY(&po->po_pmcs) &&
743 ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) {
744 pmc_remove_owner(po);
745 pmc_destroy_owner_descriptor(po);
750 * Add an association between a target process and a PMC.
754 pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
757 struct pmc_target *pt;
759 sx_assert(&pmc_sx, SX_XLOCKED);
761 KASSERT(pm != NULL && pp != NULL,
762 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
763 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
764 ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d",
765 __LINE__, pm, pp->pp_proc->p_pid));
766 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < ((int) md->pmd_npmc - 1),
767 ("[pmc,%d] Illegal reference count %d for process record %p",
768 __LINE__, pp->pp_refcnt, (void *) pp));
770 ri = PMC_TO_ROWINDEX(pm);
772 PMCDBG(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
776 LIST_FOREACH(pt, &pm->pm_targets, pt_next)
777 if (pt->pt_process == pp)
778 KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
782 MALLOC(pt, struct pmc_target *, sizeof(struct pmc_target),
783 M_PMC, M_ZERO|M_WAITOK);
787 LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);
789 atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc,
792 if (pm->pm_owner->po_owner == pp->pp_proc)
793 pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;
796 * Initialize the per-process values at this row index.
798 pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ?
799 pm->pm_sc.pm_reloadcount : 0;
806 * Removes the association between a target process and a PMC.
810 pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
814 struct pmc_target *ptgt;
816 sx_assert(&pmc_sx, SX_XLOCKED);
818 KASSERT(pm != NULL && pp != NULL,
819 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
821 KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt < (int) md->pmd_npmc,
822 ("[pmc,%d] Illegal ref count %d on process record %p",
823 __LINE__, pp->pp_refcnt, (void *) pp));
825 ri = PMC_TO_ROWINDEX(pm);
827 PMCDBG(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
830 KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
831 ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
832 ri, pm, pp->pp_pmcs[ri].pp_pmc));
834 pp->pp_pmcs[ri].pp_pmc = NULL;
835 pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;
837 /* Remove owner-specific flags */
838 if (pm->pm_owner->po_owner == pp->pp_proc) {
839 pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
840 pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
845 /* Remove the target process from the PMC structure */
846 LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
847 if (ptgt->pt_process == pp)
850 KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
851 "in pmc %p", __LINE__, pp->pp_proc, pp, pm));
853 LIST_REMOVE(ptgt, pt_next);
856 /* if the PMC now lacks targets, send the owner a SIGIO */
857 if (LIST_EMPTY(&pm->pm_targets)) {
858 p = pm->pm_owner->po_owner;
863 PMCDBG(PRC,SIG,2, "signalling proc=%p signal=%d", p,
869 * Check if PMC 'pm' may be attached to target process 't'.
873 pmc_can_attach(struct pmc *pm, struct proc *t)
875 struct proc *o; /* pmc owner */
876 struct ucred *oc, *tc; /* owner, target credentials */
877 int decline_attach, i;
880 * A PMC's owner can always attach that PMC to itself.
883 if ((o = pm->pm_owner->po_owner) == t)
897 * The effective uid of the PMC owner should match at least one
898 * of the {effective,real,saved} uids of the target process.
901 decline_attach = oc->cr_uid != tc->cr_uid &&
902 oc->cr_uid != tc->cr_svuid &&
903 oc->cr_uid != tc->cr_ruid;
906 * Every one of the target's group ids must be in the owner's
909 for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
910 decline_attach = !groupmember(tc->cr_groups[i], oc);
912 /* check the real and saved gids too */
913 if (decline_attach == 0)
914 decline_attach = !groupmember(tc->cr_rgid, oc) ||
915 !groupmember(tc->cr_svgid, oc);
920 return (decline_attach ? EPERM : 0);
924 * Attach a process to a PMC.
928 pmc_attach_one_process(struct proc *p, struct pmc *pm)
931 char *fullpath, *freepath;
932 struct pmc_process *pp;
934 sx_assert(&pmc_sx, SX_XLOCKED);
936 PMCDBG(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
937 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
940 * Locate the process descriptor corresponding to process 'p',
941 * allocating space as needed.
943 * Verify that rowindex 'pm_rowindex' is free in the process
946 * If not, allocate space for a descriptor and link the
947 * process descriptor and PMC.
949 ri = PMC_TO_ROWINDEX(pm);
951 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL)
954 if (pp->pp_pmcs[ri].pp_pmc == pm) /* already present at slot [ri] */
957 if (pp->pp_pmcs[ri].pp_pmc != NULL)
960 pmc_link_target_process(pm, pp);
962 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) &&
963 (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0)
964 pm->pm_flags |= PMC_F_NEEDS_LOGFILE;
966 pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */
968 /* issue an attach event to a configured log file */
969 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) {
970 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
971 pmclog_process_pmcattach(pm, p->p_pid, fullpath);
973 FREE(freepath, M_TEMP);
974 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
975 pmc_log_process_mappings(pm->pm_owner, p);
977 /* mark process as using HWPMCs */
979 p->p_flag |= P_HWPMC;
986 * Attach a process and optionally its children
990 pmc_attach_process(struct proc *p, struct pmc *pm)
995 sx_assert(&pmc_sx, SX_XLOCKED);
997 PMCDBG(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
998 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
1002 * If this PMC successfully allowed a GETMSR operation
1003 * in the past, disallow further ATTACHes.
1006 if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0)
1009 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
1010 return pmc_attach_one_process(p, pm);
1013 * Traverse all child processes, attaching them to
1017 sx_slock(&proctree_lock);
1022 if ((error = pmc_attach_one_process(p, pm)) != 0)
1024 if (!LIST_EMPTY(&p->p_children))
1025 p = LIST_FIRST(&p->p_children);
1029 if (LIST_NEXT(p, p_sibling)) {
1030 p = LIST_NEXT(p, p_sibling);
1038 (void) pmc_detach_process(top, pm);
1041 sx_sunlock(&proctree_lock);
1046 * Detach a process from a PMC. If there are no other PMCs tracking
1047 * this process, remove the process structure from its hash table. If
1048 * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
1052 pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
1055 struct pmc_process *pp;
1057 sx_assert(&pmc_sx, SX_XLOCKED);
1060 ("[pmc,%d] null pm pointer", __LINE__));
1062 ri = PMC_TO_ROWINDEX(pm);
1064 PMCDBG(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
1065 pm, ri, p, p->p_pid, p->p_comm, flags);
1067 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
1070 if (pp->pp_pmcs[ri].pp_pmc != pm)
1073 pmc_unlink_target_process(pm, pp);
1075 /* Issue a detach entry if a log file is configured */
1076 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE)
1077 pmclog_process_pmcdetach(pm, p->p_pid);
1080 * If there are no PMCs targeting this process, we remove its
1081 * descriptor from the target hash table and unset the P_HWPMC
1082 * flag in the struct proc.
1084 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
1085 ("[pmc,%d] Illegal refcnt %d for process struct %p",
1086 __LINE__, pp->pp_refcnt, pp));
1088 if (pp->pp_refcnt != 0) /* still a target of some PMC */
1091 pmc_remove_process_descriptor(pp);
1093 if (flags & PMC_FLAG_REMOVE)
1097 p->p_flag &= ~P_HWPMC;
1104 * Detach a process and optionally its descendants from a PMC.
1108 pmc_detach_process(struct proc *p, struct pmc *pm)
1112 sx_assert(&pmc_sx, SX_XLOCKED);
1114 PMCDBG(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
1115 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
1117 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
1118 return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1121 * Traverse all children, detaching them from this PMC. We
1122 * ignore errors since we could be detaching a PMC from a
1123 * partially attached proc tree.
1126 sx_slock(&proctree_lock);
1131 (void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1133 if (!LIST_EMPTY(&p->p_children))
1134 p = LIST_FIRST(&p->p_children);
1138 if (LIST_NEXT(p, p_sibling)) {
1139 p = LIST_NEXT(p, p_sibling);
1147 sx_sunlock(&proctree_lock);
1149 if (LIST_EMPTY(&pm->pm_targets))
1150 pm->pm_flags &= ~PMC_F_ATTACH_DONE;
1157 * Thread context switch IN
1161 pmc_process_csw_in(struct thread *td)
1169 struct pmc_process *pp;
1170 pmc_value_t newvalue;
1174 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
1177 KASSERT(pp->pp_proc == td->td_proc,
1178 ("[pmc,%d] not my thread state", __LINE__));
1180 critical_enter(); /* no preemption from this point */
1182 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1184 PMCDBG(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1185 p->p_pid, p->p_comm, pp);
1187 KASSERT(cpu >= 0 && cpu < mp_ncpus,
1188 ("[pmc,%d] wierd CPU id %d", __LINE__, cpu));
1192 for (ri = 0; ri < md->pmd_npmc; ri++) {
1194 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
1197 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
1198 ("[pmc,%d] Target PMC in non-virtual mode (%d)",
1199 __LINE__, PMC_TO_MODE(pm)));
1201 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1202 ("[pmc,%d] Row index mismatch pmc %d != ri %d",
1203 __LINE__, PMC_TO_ROWINDEX(pm), ri));
1206 * Only PMCs that are marked as 'RUNNING' need
1207 * be placed on hardware.
1210 if (pm->pm_state != PMC_STATE_RUNNING)
1213 /* increment PMC runcount */
1214 atomic_add_rel_32(&pm->pm_runcount, 1);
1216 /* configure the HWPMC we are going to use. */
1217 md->pmd_config_pmc(cpu, ri, pm);
1219 phw = pc->pc_hwpmcs[ri];
1221 KASSERT(phw != NULL,
1222 ("[pmc,%d] null hw pointer", __LINE__));
1224 KASSERT(phw->phw_pmc == pm,
1225 ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
1229 * Write out saved value and start the PMC.
1231 * Sampling PMCs use a per-process value, while
1232 * counting mode PMCs use a per-pmc value that is
1233 * inherited across descendants.
1235 if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
1236 mtx_pool_lock_spin(pmc_mtxpool, pm);
1237 newvalue = PMC_PCPU_SAVED(cpu,ri) =
1238 pp->pp_pmcs[ri].pp_pmcval;
1239 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1241 KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
1242 ("[pmc,%d] illegal mode=%d", __LINE__,
1244 mtx_pool_lock_spin(pmc_mtxpool, pm);
1245 newvalue = PMC_PCPU_SAVED(cpu, ri) =
1246 pm->pm_gv.pm_savedvalue;
1247 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1250 PMCDBG(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);
1252 md->pmd_write_pmc(cpu, ri, newvalue);
1253 md->pmd_start_pmc(cpu, ri);
1257 * perform any other architecture/cpu dependent thread
1258 * switch-in actions.
1261 (void) (*md->pmd_switch_in)(pc, pp);
1268 * Thread context switch OUT.
1272 pmc_process_csw_out(struct thread *td)
1280 struct pmc_process *pp;
1282 pmc_value_t newvalue;
1285 * Locate our process descriptor; this may be NULL if
1286 * this process is exiting and we have already removed
1287 * the process from the target process table.
1289 * Note that due to kernel preemption, multiple
1290 * context switches may happen while the process is
1293 * Note also that if the target process cannot be
1294 * found we still need to deconfigure any PMCs that
1295 * are currently running on hardware.
1299 pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);
1307 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1309 PMCDBG(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1310 p->p_pid, p->p_comm, pp);
1312 KASSERT(cpu >= 0 && cpu < mp_ncpus,
1313 ("[pmc,%d wierd CPU id %d", __LINE__, cpu));
1318 * When a PMC gets unlinked from a target process, it will
1319 * be removed from the target's pp_pmcs[] array.
1321 * However, on a MP system, the target could have been
1322 * executing on another CPU at the time of the unlink.
1323 * So, at context switch OUT time, we need to look at
1324 * the hardware to determine if a PMC is scheduled on
1328 for (ri = 0; ri < md->pmd_npmc; ri++) {
1331 (void) (*md->pmd_get_config)(cpu, ri, &pm);
1333 if (pm == NULL) /* nothing at this row index */
1336 mode = PMC_TO_MODE(pm);
1337 if (!PMC_IS_VIRTUAL_MODE(mode))
1338 continue; /* not a process virtual PMC */
1340 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1341 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
1342 __LINE__, PMC_TO_ROWINDEX(pm), ri));
1344 /* Stop hardware if not already stopped */
1345 if (pm->pm_stalled == 0)
1346 md->pmd_stop_pmc(cpu, ri);
1348 /* reduce this PMC's runcount */
1349 atomic_subtract_rel_32(&pm->pm_runcount, 1);
1352 * If this PMC is associated with this process,
1356 if (pp != NULL && pp->pp_pmcs[ri].pp_pmc != NULL) {
1358 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
1359 ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__,
1360 pm, ri, pp->pp_pmcs[ri].pp_pmc));
1362 KASSERT(pp->pp_refcnt > 0,
1363 ("[pmc,%d] pp refcnt = %d", __LINE__,
1366 md->pmd_read_pmc(cpu, ri, &newvalue);
1368 tmp = newvalue - PMC_PCPU_SAVED(cpu,ri);
1370 PMCDBG(CSW,SWI,1,"cpu=%d ri=%d tmp=%jd", cpu, ri,
1373 if (mode == PMC_MODE_TS) {
1376 * For sampling process-virtual PMCs,
1377 * we expect the count to be
1378 * decreasing as the 'value'
1379 * programmed into the PMC is the
1380 * number of events to be seen till
1381 * the next sampling interrupt.
1384 tmp += pm->pm_sc.pm_reloadcount;
1385 mtx_pool_lock_spin(pmc_mtxpool, pm);
1386 pp->pp_pmcs[ri].pp_pmcval -= tmp;
1387 if ((int64_t) pp->pp_pmcs[ri].pp_pmcval < 0)
1388 pp->pp_pmcs[ri].pp_pmcval +=
1389 pm->pm_sc.pm_reloadcount;
1390 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1395 * For counting process-virtual PMCs,
1396 * we expect the count to be
1397 * increasing monotonically, modulo a 64
1400 KASSERT((int64_t) tmp >= 0,
1401 ("[pmc,%d] negative increment cpu=%d "
1402 "ri=%d newvalue=%jx saved=%jx "
1403 "incr=%jx", __LINE__, cpu, ri,
1404 newvalue, PMC_PCPU_SAVED(cpu,ri), tmp));
1406 mtx_pool_lock_spin(pmc_mtxpool, pm);
1407 pm->pm_gv.pm_savedvalue += tmp;
1408 pp->pp_pmcs[ri].pp_pmcval += tmp;
1409 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1411 if (pm->pm_flags & PMC_F_LOG_PROCCSW)
1412 pmclog_process_proccsw(pm, pp, tmp);
1416 /* mark hardware as free */
1417 md->pmd_config_pmc(cpu, ri, NULL);
1421 * perform any other architecture/cpu dependent thread
1422 * switch out functions.
1425 (void) (*md->pmd_switch_out)(pc, pp);
1431 * Log a KLD operation.
1435 pmc_process_kld_load(struct pmckern_map_in *pkm)
1437 struct pmc_owner *po;
1439 sx_assert(&pmc_sx, SX_LOCKED);
1442 * Notify owners of system sampling PMCs about KLD operations.
1445 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1446 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1447 pmclog_process_map_in(po, (pid_t) -1, pkm->pm_address,
1448 (char *) pkm->pm_file);
1451 * TODO: Notify owners of (all) process-sampling PMCs too.
1458 pmc_process_kld_unload(struct pmckern_map_out *pkm)
1460 struct pmc_owner *po;
1462 sx_assert(&pmc_sx, SX_LOCKED);
1464 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1465 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1466 pmclog_process_map_out(po, (pid_t) -1,
1467 pkm->pm_address, pkm->pm_address + pkm->pm_size);
1470 * TODO: Notify owners of process-sampling PMCs.
1475 * A mapping change for a process.
1479 pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm)
1483 char *fullpath, *freepath;
1484 const struct pmc *pm;
1485 struct pmc_owner *po;
1486 const struct pmc_process *pp;
1488 freepath = fullpath = NULL;
1489 pmc_getfilename((struct vnode *) pkm->pm_file, &fullpath, &freepath);
1491 pid = td->td_proc->p_pid;
1493 /* Inform owners of all system-wide sampling PMCs. */
1494 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1495 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1496 pmclog_process_map_in(po, pid, pkm->pm_address, fullpath);
1498 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1502 * Inform sampling PMC owners tracking this process.
1504 for (ri = 0; ri < md->pmd_npmc; ri++)
1505 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
1506 PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1507 pmclog_process_map_in(pm->pm_owner,
1508 pid, pkm->pm_address, fullpath);
1512 FREE(freepath, M_TEMP);
1517 * Log an munmap request.
1521 pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm)
1525 struct pmc_owner *po;
1526 const struct pmc *pm;
1527 const struct pmc_process *pp;
1529 pid = td->td_proc->p_pid;
1531 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1532 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1533 pmclog_process_map_out(po, pid, pkm->pm_address,
1534 pkm->pm_address + pkm->pm_size);
1536 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1539 for (ri = 0; ri < md->pmd_npmc; ri++)
1540 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
1541 PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1542 pmclog_process_map_out(pm->pm_owner, pid,
1543 pkm->pm_address, pkm->pm_address + pkm->pm_size);
1547 * Log mapping information about the kernel.
1551 pmc_log_kernel_mappings(struct pmc *pm)
1553 struct pmc_owner *po;
1554 struct pmckern_map_in *km, *kmbase;
1556 sx_assert(&pmc_sx, SX_LOCKED);
1557 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
1558 ("[pmc,%d] non-sampling PMC (%p) desires mapping information",
1559 __LINE__, (void *) pm));
1563 if (po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE)
1567 * Log the current set of kernel modules.
1569 kmbase = linker_hwpmc_list_objects();
1570 for (km = kmbase; km->pm_file != NULL; km++) {
1571 PMCDBG(LOG,REG,1,"%s %p", (char *) km->pm_file,
1572 (void *) km->pm_address);
1573 pmclog_process_map_in(po, (pid_t) -1, km->pm_address,
1576 FREE(kmbase, M_LINKER);
1578 po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE;
1582 * Log the mappings for a single process.
1586 pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
1591 * Log mappings for all processes in the system.
1595 pmc_log_all_process_mappings(struct pmc_owner *po)
1597 struct proc *p, *top;
1599 sx_assert(&pmc_sx, SX_XLOCKED);
1601 if ((p = pfind(1)) == NULL)
1602 panic("[pmc,%d] Cannot find init", __LINE__);
1606 sx_slock(&proctree_lock);
1611 pmc_log_process_mappings(po, p);
1612 if (!LIST_EMPTY(&p->p_children))
1613 p = LIST_FIRST(&p->p_children);
1617 if (LIST_NEXT(p, p_sibling)) {
1618 p = LIST_NEXT(p, p_sibling);
1625 sx_sunlock(&proctree_lock);
1629 * The 'hook' invoked from the kernel proper
1634 const char *pmc_hooknames[] = {
1635 /* these strings correspond to PMC_FN_* in <sys/pmckern.h> */
1650 pmc_hook_handler(struct thread *td, int function, void *arg)
1653 PMCDBG(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
1654 pmc_hooknames[function], arg);
1663 case PMC_FN_PROCESS_EXEC:
1665 char *fullpath, *freepath;
1667 int is_using_hwpmcs;
1670 struct pmc_owner *po;
1671 struct pmc_process *pp;
1672 struct pmckern_procexec *pk;
1674 sx_assert(&pmc_sx, SX_XLOCKED);
1677 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
1679 pk = (struct pmckern_procexec *) arg;
1681 /* Inform owners of SS mode PMCs of the exec event. */
1682 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1683 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1684 pmclog_process_procexec(po, PMC_ID_INVALID,
1685 p->p_pid, pk->pm_entryaddr, fullpath);
1688 is_using_hwpmcs = p->p_flag & P_HWPMC;
1691 if (!is_using_hwpmcs) {
1693 FREE(freepath, M_TEMP);
1698 * PMCs are not inherited across an exec(): remove any
1699 * PMCs that this process is the owner of.
1702 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
1703 pmc_remove_owner(po);
1704 pmc_destroy_owner_descriptor(po);
1708 * If the process being exec'ed is not the target of any
1711 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) {
1713 FREE(freepath, M_TEMP);
1718 * Log the exec event to all monitoring owners. Skip
1719 * owners who have already received the event because
1720 * they had system sampling PMCs active.
1722 for (ri = 0; ri < md->pmd_npmc; ri++)
1723 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
1725 if (po->po_sscount == 0 &&
1726 po->po_flags & PMC_PO_OWNS_LOGFILE)
1727 pmclog_process_procexec(po, pm->pm_id,
1728 p->p_pid, pk->pm_entryaddr,
1733 FREE(freepath, M_TEMP);
1736 PMCDBG(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
1737 p, p->p_pid, p->p_comm, pk->pm_credentialschanged);
1739 if (pk->pm_credentialschanged == 0) /* no change */
1743 * If the newly exec()'ed process has a different credential
1744 * than before, allow it to be the target of a PMC only if
1745 * the PMC's owner has sufficient privilege.
1748 for (ri = 0; ri < md->pmd_npmc; ri++)
1749 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL)
1750 if (pmc_can_attach(pm, td->td_proc) != 0)
1751 pmc_detach_one_process(td->td_proc,
1754 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
1755 ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__,
1756 pp->pp_refcnt, pp));
1759 * If this process is no longer the target of any
1760 * PMCs, we can remove the process entry and free
1764 if (pp->pp_refcnt == 0) {
1765 pmc_remove_process_descriptor(pp);
1774 pmc_process_csw_in(td);
1777 case PMC_FN_CSW_OUT:
1778 pmc_process_csw_out(td);
1782 * Process accumulated PC samples.
1784 * This function is expected to be called by hardclock() for
1785 * each CPU that has accumulated PC samples.
1787 * This function is to be executed on the CPU whose samples
1788 * are being processed.
1790 case PMC_FN_DO_SAMPLES:
1793 * Clear the cpu specific bit in the CPU mask before
1794 * doing the rest of the processing. If the NMI handler
1795 * gets invoked after the "atomic_clear_int()" call
1796 * below but before "pmc_process_samples()" gets
1797 * around to processing the interrupt, then we will
1798 * come back here at the next hardclock() tick (and
1799 * may find nothing to do if "pmc_process_samples()"
1800 * had already processed the interrupt). We don't
1801 * lose the interrupt sample.
1803 atomic_clear_int(&pmc_cpumask, (1 << PCPU_GET(cpuid)));
1804 pmc_process_samples(PCPU_GET(cpuid));
1808 case PMC_FN_KLD_LOAD:
1809 sx_assert(&pmc_sx, SX_LOCKED);
1810 pmc_process_kld_load((struct pmckern_map_in *) arg);
1813 case PMC_FN_KLD_UNLOAD:
1814 sx_assert(&pmc_sx, SX_LOCKED);
1815 pmc_process_kld_unload((struct pmckern_map_out *) arg);
1819 sx_assert(&pmc_sx, SX_LOCKED);
1820 pmc_process_mmap(td, (struct pmckern_map_in *) arg);
1824 sx_assert(&pmc_sx, SX_LOCKED);
1825 pmc_process_munmap(td, (struct pmckern_map_out *) arg);
1828 case PMC_FN_USER_CALLCHAIN:
1830 * Record a call chain.
1832 pmc_capture_user_callchain(PCPU_GET(cpuid),
1833 (struct trapframe *) arg);
1838 KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
1848 * allocate a 'struct pmc_owner' descriptor in the owner hash table.
1851 static struct pmc_owner *
1852 pmc_allocate_owner_descriptor(struct proc *p)
1855 struct pmc_owner *po;
1856 struct pmc_ownerhash *poh;
1858 hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
1859 poh = &pmc_ownerhash[hindex];
1861 /* allocate space for a pmc_owner descriptor */
1862 MALLOC(po, struct pmc_owner *, sizeof(struct pmc_owner),
1863 M_PMC, M_ZERO|M_WAITOK);
1865 po->po_sscount = po->po_error = po->po_flags = 0;
1868 po->po_kthread = NULL;
1869 LIST_INIT(&po->po_pmcs);
1870 LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
1872 TAILQ_INIT(&po->po_logbuffers);
1873 mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN);
1875 PMCDBG(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
1876 p, p->p_pid, p->p_comm, po);
1882 pmc_destroy_owner_descriptor(struct pmc_owner *po)
1885 PMCDBG(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)",
1886 po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm);
1888 mtx_destroy(&po->po_mtx);
1893 * find the descriptor corresponding to process 'p', adding or removing it
1894 * as specified by 'mode'.
1897 static struct pmc_process *
1898 pmc_find_process_descriptor(struct proc *p, uint32_t mode)
1901 struct pmc_process *pp, *ppnew;
1902 struct pmc_processhash *pph;
1904 hindex = PMC_HASH_PTR(p, pmc_processhashmask);
1905 pph = &pmc_processhash[hindex];
1910 * Pre-allocate memory in the FIND_ALLOCATE case since we
1911 * cannot call malloc(9) once we hold a spin lock.
1914 if (mode & PMC_FLAG_ALLOCATE) {
1915 /* allocate additional space for 'n' pmc pointers */
1916 MALLOC(ppnew, struct pmc_process *,
1917 sizeof(struct pmc_process) + md->pmd_npmc *
1918 sizeof(struct pmc_targetstate), M_PMC, M_ZERO|M_WAITOK);
1921 mtx_lock_spin(&pmc_processhash_mtx);
1922 LIST_FOREACH(pp, pph, pp_next)
1923 if (pp->pp_proc == p)
1926 if ((mode & PMC_FLAG_REMOVE) && pp != NULL)
1927 LIST_REMOVE(pp, pp_next);
1929 if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL &&
1932 LIST_INSERT_HEAD(pph, ppnew, pp_next);
1936 mtx_unlock_spin(&pmc_processhash_mtx);
1938 if (pp != NULL && ppnew != NULL)
1945 * remove a process descriptor from the process hash table.
1949 pmc_remove_process_descriptor(struct pmc_process *pp)
1951 KASSERT(pp->pp_refcnt == 0,
1952 ("[pmc,%d] Removing process descriptor %p with count %d",
1953 __LINE__, pp, pp->pp_refcnt));
1955 mtx_lock_spin(&pmc_processhash_mtx);
1956 LIST_REMOVE(pp, pp_next);
1957 mtx_unlock_spin(&pmc_processhash_mtx);
1962 * find an owner descriptor corresponding to proc 'p'
1965 static struct pmc_owner *
1966 pmc_find_owner_descriptor(struct proc *p)
1969 struct pmc_owner *po;
1970 struct pmc_ownerhash *poh;
1972 hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
1973 poh = &pmc_ownerhash[hindex];
1976 LIST_FOREACH(po, poh, po_next)
1977 if (po->po_owner == p)
1980 PMCDBG(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
1981 "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);
1987 * pmc_allocate_pmc_descriptor
1989 * Allocate a pmc descriptor and initialize its
1994 pmc_allocate_pmc_descriptor(void)
1998 MALLOC(pmc, struct pmc *, sizeof(struct pmc), M_PMC, M_ZERO|M_WAITOK);
2001 pmc->pm_owner = NULL;
2002 LIST_INIT(&pmc->pm_targets);
2005 PMCDBG(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
2011 * Destroy a pmc descriptor.
2015 pmc_destroy_pmc_descriptor(struct pmc *pm)
2020 KASSERT(pm->pm_state == PMC_STATE_DELETED ||
2021 pm->pm_state == PMC_STATE_FREE,
2022 ("[pmc,%d] destroying non-deleted PMC", __LINE__));
2023 KASSERT(LIST_EMPTY(&pm->pm_targets),
2024 ("[pmc,%d] destroying pmc with targets", __LINE__));
2025 KASSERT(pm->pm_owner == NULL,
2026 ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
2027 KASSERT(pm->pm_runcount == 0,
2028 ("[pmc,%d] pmc has non-zero run count %d", __LINE__,
2034 pmc_wait_for_pmc_idle(struct pmc *pm)
2037 volatile int maxloop;
2039 maxloop = 100 * mp_ncpus;
2043 * Loop (with a forced context switch) till the PMC's runcount
2044 * comes down to zero.
2046 while (atomic_load_acq_32(&pm->pm_runcount) > 0) {
2049 KASSERT(maxloop > 0,
2050 ("[pmc,%d] (ri%d, rc%d) waiting too long for "
2051 "pmc to be free", __LINE__,
2052 PMC_TO_ROWINDEX(pm), pm->pm_runcount));
2054 pmc_force_context_switch();
2059 * This function does the following things:
2061 * - detaches the PMC from hardware
2062 * - unlinks all target processes that were attached to it
2063 * - removes the PMC from its owner's list
2064 * - destroys the PMC's private mutex
2066 * Once this function completes, the given pmc pointer can be safely
2067 * FREE'd by the caller.
2071 pmc_release_pmc_descriptor(struct pmc *pm)
2076 struct pmc_owner *po;
2077 struct pmc_process *pp;
2078 struct pmc_target *ptgt, *tmp;
2079 struct pmc_binding pb;
2081 sx_assert(&pmc_sx, SX_XLOCKED);
2083 KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));
2085 ri = PMC_TO_ROWINDEX(pm);
2086 mode = PMC_TO_MODE(pm);
2088 PMCDBG(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
2092 * First, we take the PMC off hardware.
2095 if (PMC_IS_SYSTEM_MODE(mode)) {
2098 * A system mode PMC runs on a specific CPU. Switch
2099 * to this CPU and turn hardware off.
2101 pmc_save_cpu_binding(&pb);
2103 cpu = PMC_TO_CPU(pm);
2105 pmc_select_cpu(cpu);
2107 /* stop the hardware if the PMC is not already stalled */
2108 if (pm->pm_state == PMC_STATE_RUNNING &&
2109 pm->pm_stalled == 0) {
2111 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
2113 KASSERT(phw->phw_pmc == pm,
2114 ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
2115 __LINE__, ri, phw->phw_pmc, pm));
2116 PMCDBG(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);
2119 md->pmd_stop_pmc(cpu, ri);
2123 PMCDBG(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);
2126 md->pmd_config_pmc(cpu, ri, NULL);
2129 /* adjust the global and process count of SS mode PMCs */
2130 if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) {
2133 if (po->po_sscount == 0) {
2134 atomic_subtract_rel_int(&pmc_ss_count, 1);
2135 LIST_REMOVE(po, po_ssnext);
2139 pm->pm_state = PMC_STATE_DELETED;
2141 pmc_restore_cpu_binding(&pb);
2144 * We could have references to this PMC structure in
2145 * the per-cpu sample queues. Wait for the queue to
2148 pmc_wait_for_pmc_idle(pm);
2150 } else if (PMC_IS_VIRTUAL_MODE(mode)) {
2153 * A virtual PMC could be running on multiple CPUs at
2156 * By marking its state as DELETED, we ensure that
2157 * this PMC is never further scheduled on hardware.
2159 * Then we wait till all CPUs are done with this PMC.
2161 pm->pm_state = PMC_STATE_DELETED;
2164 /* Wait for the PMCs runcount to come to zero. */
2165 pmc_wait_for_pmc_idle(pm);
2168 * At this point the PMC is off all CPUs and cannot be
2169 * freshly scheduled onto a CPU. It is now safe to
2170 * unlink all targets from this PMC. If a
2171 * process-record's refcount falls to zero, we remove
2172 * it from the hash table. The module-wide SX lock
2173 * protects us from races.
2175 LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
2176 pp = ptgt->pt_process;
2177 pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */
2179 PMCDBG(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);
2182 * If the target process record shows that no
2183 * PMCs are attached to it, reclaim its space.
2186 if (pp->pp_refcnt == 0) {
2187 pmc_remove_process_descriptor(pp);
2192 cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */
2197 * Release any MD resources
2200 (void) md->pmd_release_pmc(cpu, ri, pm);
2203 * Update row disposition
2206 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm)))
2207 PMC_UNMARK_ROW_STANDALONE(ri);
2209 PMC_UNMARK_ROW_THREAD(ri);
2211 /* unlink from the owner's list */
2213 LIST_REMOVE(pm, pm_next);
2214 pm->pm_owner = NULL;
2217 pmc_destroy_pmc_descriptor(pm);
2221 * Register an owner and a pmc.
2225 pmc_register_owner(struct proc *p, struct pmc *pmc)
2227 struct pmc_owner *po;
2229 sx_assert(&pmc_sx, SX_XLOCKED);
2231 if ((po = pmc_find_owner_descriptor(p)) == NULL)
2232 if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
2235 KASSERT(pmc->pm_owner == NULL,
2236 ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
2239 LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next);
2242 p->p_flag |= P_HWPMC;
2245 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
2246 pmclog_process_pmcallocate(pmc);
2248 PMCDBG(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p",
2255 * Return the current row disposition:
2257 * > 0 => PROCESS MODE
2258 * < 0 => SYSTEM MODE
2262 pmc_getrowdisp(int ri)
2264 return pmc_pmcdisp[ri];
2268 * Check if a PMC at row index 'ri' can be allocated to the current
2271 * Allocation can fail if:
2272 * - the current process is already being profiled by a PMC at index 'ri',
2273 * attached to it via OP_PMCATTACH.
2274 * - the current process has already allocated a PMC at index 'ri'
2279 pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu)
2283 struct pmc_owner *po;
2284 struct pmc_process *pp;
2286 PMCDBG(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d "
2287 "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu);
2290 * We shouldn't have already allocated a process-mode PMC at
2293 * We shouldn't have allocated a system-wide PMC on the same
2296 if ((po = pmc_find_owner_descriptor(p)) != NULL)
2297 LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
2298 if (PMC_TO_ROWINDEX(pm) == ri) {
2299 mode = PMC_TO_MODE(pm);
2300 if (PMC_IS_VIRTUAL_MODE(mode))
2302 if (PMC_IS_SYSTEM_MODE(mode) &&
2303 (int) PMC_TO_CPU(pm) == cpu)
2309 * We also shouldn't be the target of any PMC at this index
2310 * since otherwise a PMC_ATTACH to ourselves will fail.
2312 if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
2313 if (pp->pp_pmcs[ri].pp_pmc)
2316 PMCDBG(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
2317 p, p->p_pid, p->p_comm, ri);
2323 * Check if a given PMC at row index 'ri' can be currently used in
2328 pmc_can_allocate_row(int ri, enum pmc_mode mode)
2332 sx_assert(&pmc_sx, SX_XLOCKED);
2334 PMCDBG(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
2336 if (PMC_IS_SYSTEM_MODE(mode))
2337 disp = PMC_DISP_STANDALONE;
2339 disp = PMC_DISP_THREAD;
2342 * check disposition for PMC row 'ri':
2344 * Expected disposition Row-disposition Result
2346 * STANDALONE STANDALONE or FREE proceed
2347 * STANDALONE THREAD fail
2348 * THREAD THREAD or FREE proceed
2349 * THREAD STANDALONE fail
2352 if (!PMC_ROW_DISP_IS_FREE(ri) &&
2353 !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
2354 !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
2361 PMCDBG(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
2368 * Find a PMC descriptor with user handle 'pmcid' for thread 'td'.
2372 pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
2376 KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
2377 ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
2378 PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));
2380 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
2381 if (pm->pm_id == pmcid)
2388 pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
2392 struct pmc_owner *po;
2394 PMCDBG(PMC,FND,1, "find-pmc id=%d", pmcid);
2396 if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL)
2399 if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
2402 PMCDBG(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
2413 pmc_start(struct pmc *pm)
2417 struct pmc_owner *po;
2418 struct pmc_binding pb;
2421 ("[pmc,%d] null pm", __LINE__));
2423 mode = PMC_TO_MODE(pm);
2424 ri = PMC_TO_ROWINDEX(pm);
2427 PMCDBG(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);
2432 * Disallow PMCSTART if a logfile is required but has not been
2435 if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
2436 (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
2437 return EDOOFUS; /* programming error */
2440 * If this is a sampling mode PMC, log mapping information for
2441 * the kernel modules that are currently loaded.
2443 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
2444 pmc_log_kernel_mappings(pm);
2446 if (PMC_IS_VIRTUAL_MODE(mode)) {
2449 * If a PMCATTACH has never been done on this PMC,
2450 * attach it to its owner process.
2453 if (LIST_EMPTY(&pm->pm_targets))
2454 error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH :
2455 pmc_attach_process(po->po_owner, pm);
2458 * If the PMC is attached to its owner, then force a context
2459 * switch to ensure that the MD state gets set correctly.
2463 pm->pm_state = PMC_STATE_RUNNING;
2464 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER)
2465 pmc_force_context_switch();
2473 * A system-wide PMC.
2475 * Add the owner to the global list if this is a system-wide
2479 if (mode == PMC_MODE_SS) {
2480 if (po->po_sscount == 0) {
2481 LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
2482 atomic_add_rel_int(&pmc_ss_count, 1);
2483 PMCDBG(PMC,OPS,1, "po=%p in global list", po);
2488 /* Log mapping information for all processes in the system. */
2489 pmc_log_all_process_mappings(po);
2492 * Move to the CPU associated with this
2493 * PMC, and start the hardware.
2496 pmc_save_cpu_binding(&pb);
2498 cpu = PMC_TO_CPU(pm);
2500 if (pmc_cpu_is_disabled(cpu))
2503 pmc_select_cpu(cpu);
2506 * global PMCs are configured at allocation time
2507 * so write out the initial value and start the PMC.
2510 pm->pm_state = PMC_STATE_RUNNING;
2513 if ((error = md->pmd_write_pmc(cpu, ri,
2514 PMC_IS_SAMPLING_MODE(mode) ?
2515 pm->pm_sc.pm_reloadcount :
2516 pm->pm_sc.pm_initial)) == 0)
2517 error = md->pmd_start_pmc(cpu, ri);
2520 pmc_restore_cpu_binding(&pb);
2530 pmc_stop(struct pmc *pm)
2533 struct pmc_owner *po;
2534 struct pmc_binding pb;
2536 KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
2538 PMCDBG(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm,
2539 PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm));
2541 pm->pm_state = PMC_STATE_STOPPED;
2544 * If the PMC is a virtual mode one, changing the state to
2545 * non-RUNNING is enough to ensure that the PMC never gets
2548	 * If this PMC is currently running on a CPU, then it will be
2549	 * handled correctly at the time its target process is context
2553 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
2557 * A system-mode PMC. Move to the CPU associated with
2558 * this PMC, and stop the hardware. We update the
2559 * 'initial count' so that a subsequent PMCSTART will
2560 * resume counting from the current hardware count.
2563 pmc_save_cpu_binding(&pb);
2565 cpu = PMC_TO_CPU(pm);
2567 KASSERT(cpu >= 0 && cpu < mp_ncpus,
2568 ("[pmc,%d] illegal cpu=%d", __LINE__, cpu));
2570 if (pmc_cpu_is_disabled(cpu))
2573 pmc_select_cpu(cpu);
2575 ri = PMC_TO_ROWINDEX(pm);
2578 if ((error = md->pmd_stop_pmc(cpu, ri)) == 0)
2579 error = md->pmd_read_pmc(cpu, ri, &pm->pm_sc.pm_initial);
2582 pmc_restore_cpu_binding(&pb);
2586 /* remove this owner from the global list of SS PMC owners */
2587 if (PMC_TO_MODE(pm) == PMC_MODE_SS) {
2589 if (po->po_sscount == 0) {
2590 atomic_subtract_rel_int(&pmc_ss_count, 1);
2591 LIST_REMOVE(po, po_ssnext);
2592 PMCDBG(PMC,OPS,2,"po=%p removed from global list", po);
2601 static const char *pmc_op_to_name[] = {
2603 #define __PMC_OP(N, D) #N ,
2610 * The syscall interface
2613 #define PMC_GET_SX_XLOCK(...) do { \
2614 sx_xlock(&pmc_sx); \
2615 if (pmc_hook == NULL) { \
2616 sx_xunlock(&pmc_sx); \
2617 return __VA_ARGS__; \
2621 #define PMC_DOWNGRADE_SX() do { \
2622 sx_downgrade(&pmc_sx); \
2623 is_sx_downgraded = 1; \
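/*
 * Usage sketch (illustrative only, not additional driver code): each OP
 * handler below runs with the global "pmc_sx" lock held exclusively by
 * PMC_GET_SX_XLOCK(), which fails the syscall with the given error value
 * if the module is being unloaded.  Handlers that only read driver state
 * may call PMC_DOWNGRADE_SX() before doing copyout(9) work so that
 * concurrent readers are not blocked:
 *
 *	PMC_GET_SX_XLOCK(ENOSYS);
 *	...
 *	PMC_DOWNGRADE_SX();
 *	error = copyout(&result, arg, sizeof(result));
 *
 * where 'result' stands for the handler's reply structure.
 */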
2627 pmc_syscall_handler(struct thread *td, void *syscall_args)
2629 int error, is_sx_downgraded, op;
2630 struct pmc_syscall_args *c;
2633 PMC_GET_SX_XLOCK(ENOSYS);
2637 is_sx_downgraded = 0;
2639 c = (struct pmc_syscall_args *) syscall_args;
2644 PMCDBG(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
2645 pmc_op_to_name[op], arg);
2648 atomic_add_int(&pmc_stats.pm_syscalls, 1);
2655 * Configure a log file.
2657 * XXX This OP will be reworked.
2660 case PMC_OP_CONFIGURELOG:
2664 struct pmc_owner *po;
2665 struct pmc_op_configurelog cl;
2667 sx_assert(&pmc_sx, SX_XLOCKED);
2669 if ((error = copyin(arg, &cl, sizeof(cl))) != 0)
2672 /* mark this process as owning a log file */
2674 if ((po = pmc_find_owner_descriptor(p)) == NULL)
2675 if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
2681 * If a valid fd was passed in, try to configure that,
2682 * otherwise if 'fd' was less than zero and there was
2683 * a log file configured, flush its buffers and
2686 if (cl.pm_logfd >= 0)
2687 error = pmclog_configure_log(po, cl.pm_logfd);
2688 else if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
2689 pmclog_process_closelog(po);
2690 error = pmclog_flush(po);
2692 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
2693 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
2694 pm->pm_state == PMC_STATE_RUNNING)
2696 error = pmclog_deconfigure_log(po);
2711 case PMC_OP_FLUSHLOG:
2713 struct pmc_owner *po;
2715 sx_assert(&pmc_sx, SX_XLOCKED);
2717 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
2722 error = pmclog_flush(po);
2727 * Retrieve hardware configuration.
2730 case PMC_OP_GETCPUINFO: /* CPU information */
2732 struct pmc_op_getcpuinfo gci;
2734 gci.pm_cputype = md->pmd_cputype;
2735 gci.pm_ncpu = mp_ncpus;
2736 gci.pm_npmc = md->pmd_npmc;
2737 gci.pm_nclass = md->pmd_nclass;
2738 bcopy(md->pmd_classes, &gci.pm_classes,
2739 sizeof(gci.pm_classes));
2740 error = copyout(&gci, arg, sizeof(gci));
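		/*
		 * Userland normally reaches this through pmc_cpuinfo(3)
		 * rather than a raw syscall; a minimal sketch, assuming
		 * libpmc mirrors the fields of struct pmc_op_getcpuinfo
		 * above:
		 *
		 *	const struct pmc_cpuinfo *ci;
		 *
		 *	if (pmc_init() == 0 && pmc_cpuinfo(&ci) == 0)
		 *		printf("%d cpus, %d pmcs/cpu\n",
		 *		    ci->pm_ncpu, ci->pm_npmc);
		 */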
2746 * Get module statistics
2749 case PMC_OP_GETDRIVERSTATS:
2751 struct pmc_op_getdriverstats gms;
2753 bcopy(&pmc_stats, &gms, sizeof(gms));
2754 error = copyout(&gms, arg, sizeof(gms));
2760 * Retrieve module version number
2763 case PMC_OP_GETMODULEVERSION:
2767 /* retrieve the client's idea of the ABI version */
2768 if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0)
2770 /* don't service clients newer than our driver */
2772 if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) {
2773 error = EPROGMISMATCH;
2776 error = copyout(&modv, arg, sizeof(int));
2782 * Retrieve the state of all the PMCs on a given
2786 case PMC_OP_GETPMCINFO:
2788 uint32_t cpu, n, npmc;
2789 size_t pmcinfo_size;
2791 struct pmc_info *p, *pmcinfo;
2792 struct pmc_op_getpmcinfo *gpi;
2793 struct pmc_owner *po;
2794 struct pmc_binding pb;
2798 gpi = (struct pmc_op_getpmcinfo *) arg;
2800 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
2803 if (cpu >= (unsigned int) mp_ncpus) {
2808 if (pmc_cpu_is_disabled(cpu)) {
2813 /* switch to CPU 'cpu' */
2814 pmc_save_cpu_binding(&pb);
2815 pmc_select_cpu(cpu);
2817 npmc = md->pmd_npmc;
2819 pmcinfo_size = npmc * sizeof(struct pmc_info);
2820 MALLOC(pmcinfo, struct pmc_info *, pmcinfo_size, M_PMC,
2825 for (n = 0; n < md->pmd_npmc; n++, p++) {
2827 if ((error = md->pmd_describe(cpu, n, p, &pm)) != 0)
2830 if (PMC_ROW_DISP_IS_STANDALONE(n))
2831 p->pm_rowdisp = PMC_DISP_STANDALONE;
2832 else if (PMC_ROW_DISP_IS_THREAD(n))
2833 p->pm_rowdisp = PMC_DISP_THREAD;
2835 p->pm_rowdisp = PMC_DISP_FREE;
2837 p->pm_ownerpid = -1;
2839 if (pm == NULL) /* no PMC associated */
2844 KASSERT(po->po_owner != NULL,
2845 ("[pmc,%d] pmc_owner had a null proc pointer",
2848 p->pm_ownerpid = po->po_owner->p_pid;
2849 p->pm_mode = PMC_TO_MODE(pm);
2850 p->pm_event = pm->pm_event;
2851 p->pm_flags = pm->pm_flags;
2853 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
2855 pm->pm_sc.pm_reloadcount;
2858 pmc_restore_cpu_binding(&pb);
2860 /* now copy out the PMC info collected */
2862 error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
2864 FREE(pmcinfo, M_PMC);
2870	 * Set the administrative state of a PMC, i.e., whether
2871	 * the PMC is to be used or not.
2874 case PMC_OP_PMCADMIN:
2877 enum pmc_state request;
2880 struct pmc_op_pmcadmin pma;
2881 struct pmc_binding pb;
2883 sx_assert(&pmc_sx, SX_XLOCKED);
2885 KASSERT(td == curthread,
2886 ("[pmc,%d] td != curthread", __LINE__));
2888 error = priv_check(td, PRIV_PMC_MANAGE);
2892 if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
2897 if (cpu < 0 || cpu >= mp_ncpus) {
2902 if (pmc_cpu_is_disabled(cpu)) {
2907 request = pma.pm_state;
2909 if (request != PMC_STATE_DISABLED &&
2910 request != PMC_STATE_FREE) {
2915 ri = pma.pm_pmc; /* pmc id == row index */
2916 if (ri < 0 || ri >= (int) md->pmd_npmc) {
2922 * We can't disable a PMC with a row-index allocated
2923 * for process virtual PMCs.
2926 if (PMC_ROW_DISP_IS_THREAD(ri) &&
2927 request == PMC_STATE_DISABLED) {
2933 * otherwise, this PMC on this CPU is either free or
2934 * in system-wide mode.
2937 pmc_save_cpu_binding(&pb);
2938 pmc_select_cpu(cpu);
2941 phw = pc->pc_hwpmcs[ri];
2944 * XXX do we need some kind of 'forced' disable?
2947 if (phw->phw_pmc == NULL) {
2948 if (request == PMC_STATE_DISABLED &&
2949 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
2950 phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
2951 PMC_MARK_ROW_STANDALONE(ri);
2952 } else if (request == PMC_STATE_FREE &&
2953 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
2954 phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
2955 PMC_UNMARK_ROW_STANDALONE(ri);
2957 /* other cases are a no-op */
2961 pmc_restore_cpu_binding(&pb);
2970 case PMC_OP_PMCALLOCATE:
2978 struct pmc_op_pmcallocate pa;
2979 struct pmc_binding pb;
2981 if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
2988 if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC &&
2989 mode != PMC_MODE_TS && mode != PMC_MODE_TC) ||
2990 (cpu != (u_int) PMC_CPU_ANY && cpu >= (u_int) mp_ncpus)) {
2996 * Virtual PMCs should only ask for a default CPU.
2997 * System mode PMCs need to specify a non-default CPU.
3000 if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
3001 (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
3007 * Check that a disabled CPU is not being asked for.
3010 if (PMC_IS_SYSTEM_MODE(mode) && pmc_cpu_is_disabled(cpu)) {
3016 * Refuse an allocation for a system-wide PMC if this
3017 * process has been jailed, or if this process lacks
3018 * super-user credentials and the sysctl tunable
3019 * 'security.bsd.unprivileged_syspmcs' is zero.
3022 if (PMC_IS_SYSTEM_MODE(mode)) {
3023 if (jailed(curthread->td_ucred)) {
3027 if (!pmc_unprivileged_syspmcs) {
3028 error = priv_check(curthread,
3039	 * Check that 'pm_flags' contains only valid bits.
3042 if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW |
3043 PMC_F_LOG_PROCEXIT | PMC_F_CALLCHAIN)) != 0) {
3048 /* process logging options are not allowed for system PMCs */
3049 if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags &
3050 (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) {
3056 * All sampling mode PMCs need to be able to interrupt the
3059 if (PMC_IS_SAMPLING_MODE(mode))
3060 caps |= PMC_CAP_INTERRUPT;
3062 /* A valid class specifier should have been passed in. */
3063 for (n = 0; n < md->pmd_nclass; n++)
3064 if (md->pmd_classes[n].pm_class == pa.pm_class)
3066 if (n == md->pmd_nclass) {
3071 /* The requested PMC capabilities should be feasible. */
3072 if ((md->pmd_classes[n].pm_caps & caps) != caps) {
3077 PMCDBG(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
3078 pa.pm_ev, caps, mode, cpu);
3080 pmc = pmc_allocate_pmc_descriptor();
3081 pmc->pm_id = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class,
3083 pmc->pm_event = pa.pm_ev;
3084 pmc->pm_state = PMC_STATE_FREE;
3085 pmc->pm_caps = caps;
3086 pmc->pm_flags = pa.pm_flags;
3088 /* switch thread to CPU 'cpu' */
3089 pmc_save_cpu_binding(&pb);
3091 #define PMC_IS_SHAREABLE_PMC(cpu, n) \
3092 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \
3093 PMC_PHW_FLAG_IS_SHAREABLE)
3094 #define PMC_IS_UNALLOCATED(cpu, n) \
3095 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
3097 if (PMC_IS_SYSTEM_MODE(mode)) {
3098 pmc_select_cpu(cpu);
3099 for (n = 0; n < (int) md->pmd_npmc; n++)
3100 if (pmc_can_allocate_row(n, mode) == 0 &&
3101 pmc_can_allocate_rowindex(
3102 curthread->td_proc, n, cpu) == 0 &&
3103 (PMC_IS_UNALLOCATED(cpu, n) ||
3104 PMC_IS_SHAREABLE_PMC(cpu, n)) &&
3105 md->pmd_allocate_pmc(cpu, n, pmc,
3109 /* Process virtual mode */
3110 for (n = 0; n < (int) md->pmd_npmc; n++) {
3111 if (pmc_can_allocate_row(n, mode) == 0 &&
3112 pmc_can_allocate_rowindex(
3113 curthread->td_proc, n,
3114 PMC_CPU_ANY) == 0 &&
3115 md->pmd_allocate_pmc(curthread->td_oncpu,
3121 #undef PMC_IS_UNALLOCATED
3122 #undef PMC_IS_SHAREABLE_PMC
3124 pmc_restore_cpu_binding(&pb);
3126 if (n == (int) md->pmd_npmc) {
3127 pmc_destroy_pmc_descriptor(pmc);
3134 /* Fill in the correct value in the ID field */
3135 pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n);
3137 PMCDBG(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x",
3138 pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id);
3140 /* Process mode PMCs with logging enabled need log files */
3141 if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW))
3142 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
3144 /* All system mode sampling PMCs require a log file */
3145 if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode))
3146 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
3149	 * Configure global PMCs immediately.
3152 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) {
3154 pmc_save_cpu_binding(&pb);
3155 pmc_select_cpu(cpu);
3157 phw = pmc_pcpu[cpu]->pc_hwpmcs[n];
3159 if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 ||
3160 (error = md->pmd_config_pmc(cpu, n, pmc)) != 0) {
3161 (void) md->pmd_release_pmc(cpu, n, pmc);
3162 pmc_destroy_pmc_descriptor(pmc);
3165 pmc_restore_cpu_binding(&pb);
3170 pmc_restore_cpu_binding(&pb);
3173 pmc->pm_state = PMC_STATE_ALLOCATED;
3176 * mark row disposition
3179 if (PMC_IS_SYSTEM_MODE(mode))
3180 PMC_MARK_ROW_STANDALONE(n);
3182 PMC_MARK_ROW_THREAD(n);
3185 * Register this PMC with the current thread as its owner.
3189 pmc_register_owner(curthread->td_proc, pmc)) != 0) {
3190 pmc_release_pmc_descriptor(pmc);
3197	 * Return the id of the allocated PMC to the caller.
3200 pa.pm_pmcid = pmc->pm_id;
3202 error = copyout(&pa, arg, sizeof(pa));
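	/*
	 * From userland, allocation is normally driven through libpmc(3).
	 * A minimal sketch for a process-virtual counting PMC (the event
	 * name "instructions" is illustrative and depends on the CPU):
	 *
	 *	pmc_id_t id;
	 *	pmc_value_t v;
	 *
	 *	pmc_init();
	 *	pmc_allocate("instructions", PMC_MODE_TC, 0,
	 *	    PMC_CPU_ANY, &id);
	 *	pmc_attach(id, 0);
	 *	pmc_start(id);
	 *	 ... workload ...
	 *	pmc_stop(id);
	 *	pmc_read(id, &v);
	 *	pmc_release(id);
	 *
	 * (A pid of 0 in pmc_attach() means "attach to the calling
	 * process"; cf. the PMC_OP_PMCATTACH handler below.)
	 */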
3208 * Attach a PMC to a process.
3211 case PMC_OP_PMCATTACH:
3215 struct pmc_op_pmcattach a;
3217 sx_assert(&pmc_sx, SX_XLOCKED);
3219 if ((error = copyin(arg, &a, sizeof(a))) != 0)
3225 } else if (a.pm_pid == 0)
3226 a.pm_pid = td->td_proc->p_pid;
3228 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
3231 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
3236 /* PMCs may be (re)attached only when allocated or stopped */
3237 if (pm->pm_state == PMC_STATE_RUNNING) {
3240 } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
3241 pm->pm_state != PMC_STATE_STOPPED) {
3247 if ((p = pfind(a.pm_pid)) == NULL) {
3253		 * Ignore processes that are in the middle of exiting.
3255 if (p->p_flag & P_WEXIT) {
3257 PROC_UNLOCK(p); /* pfind() returns a locked process */
3262 * we are allowed to attach a PMC to a process if
3265 error = p_candebug(curthread, p);
3270 error = pmc_attach_process(p, pm);
3276 * Detach an attached PMC from a process.
3279 case PMC_OP_PMCDETACH:
3283 struct pmc_op_pmcattach a;
3285 if ((error = copyin(arg, &a, sizeof(a))) != 0)
3291 } else if (a.pm_pid == 0)
3292 a.pm_pid = td->td_proc->p_pid;
3294 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
3297 if ((p = pfind(a.pm_pid)) == NULL) {
3303 * Treat processes that are in the process of exiting
3304 * as if they were not present.
3307 if (p->p_flag & P_WEXIT)
3310 PROC_UNLOCK(p); /* pfind() returns a locked process */
3313 error = pmc_detach_process(p, pm);
3319 * Retrieve the MSR number associated with the counter
3320 * 'pmc_id'. This allows processes to directly use RDPMC
3321 * instructions to read their PMCs, without the overhead of a
3325 case PMC_OP_PMCGETMSR:
3329 struct pmc_target *pt;
3330 struct pmc_op_getmsr gm;
3334 /* CPU has no 'GETMSR' support */
3335 if (md->pmd_get_msr == NULL) {
3340 if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
3343 if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
3347 * The allocated PMC has to be a process virtual PMC,
3348 * i.e., of type MODE_T[CS]. Global PMCs can only be
3349 * read using the PMCREAD operation since they may be
3350 * allocated on a different CPU than the one we could
3351 * be running on at the time of the RDPMC instruction.
3353 * The GETMSR operation is not allowed for PMCs that
3354 * are inherited across processes.
3357 if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) ||
3358 (pm->pm_flags & PMC_F_DESCENDANTS)) {
3364		 * It only makes sense to use an RDPMC (or its
3365 * equivalent instruction on non-x86 architectures) on
3366 * a process that has allocated and attached a PMC to
3367		 * itself. Conversely, the PMC is only allowed to have
3368 * one process attached to it -- its owner.
3371 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL ||
3372 LIST_NEXT(pt, pt_next) != NULL ||
3373 pt->pt_process->pp_proc != pm->pm_owner->po_owner) {
3378 ri = PMC_TO_ROWINDEX(pm);
3380 if ((error = (*md->pmd_get_msr)(ri, &gm.pm_msr)) < 0)
3383 if ((error = copyout(&gm, arg, sizeof(gm))) < 0)
3387 * Mark our process as using MSRs. Update machine
3388 * state using a forced context switch.
3391 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS;
3392 pmc_force_context_switch();
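		/*
		 * For illustration (userland, x86 only; not driver code):
		 * once a process has obtained 'gm.pm_msr' for a PMC
		 * attached to itself, it can read the counter without a
		 * syscall:
		 *
		 *	static __inline uint64_t
		 *	read_pmc(uint32_t msr)
		 *	{
		 *		uint32_t lo, hi;
		 *
		 *		__asm __volatile("rdpmc"
		 *		    : "=a" (lo), "=d" (hi) : "c" (msr));
		 *		return (((uint64_t) hi << 32) | lo);
		 *	}
		 *
		 * The counter width and index encoding are machine
		 * dependent; this sketch is not part of libpmc.
		 */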
3398 * Release an allocated PMC
3401 case PMC_OP_PMCRELEASE:
3405 struct pmc_owner *po;
3406 struct pmc_op_simple sp;
3409 * Find PMC pointer for the named PMC.
3411 * Use pmc_release_pmc_descriptor() to switch off the
3412 * PMC, remove all its target threads, and remove the
3413 * PMC from its owner's list.
3415 * Remove the owner record if this is the last PMC
3421 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3424 pmcid = sp.pm_pmcid;
3426 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3430 pmc_release_pmc_descriptor(pm);
3431 pmc_maybe_remove_owner(po);
3439 * Read and/or write a PMC.
3446 struct pmc_op_pmcrw *pprw;
3447 struct pmc_op_pmcrw prw;
3448 struct pmc_binding pb;
3449 pmc_value_t oldvalue;
3453 if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
3457 PMCDBG(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
3460 /* must have at least one flag set */
3461 if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
3466 /* locate pmc descriptor */
3467 if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
3470 /* Can't read a PMC that hasn't been started. */
3471 if (pm->pm_state != PMC_STATE_ALLOCATED &&
3472 pm->pm_state != PMC_STATE_STOPPED &&
3473 pm->pm_state != PMC_STATE_RUNNING) {
3479		/* writing a new value is allowed only for 'STOPPED' PMCs */
3479 if (pm->pm_state == PMC_STATE_RUNNING &&
3480 (prw.pm_flags & PMC_F_NEWVALUE)) {
3485 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
3488 * If this PMC is attached to its owner (i.e.,
3489 * the process requesting this operation) and
3490 * is running, then attempt to get an
3491			 * up-to-date reading from hardware for a READ.
3492 * Writes are only allowed when the PMC is
3493 * stopped, so only update the saved value
3496 * If the PMC is not running, or is not
3497 * attached to its owner, read/write to the
3501 ri = PMC_TO_ROWINDEX(pm);
3503 mtx_pool_lock_spin(pmc_mtxpool, pm);
3504 cpu = curthread->td_oncpu;
3506 if (prw.pm_flags & PMC_F_OLDVALUE) {
3507 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
3508 (pm->pm_state == PMC_STATE_RUNNING))
3509 error = (*md->pmd_read_pmc)(cpu, ri,
3512 oldvalue = pm->pm_gv.pm_savedvalue;
3514 if (prw.pm_flags & PMC_F_NEWVALUE)
3515 pm->pm_gv.pm_savedvalue = prw.pm_value;
3517 mtx_pool_unlock_spin(pmc_mtxpool, pm);
3519 } else { /* System mode PMCs */
3520 cpu = PMC_TO_CPU(pm);
3521 ri = PMC_TO_ROWINDEX(pm);
3523 if (pmc_cpu_is_disabled(cpu)) {
3528 /* move this thread to CPU 'cpu' */
3529 pmc_save_cpu_binding(&pb);
3530 pmc_select_cpu(cpu);
3533 /* save old value */
3534 if (prw.pm_flags & PMC_F_OLDVALUE)
3535 if ((error = (*md->pmd_read_pmc)(cpu, ri,
3538 /* write out new value */
3539 if (prw.pm_flags & PMC_F_NEWVALUE)
3540 error = (*md->pmd_write_pmc)(cpu, ri,
3544 pmc_restore_cpu_binding(&pb);
3549 pprw = (struct pmc_op_pmcrw *) arg;
3552 if (prw.pm_flags & PMC_F_NEWVALUE)
3553 PMCDBG(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
3554 ri, prw.pm_value, oldvalue);
3555 else if (prw.pm_flags & PMC_F_OLDVALUE)
3556 PMCDBG(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue);
3559 /* return old value if requested */
3560 if (prw.pm_flags & PMC_F_OLDVALUE)
3561 if ((error = copyout(&oldvalue, &pprw->pm_value,
3562 sizeof(prw.pm_value))))
3570 * Set the sampling rate for a sampling mode PMC and the
3571 * initial count for a counting mode PMC.
3574 case PMC_OP_PMCSETCOUNT:
3577 struct pmc_op_pmcsetcount sc;
3581 if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
3584 if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
3587 if (pm->pm_state == PMC_STATE_RUNNING) {
3592 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3593 pm->pm_sc.pm_reloadcount = sc.pm_count;
3595 pm->pm_sc.pm_initial = sc.pm_count;
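		/*
		 * Userland sets this via pmc_set(3); e.g. for a sampling
		 * PMC, pmc_set(id, 65536) asks for one interrupt every
		 * 65536 events (the count shown is illustrative).
		 */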
3604 case PMC_OP_PMCSTART:
3608 struct pmc_op_simple sp;
3610 sx_assert(&pmc_sx, SX_XLOCKED);
3612 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3615 pmcid = sp.pm_pmcid;
3617 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3620 KASSERT(pmcid == pm->pm_id,
3621 ("[pmc,%d] pmcid %x != id %x", __LINE__,
3624 if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
3626 else if (pm->pm_state != PMC_STATE_STOPPED &&
3627 pm->pm_state != PMC_STATE_ALLOCATED) {
3632 error = pmc_start(pm);
3641 case PMC_OP_PMCSTOP:
3645 struct pmc_op_simple sp;
3649 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3652 pmcid = sp.pm_pmcid;
3655 * Mark the PMC as inactive and invoke the MD stop
3656 * routines if needed.
3659 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3662 KASSERT(pmcid == pm->pm_id,
3663 ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
3666 if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
3668 else if (pm->pm_state != PMC_STATE_RUNNING) {
3673 error = pmc_stop(pm);
3679	 * Write a user-supplied value to the log file.
3682 case PMC_OP_WRITELOG:
3684 struct pmc_op_writelog wl;
3685 struct pmc_owner *po;
3689 if ((error = copyin(arg, &wl, sizeof(wl))) != 0)
3692 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3697 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
3702 error = pmclog_process_userlog(po, &wl);
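		/*
		 * Userland normally reaches this through pmc_writelog(3),
		 * which passes a single user-supplied 32-bit value to be
		 * appended as a 'userdata' record in the owner's log
		 * stream.
		 */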
3712 if (is_sx_downgraded)
3713 sx_sunlock(&pmc_sx);
3715 sx_xunlock(&pmc_sx);
3718 atomic_add_int(&pmc_stats.pm_syscall_errors, 1);
3731 * Mark the thread as needing callchain capture and post an AST. The
3732 * actual callchain capture will be done in a context where it is safe
3733 * to take page faults.
3737 pmc_post_callchain_ast(void)
3744 * Mark this thread as needing processing in ast().
3745 * td->td_pflags will be safe to touch as the process was in
3746 * user space when it was interrupted.
3748 td->td_pflags |= TDP_CALLCHAIN;
3751 * Again, since we've entered this function directly from
3752	 * userland, `td' is guaranteed not to be locked by this CPU,
3753	 * so it's safe to try to acquire the thread lock even though we
3754 * are executing in an NMI context. We need to acquire this
3755 * lock before touching `td_flags' because other CPUs may be
3756 * in the process of touching this field.
3759 td->td_flags |= TDF_ASTPENDING;
3766 * Interrupt processing.
3768 * Find a free slot in the per-cpu array of samples and capture the
3769 * current callchain there. If a sample was successfully added, a bit
3770 * is set in mask 'pmc_cpumask' denoting that the DO_SAMPLES hook
3771 * needs to be invoked from the clock handler.
3773 * This function is meant to be called from an NMI handler. It cannot
3774 * use any of the locking primitives supplied by the OS.
3778 pmc_process_interrupt(int cpu, struct pmc *pm, struct trapframe *tf,
3781 int error, callchaindepth;
3783 struct pmc_sample *ps;
3784 struct pmc_samplebuffer *psb;
3789 * Allocate space for a sample buffer.
3791 psb = pmc_pcpu[cpu]->pc_sb;
3794 if (ps->ps_nsamples) { /* in use, reader hasn't caught up */
3796 atomic_add_int(&pmc_stats.pm_intr_bufferfull, 1);
3797 PMCDBG(SAM,INT,1,"(spc) cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d",
3798 cpu, pm, (void *) tf, inuserspace,
3799 (int) (psb->ps_write - psb->ps_samples),
3800 (int) (psb->ps_read - psb->ps_samples));
3806 /* Fill in entry. */
3807 PMCDBG(SAM,INT,1,"cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm,
3808 (void *) tf, inuserspace,
3809 (int) (psb->ps_write - psb->ps_samples),
3810 (int) (psb->ps_read - psb->ps_samples));
3812 atomic_add_rel_32(&pm->pm_runcount, 1); /* hold onto PMC */
3814 if ((td = curthread) && td->td_proc)
3815 ps->ps_pid = td->td_proc->p_pid;
3819 ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0;
3821 callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ?
3822 pmc_callchaindepth : 1;
3824 if (callchaindepth == 1)
3825 ps->ps_pc[0] = PMC_TRAPFRAME_TO_PC(tf);
3828 * Kernel stack traversals can be done immediately,
3829 * while we defer to an AST for user space traversals.
3833 pmc_save_kernel_callchain(ps->ps_pc,
3834 callchaindepth, tf);
3836 pmc_post_callchain_ast();
3837 callchaindepth = PMC_SAMPLE_INUSE;
3841 ps->ps_nsamples = callchaindepth; /* mark entry as in use */
3843 /* increment write pointer, modulo ring buffer size */
3845 if (ps == psb->ps_fence)
3846 psb->ps_write = psb->ps_samples;
3851 /* mark CPU as needing processing */
3852 atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
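	/*
	 * Note on the sample ring (a sketch of the discipline used above
	 * and in pmc_process_samples() below): the write pointer is only
	 * advanced here, in NMI context, and the read pointer only from
	 * the hardclock sweep, so no locking is required.  Both sides use
	 * the same wrap-around idiom,
	 *
	 *	ps++;
	 *	if (ps == psb->ps_fence)
	 *		ps = psb->ps_samples;
	 *
	 * and a slot becomes free again once its ps_nsamples field has
	 * been reset to zero by the consumer.
	 */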
3858 * Capture a user call chain. This function will be called from ast()
3859 * before control returns to userland and before the process gets
3864 pmc_capture_user_callchain(int cpu, struct trapframe *tf)
3868 struct pmc_sample *ps;
3869 struct pmc_samplebuffer *psb;
3871 psb = pmc_pcpu[cpu]->pc_sb;
3874 * Iterate through all deferred callchain requests.
3877 for (i = 0; i < pmc_nsamples; i++) {
3879 ps = &psb->ps_samples[i];
3880 if (ps->ps_nsamples != PMC_SAMPLE_INUSE)
3885 KASSERT(pm->pm_flags & PMC_F_CALLCHAIN,
3886 ("[pmc,%d] Retrieving callchain for PMC that doesn't "
3887 "want it", __LINE__));
3890 * Retrieve the callchain and mark the sample buffer
3891 * as 'processable' by the timer tick sweep code.
3893 ps->ps_nsamples = pmc_save_user_callchain(ps->ps_pc,
3894 pmc_callchaindepth, tf);
3902 * Process saved PC samples.
3906 pmc_process_samples(int cpu)
3911 struct pmc_owner *po;
3912 struct pmc_sample *ps;
3913 struct pmc_samplebuffer *psb;
3915 KASSERT(PCPU_GET(cpuid) == cpu,
3916 ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__,
3917 PCPU_GET(cpuid), cpu));
3919 psb = pmc_pcpu[cpu]->pc_sb;
3921 for (n = 0; n < pmc_nsamples; n++) { /* bound on #iterations */
3924 if (ps->ps_nsamples == PMC_SAMPLE_FREE)
3926 if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
3927 /* Need a rescan at a later time. */
3928 atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
3935 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
3936 ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__,
3937 pm, PMC_TO_MODE(pm)));
3939 /* Ignore PMCs that have been switched off */
3940 if (pm->pm_state != PMC_STATE_RUNNING)
3943 PMCDBG(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu,
3944 pm, ps->ps_nsamples, ps->ps_flags,
3945 (int) (psb->ps_write - psb->ps_samples),
3946 (int) (psb->ps_read - psb->ps_samples));
3949 * If this is a process-mode PMC that is attached to
3950 * its owner, and if the PC is in user mode, update
3951 * profiling statistics like timer-based profiling
3954 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
3955 if (ps->ps_flags & PMC_CC_F_USERSPACE) {
3956 td = FIRST_THREAD_IN_PROC(po->po_owner);
3957 addupc_intr(td, ps->ps_pc[0], 1);
3963 * Otherwise, this is either a sampling mode PMC that
3964 * is attached to a different process than its owner,
3965 * or a system-wide sampling PMC. Dispatch a log
3966 * entry to the PMC's owner process.
3969 pmclog_process_callchain(pm, ps);
3972 ps->ps_nsamples = 0; /* mark entry as free */
3973 atomic_subtract_rel_32(&pm->pm_runcount, 1);
3975		/* increment read pointer, modulo ring buffer size */
3976 if (++ps == psb->ps_fence)
3977 psb->ps_read = psb->ps_samples;
3982 atomic_add_int(&pmc_stats.pm_log_sweeps, 1);
3984 /* Do not re-enable stalled PMCs if we failed to process any samples */
3989 * Restart any stalled sampling PMCs on this CPU.
3991 * If the NMI handler sets the pm_stalled field of a PMC after
3992 * the check below, we'll end up processing the stalled PMC at
3993 * the next hardclock tick.
3995 for (n = 0; n < md->pmd_npmc; n++) {
3996 (void) (*md->pmd_get_config)(cpu,n,&pm);
3997 if (pm == NULL || /* !cfg'ed */
3998 pm->pm_state != PMC_STATE_RUNNING || /* !active */
3999 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */
4000 pm->pm_stalled == 0) /* !stalled */
4004 ri = PMC_TO_ROWINDEX(pm);
4005 (*md->pmd_start_pmc)(cpu, ri);
4014 * Handle a process exit.
4016 * Remove this process from all hash tables. If this process
4017 * owned any PMCs, turn off those PMCs and deallocate them,
4018 * removing any associations with target processes.
4020 * This function will be called by the last 'thread' of a
4023 * XXX This eventhandler gets called early in the exit process.
4024 * Consider using a 'hook' invocation from thread_exit() or equivalent
4025 * spot. Another negative is that kse_exit doesn't seem to call
4031 pmc_process_exit(void *arg __unused, struct proc *p)
4033 int is_using_hwpmcs;
4037 struct pmc_process *pp;
4038 struct pmc_owner *po;
4039 pmc_value_t newvalue, tmp;
4042 is_using_hwpmcs = p->p_flag & P_HWPMC;
4046 * Log a sysexit event to all SS PMC owners.
4048 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
4049 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
4050 pmclog_process_sysexit(po, p->p_pid);
4052 if (!is_using_hwpmcs)
4056 PMCDBG(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
4060 * Since this code is invoked by the last thread in an exiting
4061 * process, we would have context switched IN at some prior
4062 * point. However, with PREEMPTION, kernel mode context
4063 * switches may happen any time, so we want to disable a
4064 * context switch OUT till we get any PMCs targetting this
4065 * process off the hardware.
4067 * We also need to atomically remove this process'
4068 * entry from our target process hash table, using
4071 PMCDBG(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid,
4074 critical_enter(); /* no preemption */
4076 cpu = curthread->td_oncpu;
4078 if ((pp = pmc_find_process_descriptor(p,
4079 PMC_FLAG_REMOVE)) != NULL) {
4082 "process-exit proc=%p pmc-process=%p", p, pp);
4085		 * The exiting process could be the target of
4086		 * some PMCs which will be running on the
4087		 * currently executing CPU.
4089 * We need to turn these PMCs off like we
4090 * would do at context switch OUT time.
4092 for (ri = 0; ri < md->pmd_npmc; ri++) {
4095 * Pick up the pmc pointer from hardware
4096 * state similar to the CSW_OUT code.
4099 (void) (*md->pmd_get_config)(cpu, ri, &pm);
4101 PMCDBG(PRC,EXT,2, "ri=%d pm=%p", ri, pm);
4104 !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
4107 PMCDBG(PRC,EXT,2, "ppmcs[%d]=%p pm=%p "
4108 "state=%d", ri, pp->pp_pmcs[ri].pp_pmc,
4111 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
4112 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
4113 __LINE__, PMC_TO_ROWINDEX(pm), ri));
4115 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
4116 ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
4117 __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc));
4119 (void) md->pmd_stop_pmc(cpu, ri);
4121 KASSERT(pm->pm_runcount > 0,
4122 ("[pmc,%d] bad runcount ri %d rc %d",
4123 __LINE__, ri, pm->pm_runcount));
4125 /* Stop hardware only if it is actually running */
4126 if (pm->pm_state == PMC_STATE_RUNNING &&
4127 pm->pm_stalled == 0) {
4128 md->pmd_read_pmc(cpu, ri, &newvalue);
4130 PMC_PCPU_SAVED(cpu,ri);
4132 mtx_pool_lock_spin(pmc_mtxpool, pm);
4133 pm->pm_gv.pm_savedvalue += tmp;
4134 pp->pp_pmcs[ri].pp_pmcval += tmp;
4135 mtx_pool_unlock_spin(pmc_mtxpool, pm);
4138 atomic_subtract_rel_32(&pm->pm_runcount,1);
4140 KASSERT((int) pm->pm_runcount >= 0,
4141 ("[pmc,%d] runcount is %d", __LINE__, ri));
4143 (void) md->pmd_config_pmc(cpu, ri, NULL);
4147 * Inform the MD layer of this pseudo "context switch
4150 (void) md->pmd_switch_out(pmc_pcpu[cpu], pp);
4152 critical_exit(); /* ok to be pre-empted now */
4155 * Unlink this process from the PMCs that are
4156		 * targeting it. This will send a signal to
4157		 * all PMC owners whose PMCs are orphaned.
4159 * Log PMC value at exit time if requested.
4161 for (ri = 0; ri < md->pmd_npmc; ri++)
4162 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
4163 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
4164 PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)))
4165 pmclog_process_procexit(pm, pp);
4166 pmc_unlink_target_process(pm, pp);
4171 critical_exit(); /* pp == NULL */
4175 * If the process owned PMCs, free them up and free up
4178 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
4179 pmc_remove_owner(po);
4180 pmc_destroy_owner_descriptor(po);
4183 sx_xunlock(&pmc_sx);
4187 * Handle a process fork.
4189 * If the parent process 'p1' is under HWPMC monitoring, then copy
4190 * over any attached PMCs that have 'do_descendants' semantics.
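/*
 * For example (userland sketch; the event name is illustrative), a PMC
 * allocated with the PMC_F_DESCENDANTS flag,
 *
 *	pmc_allocate("instructions", PMC_MODE_TC, PMC_F_DESCENDANTS,
 *	    PMC_CPU_ANY, &id);
 *
 * is linked into every child subsequently created by fork(2), which is
 * what the handler below implements.
 */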
4194 pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc,
4197 int is_using_hwpmcs;
4199 uint32_t do_descendants;
4201 struct pmc_owner *po;
4202 struct pmc_process *ppnew, *ppold;
4204 (void) flags; /* unused parameter */
4207 is_using_hwpmcs = p1->p_flag & P_HWPMC;
4211 * If there are system-wide sampling PMCs active, we need to
4212	 * log all fork events to their owners' logs.
4215 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
4216 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
4217 pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);
4219 if (!is_using_hwpmcs)
4223 PMCDBG(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1,
4224 p1->p_pid, p1->p_comm, newproc);
4227 * If the parent process (curthread->td_proc) is a
4228 * target of any PMCs, look for PMCs that are to be
4229 * inherited, and link these into the new process
4232 if ((ppold = pmc_find_process_descriptor(curthread->td_proc,
4233 PMC_FLAG_NONE)) == NULL)
4234 goto done; /* nothing to do */
4237 for (ri = 0; ri < md->pmd_npmc; ri++)
4238 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL)
4239 do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS;
4240 if (do_descendants == 0) /* nothing to do */
4243 /* allocate a descriptor for the new process */
4244 if ((ppnew = pmc_find_process_descriptor(newproc,
4245 PMC_FLAG_ALLOCATE)) == NULL)
4249 * Run through all PMCs that were targeting the old process
4250 * and which specified F_DESCENDANTS and attach them to the
4253 * Log the fork event to all owners of PMCs attached to this
4254 * process, if not already logged.
4256 for (ri = 0; ri < md->pmd_npmc; ri++)
4257 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
4258 (pm->pm_flags & PMC_F_DESCENDANTS)) {
4259 pmc_link_target_process(pm, ppnew);
4261 if (po->po_sscount == 0 &&
4262 po->po_flags & PMC_PO_OWNS_LOGFILE)
4263 pmclog_process_procfork(po, p1->p_pid,
4268 * Now mark the new process as being tracked by this driver.
4271 newproc->p_flag |= P_HWPMC;
4272 PROC_UNLOCK(newproc);
4275 sx_xunlock(&pmc_sx);
4283 static const char *pmc_name_of_pmcclass[] = {
4285 #define __PMC_CLASS(N) #N ,
4290 pmc_initialize(void)
4293 struct pmc_binding pb;
4294 struct pmc_sample *ps;
4295 struct pmc_samplebuffer *sb;
4301 /* parse debug flags first */
4302 if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
4303 pmc_debugstr, sizeof(pmc_debugstr)))
4304 pmc_debugflags_parse(pmc_debugstr,
4305 pmc_debugstr+strlen(pmc_debugstr));
4308 PMCDBG(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);
4310 /* check kernel version */
4311 if (pmc_kernel_version != PMC_VERSION) {
4312 if (pmc_kernel_version == 0)
4313 printf("hwpmc: this kernel has not been compiled with "
4314 "'options HWPMC_HOOKS'.\n");
4316 printf("hwpmc: kernel version (0x%x) does not match "
4317 "module version (0x%x).\n", pmc_kernel_version,
4319 return EPROGMISMATCH;
4323 * check sysctl parameters
4326 if (pmc_hashsize <= 0) {
4327 (void) printf("hwpmc: tunable \"hashsize\"=%d must be "
4328 "greater than zero.\n", pmc_hashsize);
4329 pmc_hashsize = PMC_HASH_SIZE;
4332 if (pmc_nsamples <= 0 || pmc_nsamples > 65535) {
4333 (void) printf("hwpmc: tunable \"nsamples\"=%d out of "
4334 "range.\n", pmc_nsamples);
4335 pmc_nsamples = PMC_NSAMPLES;
4338 if (pmc_callchaindepth <= 0 ||
4339 pmc_callchaindepth > PMC_CALLCHAIN_DEPTH_MAX) {
4340 (void) printf("hwpmc: tunable \"callchaindepth\"=%d out of "
4341 "range.\n", pmc_callchaindepth);
4342 pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
4345 md = pmc_md_initialize();
4347 if (md == NULL || md->pmd_init == NULL)
4350 /* allocate space for the per-cpu array */
4351 MALLOC(pmc_pcpu, struct pmc_cpu **, mp_ncpus * sizeof(struct pmc_cpu *),
4352 M_PMC, M_WAITOK|M_ZERO);
4354 /* per-cpu 'saved values' for managing process-mode PMCs */
4355 MALLOC(pmc_pcpu_saved, pmc_value_t *,
4356 sizeof(pmc_value_t) * mp_ncpus * md->pmd_npmc, M_PMC, M_WAITOK);
4358 /* perform cpu dependent initialization */
4359 pmc_save_cpu_binding(&pb);
4360 for (cpu = 0; cpu < mp_ncpus; cpu++) {
4361 if (pmc_cpu_is_disabled(cpu))
4363 pmc_select_cpu(cpu);
4364 if ((error = md->pmd_init(cpu)) != 0)
4367 pmc_restore_cpu_binding(&pb);
4372 /* allocate space for the sample array */
4373 for (cpu = 0; cpu < mp_ncpus; cpu++) {
4374 if (pmc_cpu_is_disabled(cpu))
4376 MALLOC(sb, struct pmc_samplebuffer *,
4377 sizeof(struct pmc_samplebuffer) +
4378 pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
4381 sb->ps_read = sb->ps_write = sb->ps_samples;
4382 sb->ps_fence = sb->ps_samples + pmc_nsamples;
4383 KASSERT(pmc_pcpu[cpu] != NULL,
4384 ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
4386 MALLOC(sb->ps_callchains, uintptr_t *,
4387 pmc_callchaindepth * pmc_nsamples * sizeof(uintptr_t),
4388 M_PMC, M_WAITOK|M_ZERO);
4390 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
4391 ps->ps_pc = sb->ps_callchains +
4392 (n * pmc_callchaindepth);
4394 pmc_pcpu[cpu]->pc_sb = sb;
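		/*
		 * Resulting per-CPU layout (N = pmc_nsamples,
		 * D = pmc_callchaindepth):
		 *
		 *	sb->ps_samples[0 .. N-1]	sample ring
		 *	sb->ps_callchains[0 .. N*D-1]	PC storage
		 *	sb->ps_samples[n].ps_pc		-> &ps_callchains[n*D]
		 */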
4397 /* allocate space for the row disposition array */
4398 pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
4399 M_PMC, M_WAITOK|M_ZERO);
4401 KASSERT(pmc_pmcdisp != NULL,
4402 ("[pmc,%d] pmcdisp allocation returned NULL", __LINE__));
4404 /* mark all PMCs as available */
4405 for (n = 0; n < (int) md->pmd_npmc; n++)
4406 PMC_MARK_ROW_FREE(n);
4408	/* allocate owner and process hash tables */
4409 pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
4410 &pmc_ownerhashmask);
4412 pmc_processhash = hashinit(pmc_hashsize, M_PMC,
4413 &pmc_processhashmask);
4414 mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",
4417 LIST_INIT(&pmc_ss_owners);
4420 /* allocate a pool of spin mutexes */
4421 pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size,
4424 PMCDBG(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
4425 "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
4426 pmc_processhash, pmc_processhashmask);
4428	/* register process {exit,fork} handlers */
4429 pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
4430 pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
4431 pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
4432 pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);
4434 /* initialize logging */
4435 pmclog_initialize();
4437 /* set hook functions */
4438 pmc_intr = md->pmd_intr;
4439 pmc_hook = pmc_hook_handler;
4442 printf(PMC_MODULE_NAME ":");
4443 for (n = 0; n < (int) md->pmd_nclass; n++) {
4444 printf(" %s/%d/0x%b",
4445 pmc_name_of_pmcclass[md->pmd_classes[n].pm_class],
4446 md->pmd_nclasspmcs[n],
4447 md->pmd_classes[n].pm_caps,
4449 "\1INT\2USR\3SYS\4EDG\5THR"
4450 "\6REA\7WRI\10INV\11QUA\12PRC"
4459 /* prepare to be unloaded */
4464 struct pmc_ownerhash *ph;
4465 struct pmc_owner *po, *tmp;
4466 struct pmc_binding pb;
4468 struct pmc_processhash *prh;
4471 PMCDBG(MOD,INI,0, "%s", "cleanup");
4473 /* switch off sampling */
4474 atomic_store_rel_int(&pmc_cpumask, 0);
4478 if (pmc_hook == NULL) { /* being unloaded already */
4479 sx_xunlock(&pmc_sx);
4483 pmc_hook = NULL; /* prevent new threads from entering module */
4485 /* deregister event handlers */
4486 EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
4487 EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
4489 /* send SIGBUS to all owner threads, free up allocations */
4491 for (ph = pmc_ownerhash;
4492 ph <= &pmc_ownerhash[pmc_ownerhashmask];
4494 LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
4495 pmc_remove_owner(po);
4497 /* send SIGBUS to owner processes */
4498 PMCDBG(MOD,INI,2, "cleanup signal proc=%p "
4499 "(%d, %s)", po->po_owner,
4500 po->po_owner->p_pid,
4501 po->po_owner->p_comm);
4503 PROC_LOCK(po->po_owner);
4504 psignal(po->po_owner, SIGBUS);
4505 PROC_UNLOCK(po->po_owner);
4507 pmc_destroy_owner_descriptor(po);
4511 /* reclaim allocated data structures */
4513 mtx_pool_destroy(&pmc_mtxpool);
4515 mtx_destroy(&pmc_processhash_mtx);
4516 if (pmc_processhash) {
4518 struct pmc_process *pp;
4520 PMCDBG(MOD,INI,3, "%s", "destroy process hash");
4521 for (prh = pmc_processhash;
4522 prh <= &pmc_processhash[pmc_processhashmask];
4524 LIST_FOREACH(pp, prh, pp_next)
4525 PMCDBG(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);
4528 hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
4529 pmc_processhash = NULL;
4532 if (pmc_ownerhash) {
4533 PMCDBG(MOD,INI,3, "%s", "destroy owner hash");
4534 hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
4535 pmc_ownerhash = NULL;
4538 KASSERT(LIST_EMPTY(&pmc_ss_owners),
4539 ("[pmc,%d] Global SS owner list not empty", __LINE__));
4540 KASSERT(pmc_ss_count == 0,
4541 ("[pmc,%d] Global SS count not empty", __LINE__));
4543 /* free the per-cpu sample buffers */
4544 for (cpu = 0; cpu < mp_ncpus; cpu++) {
4545 if (pmc_cpu_is_disabled(cpu))
4547 KASSERT(pmc_pcpu[cpu]->pc_sb != NULL,
4548 ("[pmc,%d] Null cpu sample buffer cpu=%d", __LINE__,
4550 FREE(pmc_pcpu[cpu]->pc_sb->ps_callchains, M_PMC);
4551 FREE(pmc_pcpu[cpu]->pc_sb, M_PMC);
4552 pmc_pcpu[cpu]->pc_sb = NULL;
4555 /* do processor dependent cleanup */
4556 PMCDBG(MOD,INI,3, "%s", "md cleanup");
4558 pmc_save_cpu_binding(&pb);
4559 for (cpu = 0; cpu < mp_ncpus; cpu++) {
4560 PMCDBG(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
4561 cpu, pmc_pcpu[cpu]);
4562 if (pmc_cpu_is_disabled(cpu))
4564 pmc_select_cpu(cpu);
4566 (void) md->pmd_cleanup(cpu);
4570 pmc_restore_cpu_binding(&pb);
4573 /* deallocate per-cpu structures */
4574 FREE(pmc_pcpu, M_PMC);
4577 FREE(pmc_pcpu_saved, M_PMC);
4578 pmc_pcpu_saved = NULL;
4581 FREE(pmc_pmcdisp, M_PMC);
4587 sx_xunlock(&pmc_sx); /* we are done */
4591 * The function called at load/unload.
4595 load (struct module *module __unused, int cmd, void *arg __unused)
4603 /* initialize the subsystem */
4604 error = pmc_initialize();
4607 PMCDBG(MOD,INI,1, "syscall=%d ncpus=%d",
4608 pmc_syscall_num, mp_ncpus);
4615 PMCDBG(MOD,INI,1, "%s", "unloaded");
4619 error = EINVAL; /* XXX should panic(9) */
4627 MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module");