/*-
 * Copyright (c) 2003-2006 Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pmckern.h>
#include <sys/pmclog.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/vnode.h>

#include <sys/linker.h>		/* needs to be after <sys/malloc.h> */

#include <machine/atomic.h>
#include <machine/md_var.h>
enum pmc_flags {
	PMC_FLAG_NONE	  = 0x00, /* do nothing */
	PMC_FLAG_REMOVE   = 0x01, /* atomically remove entry from hash */
	PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
};

/*
 * The offset in sysent where the syscall is allocated.
 */

static int pmc_syscall_num = NO_SYSCALL;

struct pmc_cpu		**pmc_pcpu;	 /* per-cpu state */
pmc_value_t		*pmc_pcpu_saved; /* saved PMC values: CSW handling */

#define	PMC_PCPU_SAVED(C,R)	pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
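/*
 * For example (illustrative values only): with md->pmd_npmc == 4
 * hardware PMCs per CPU, the saved value for row index 1 on CPU 2
 * lives at pmc_pcpu_saved[1 + 4*2], i.e. slot 9 of this flat array.
 */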
struct mtx_pool		*pmc_mtxpool;
static int		*pmc_pmcdisp;	 /* PMC row dispositions */

#define	PMC_ROW_DISP_IS_FREE(R)		(pmc_pmcdisp[(R)] == 0)
#define	PMC_ROW_DISP_IS_THREAD(R)	(pmc_pmcdisp[(R)] > 0)
#define	PMC_ROW_DISP_IS_STANDALONE(R)	(pmc_pmcdisp[(R)] < 0)
#define	PMC_MARK_ROW_FREE(R) do {					\
	pmc_pmcdisp[(R)] = 0;						\
} while (0)

#define	PMC_MARK_ROW_STANDALONE(R) do {					\
	KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
	    __LINE__));							\
	atomic_add_int(&pmc_pmcdisp[(R)], -1);				\
	KASSERT(pmc_pmcdisp[(R)] >= (-mp_ncpus), ("[pmc,%d] row "	\
	    "disposition error", __LINE__));				\
} while (0)

#define	PMC_UNMARK_ROW_STANDALONE(R) do {				\
	atomic_add_int(&pmc_pmcdisp[(R)], 1);				\
	KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
	    __LINE__));							\
} while (0)

#define	PMC_MARK_ROW_THREAD(R) do {					\
	KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
	    __LINE__));							\
	atomic_add_int(&pmc_pmcdisp[(R)], 1);				\
} while (0)

#define	PMC_UNMARK_ROW_THREAD(R) do {					\
	atomic_add_int(&pmc_pmcdisp[(R)], -1);				\
	KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
	    __LINE__));							\
} while (0)
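/*
 * Illustration of the row disposition protocol (example values only):
 * on a 4-CPU machine, allocating a system-wide PMC at row 'ri' on two
 * CPUs leaves pmc_pmcdisp[ri] == -2 (standalone), while two
 * process-virtual allocations on the same row would instead leave it
 * at +2 (thread).  A row must return to 0 (free) before it can switch
 * dispositions.
 */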
/* various event handlers */
static eventhandler_tag	pmc_exit_tag, pmc_fork_tag;

/* Module statistics */
struct pmc_op_getdriverstats pmc_stats;

/* Machine/processor dependent operations */
struct pmc_mdep		*md;

/*
 * Hash tables mapping owner processes and target threads to PMCs.
 */

struct mtx pmc_processhash_mtx;		/* spin mutex */
static u_long pmc_processhashmask;
static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;

/*
 * Hash table of PMC owner descriptors.  This table is protected by
 * the shared PMC "sx" lock.
 */

static u_long pmc_ownerhashmask;
static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;

/*
 * List of PMC owners with system-wide sampling PMCs.
 */

static LIST_HEAD(, pmc_owner) pmc_ss_owners;
static int	pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
static int	pmc_debugflags_parse(char *newstr, char *fence);

static int	load(struct module *module, int cmd, void *arg);
static int	pmc_attach_process(struct proc *p, struct pmc *pm);
static struct pmc *pmc_allocate_pmc_descriptor(void);
static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p);
static int	pmc_attach_one_process(struct proc *p, struct pmc *pm);
static int	pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
    int cpu);
static int	pmc_can_attach(struct pmc *pm, struct proc *p);
static void	pmc_cleanup(void);
static int	pmc_detach_process(struct proc *p, struct pmc *pm);
static int	pmc_detach_one_process(struct proc *p, struct pmc *pm,
    int flags);
static void	pmc_destroy_owner_descriptor(struct pmc_owner *po);
static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
static int	pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
    pmc_id_t pmcid);
static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
    uint32_t mode);
static void	pmc_force_context_switch(void);
static void	pmc_link_target_process(struct pmc *pm,
    struct pmc_process *pp);
static void	pmc_maybe_remove_owner(struct pmc_owner *po);
static void	pmc_process_csw_in(struct thread *td);
static void	pmc_process_csw_out(struct thread *td);
static void	pmc_process_exit(void *arg, struct proc *p);
static void	pmc_process_fork(void *arg, struct proc *p1,
    struct proc *p2, int n);
static void	pmc_process_samples(int cpu);
static void	pmc_release_pmc_descriptor(struct pmc *pmc);
static void	pmc_remove_owner(struct pmc_owner *po);
static void	pmc_remove_process_descriptor(struct pmc_process *pp);
static void	pmc_restore_cpu_binding(struct pmc_binding *pb);
static void	pmc_save_cpu_binding(struct pmc_binding *pb);
static void	pmc_select_cpu(int cpu);
static int	pmc_start(struct pmc *pm);
static int	pmc_stop(struct pmc *pm);
static int	pmc_syscall_handler(struct thread *td, void *syscall_args);
static void	pmc_unlink_target_process(struct pmc *pmc,
    struct pmc_process *pp);
/*
 * Kernel tunables and sysctl(8) interface.
 */

SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters");

struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
char	pmc_debugstr[PMC_DEBUG_STRSIZE];
TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
    sizeof(pmc_debugstr));
SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
    CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN,
    0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
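/*
 * An illustrative (not normative) use of the debugflags string, as
 * accepted by pmc_debugflags_parse() below: whitespace-separated
 * "<group>=<flag>,<flag>..." clauses, with '*' selecting every flag
 * in a group, e.g.:
 *
 *	# sysctl kern.hwpmc.debugflags="pmc=allocate,release process=*"
 */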
/*
 * kern.hwpmc.hashsize -- determines the number of rows in the
 * hash tables used to look up threads and processes.
 */

static int pmc_hashsize = PMC_HASH_SIZE;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
    &pmc_hashsize, 0, "rows in hash tables");

/*
 * kern.hwpmc.nsamples -- number of PC samples per CPU
 */

static int pmc_nsamples = PMC_NSAMPLES;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nsamples", &pmc_nsamples);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_TUN|CTLFLAG_RD,
    &pmc_nsamples, 0, "number of PC samples per CPU");

/*
 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
 */

static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
    &pmc_mtxpool_size, 0, "size of spin mutex pool");

/*
 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
 * allocate system-wide PMCs.
 *
 * Allowing unprivileged processes to allocate system PMCs is convenient
 * if system-wide measurements need to be taken concurrently with other
 * per-process measurements.  This feature is turned off by default.
 */

static int pmc_unprivileged_syspmcs = 0;
TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
    &pmc_unprivileged_syspmcs, 0,
    "allow unprivileged processes to allocate system PMCs");
/*
 * Hash function.  Discard the lower 2 bits of the pointer since
 * these are always zero for our uses.  The hash multiplier is
 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
 */

#if	LONG_BIT == 64
#define	_PMC_HM		11400714819323198486u
#elif	LONG_BIT == 32
#define	_PMC_HM		2654435769u
#else
#error	Must know the size of 'long' to compile
#endif

#define	PMC_HASH_PTR(P,M)	((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
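/*
 * This is Fibonacci hashing: the pointer, with its always-zero low
 * bits dropped, is multiplied by 2^LONG_BIT/phi and the product is
 * masked down to a bucket index.  For example, with
 * pmc_ownerhashmask == 0x0f (illustrative value) this spreads
 * consecutively allocated 'struct proc' pointers across 16 owner
 * hash buckets.
 */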
/* The `sysent' for the new syscall */
static struct sysent pmc_sysent = {
	2,			/* sy_narg */
	pmc_syscall_handler	/* sy_call */
};

static struct syscall_module_data pmc_syscall_mod = {
	load,
	NULL,
	&pmc_syscall_num,
	&pmc_sysent,
	{ 0, NULL }
};

static moduledata_t pmc_mod = {
	PMC_MODULE_NAME,
	syscall_module_handler,
	&pmc_syscall_mod
};

DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
MODULE_VERSION(pmc, PMC_VERSION);
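/*
 * With the DECLARE_MODULE()/MODULE_VERSION() glue above, the driver
 * is loaded and unloaded like any other kld; a typical session
 * (illustrative):
 *
 *	# kldload hwpmc
 *	# kldunload hwpmc	(only while no PMCs are in use)
 */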
enum pmc_dbgparse_state {
	PMCDS_WS,	/* in whitespace */
	PMCDS_MAJOR,	/* seen a major keyword */
	PMCDS_MINOR	/* seen a minor keyword */
};
static int
pmc_debugflags_parse(char *newstr, char *fence)
{
	char c, *p, *q;
	struct pmc_debugflags *tmpflags;
	int error, found, *newbits, tmp;
	size_t kwlen;

	MALLOC(tmpflags, struct pmc_debugflags *, sizeof(*tmpflags),
	    M_PMC, M_WAITOK|M_ZERO);

	p = newstr;
	error = 0;

	for (; p < fence && (c = *p); p++) {

		/* skip white space */
		if (c == ' ' || c == '\t')
			continue;

		/* look for a keyword followed by "=" */
		for (q = p; p < fence && (c = *p) && c != '='; p++)
			;
		if ((kwlen = p - q) == 0 || p >= fence) {
			error = EINVAL;
			goto done;
		}

		newbits = NULL;

		/* lookup flag group name */
#define	DBG_SET_FLAG_MAJ(S,F)						\
		if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0)	\
			newbits = &tmpflags->pdb_ ## F;

		DBG_SET_FLAG_MAJ("cpu",		CPU);
		DBG_SET_FLAG_MAJ("csw",		CSW);
		DBG_SET_FLAG_MAJ("logging",	LOG);
		DBG_SET_FLAG_MAJ("module",	MOD);
		DBG_SET_FLAG_MAJ("md",		MDP);
		DBG_SET_FLAG_MAJ("owner",	OWN);
		DBG_SET_FLAG_MAJ("pmc",		PMC);
		DBG_SET_FLAG_MAJ("process",	PRC);
		DBG_SET_FLAG_MAJ("sampling",	SAM);

		if (newbits == NULL) {
			error = EINVAL;
			goto done;
		}

		p++;		/* skip the '=' */

		/* Now parse the individual flags */
		tmp = 0;
	newflag:
		for (q = p; p < fence && (c = *p); p++)
			if (c == ' ' || c == '\t' || c == ',')
				break;

		/* p == fence or c == ws or c == "," or c == 0 */

		if ((kwlen = p - q) == 0) {
			*newbits = tmp;
			continue;
		}

		found = 0;

#define	DBG_SET_FLAG_MIN(S,F)						\
		if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0)	\
			tmp |= found = (1 << PMC_DEBUG_MIN_ ## F)

		/* a '*' denotes all possible flags in the group */
		if (kwlen == 1 && *q == '*')
			tmp = found = ~0;
		/* look for individual flag names */
		DBG_SET_FLAG_MIN("allocaterow", ALR);
		DBG_SET_FLAG_MIN("allocate",	ALL);
		DBG_SET_FLAG_MIN("attach",	ATT);
		DBG_SET_FLAG_MIN("bind",	BND);
		DBG_SET_FLAG_MIN("config",	CFG);
		DBG_SET_FLAG_MIN("exec",	EXC);
		DBG_SET_FLAG_MIN("exit",	EXT);
		DBG_SET_FLAG_MIN("find",	FND);
		DBG_SET_FLAG_MIN("flush",	FLS);
		DBG_SET_FLAG_MIN("fork",	FRK);
		DBG_SET_FLAG_MIN("getbuf",	GTB);
		DBG_SET_FLAG_MIN("hook",	PMH);
		DBG_SET_FLAG_MIN("init",	INI);
		DBG_SET_FLAG_MIN("intr",	INT);
		DBG_SET_FLAG_MIN("linktarget",	TLK);
		DBG_SET_FLAG_MIN("mayberemove", OMR);
		DBG_SET_FLAG_MIN("ops",		OPS);
		DBG_SET_FLAG_MIN("read",	REA);
		DBG_SET_FLAG_MIN("register",	REG);
		DBG_SET_FLAG_MIN("release",	REL);
		DBG_SET_FLAG_MIN("remove",	ORM);
		DBG_SET_FLAG_MIN("sample",	SAM);
		DBG_SET_FLAG_MIN("scheduleio",	SIO);
		DBG_SET_FLAG_MIN("select",	SEL);
		DBG_SET_FLAG_MIN("signal",	SIG);
		DBG_SET_FLAG_MIN("swi",		SWI);
		DBG_SET_FLAG_MIN("swo",		SWO);
		DBG_SET_FLAG_MIN("start",	STA);
		DBG_SET_FLAG_MIN("stop",	STO);
		DBG_SET_FLAG_MIN("syscall",	PMS);
		DBG_SET_FLAG_MIN("unlinktarget", TUL);
		DBG_SET_FLAG_MIN("write",	WRI);

		if (found == 0) {
			/* unrecognized flag name */
			error = EINVAL;
			goto done;
		}

		if (c == 0 || c == ' ' || c == '\t') {	/* end of flag group */
			*newbits = tmp;
			continue;
		}

		p++;
		goto newflag;
	}

	/* save the new flag set */
	bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags));

 done:
	FREE(tmpflags, M_PMC);
	return error;
}
static int
pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	char *fence, *newstr;
	int error;
	unsigned int n;

	(void) arg1; (void) arg2; /* unused parameters */

	n = sizeof(pmc_debugstr);
	MALLOC(newstr, char *, n, M_PMC, M_ZERO|M_WAITOK);
	(void) strlcpy(newstr, pmc_debugstr, n);

	error = sysctl_handle_string(oidp, newstr, n, req);

	/* if there is a new string, parse and copy it */
	if (error == 0 && req->newptr != NULL) {
		fence = newstr + (n < req->newlen ? n : req->newlen + 1);
		if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
			(void) strlcpy(pmc_debugstr, newstr,
			    sizeof(pmc_debugstr));
	}

	FREE(newstr, M_PMC);
	return error;
}
/*
 * Concurrency Control
 *
 * The driver manages the following data structures:
 *
 *   - target process descriptors, one per target process
 *   - owner process descriptors (and attached lists), one per owner process
 *   - lookup hash tables for owner and target processes
 *   - PMC descriptors (and attached lists)
 *   - per-cpu hardware state
 *   - the 'hook' variable through which the kernel calls into
 *     this module
 *   - the machine hardware state (managed by the MD layer)
 *
 * These data structures are accessed from:
 *
 *   - thread context-switch code
 *   - interrupt handlers (possibly on multiple cpus)
 *   - kernel threads on multiple cpus running on behalf of user
 *     processes doing system calls
 *   - this driver's private kernel threads
 *
 * = Locks and Locking strategy =
 *
 * The driver uses four locking strategies for its operation:
 *
 * - The global SX lock "pmc_sx" is used to protect the module's
 *   internal data structures.
 *
 *   Calls into the module by syscall() start with this lock being
 *   held in exclusive mode.  Depending on the requested operation,
 *   the lock may be downgraded to 'shared' mode to allow more
 *   concurrent readers into the module.  Calls into the module from
 *   other parts of the kernel acquire the lock in shared mode.
 *
 *   This SX lock is held in exclusive mode for any operations that
 *   modify the linkages between the driver's internal data structures.
 *
 *   The 'pmc_hook' function pointer is also protected by this lock.
 *   It is only examined with the sx lock held in exclusive mode.  The
 *   kernel module is allowed to be unloaded only with the sx lock held
 *   in exclusive mode.  In normal syscall handling, after acquiring the
 *   pmc_sx lock we first check that 'pmc_hook' is non-null before
 *   proceeding.  This prevents races between the thread unloading the
 *   module and other threads seeking to use the module.
 *
 * - Lookups of target process structures and owner process structures
 *   cannot use the global "pmc_sx" SX lock because these lookups need
 *   to happen during context switches and in other critical sections
 *   where sleeping is not allowed.  We protect these lookup tables
 *   with their own private spin-mutexes, "pmc_processhash_mtx" and
 *   "pmc_ownerhash_mtx".
 *
 * - Interrupt handlers work in a lock-free manner.  At interrupt
 *   time, handlers look at the PMC pointer (phw->phw_pmc) configured
 *   when the PMC was started.  If this pointer is NULL, the interrupt
 *   is ignored after updating driver statistics.  We ensure that this
 *   pointer is set (using an atomic operation if necessary) before the
 *   PMC hardware is started.  Conversely, this pointer is unset atomically
 *   only after the PMC hardware is stopped.
 *
 *   We ensure that everything needed for the operation of an
 *   interrupt handler is available without it needing to acquire any
 *   locks.  We also ensure that a PMC's software state is destroyed only
 *   after the PMC is taken off hardware (on all CPUs).
 *
 * - Context-switch handling with process-private PMCs needs more
 *   care.
 *
 *   A given process may be the target of multiple PMCs.  For example,
 *   PMCATTACH and PMCDETACH may be requested by a process on one CPU
 *   while the target process is running on another.  A PMC could also
 *   be getting released because its owner is exiting.  We tackle
 *   these situations in the following manner:
 *
 *   - each target process structure 'pmc_process' has an array
 *     of 'struct pmc *' pointers, one for each hardware PMC.
 *
 *   - At context switch IN time, each "target" PMC in RUNNING state
 *     gets started on hardware and a pointer to each PMC is copied into
 *     the per-cpu phw array.  The 'runcount' for the PMC is
 *     incremented.
 *
 *   - At context switch OUT time, all process-virtual PMCs are stopped
 *     on hardware.  The saved value is added to the PMC's value field
 *     only if the PMC is in a non-deleted state (the PMC's state could
 *     have changed during the current time slice).
 *
 *     Note that in between a switch IN on a processor and the
 *     corresponding switch OUT, the PMC could have been released on
 *     another CPU.  Therefore context switch OUT always looks at the
 *     hardware state to turn OFF PMCs and will update a PMC's saved
 *     value only if it is reachable from the target process record.
 *
 *   - OP PMCRELEASE could be called on a PMC at any time (the PMC could
 *     be attached to many processes at the time of the call and could
 *     be active on multiple CPUs).
 *
 *     We prevent further scheduling of the PMC by marking it as in
 *     state 'DELETED'.  If the runcount of the PMC is non-zero then
 *     this PMC is currently running on a CPU somewhere.  The thread
 *     doing the PMCRELEASE operation waits by repeatedly doing a
 *     pause() till the runcount comes to zero.
 *
 * The contents of a PMC descriptor (struct pmc) are protected using
 * a spin-mutex.  In order to save space, we use a mutex pool.
 *
 * In terms of lock types used by witness(4), we use:
 * - Type "pmc-sx", used by the global SX lock.
 * - Type "pmc-sleep", for sleep mutexes used by logger threads.
 * - Type "pmc-per-proc", for protecting PMC owner descriptors.
 * - Type "pmc-leaf", used for all other spin mutexes.
 */
/*
 * save the cpu binding of the current kthread
 */

static void
pmc_save_cpu_binding(struct pmc_binding *pb)
{
	PMCDBG(CPU,BND,2, "%s", "save-cpu");
	thread_lock(curthread);
	pb->pb_bound = sched_is_bound(curthread);
	pb->pb_cpu   = curthread->td_oncpu;
	thread_unlock(curthread);
	PMCDBG(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
}
/*
 * restore the cpu binding of the current thread
 */

static void
pmc_restore_cpu_binding(struct pmc_binding *pb)
{
	PMCDBG(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
	    curthread->td_oncpu, pb->pb_cpu);
	thread_lock(curthread);
	if (pb->pb_bound)
		sched_bind(curthread, pb->pb_cpu);
	else
		sched_unbind(curthread);
	thread_unlock(curthread);
	PMCDBG(CPU,BND,2, "%s", "restore-cpu done");
}
/*
 * move execution over to the specified cpu and bind it there.
 */

static void
pmc_select_cpu(int cpu)
{
	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[pmc,%d] bad cpu number %d", __LINE__, cpu));

	/* never move to a disabled CPU */
	KASSERT(pmc_cpu_is_disabled(cpu) == 0, ("[pmc,%d] selecting "
	    "disabled CPU %d", __LINE__, cpu));

	PMCDBG(CPU,SEL,2, "select-cpu cpu=%d", cpu);
	thread_lock(curthread);
	sched_bind(curthread, cpu);
	thread_unlock(curthread);

	KASSERT(curthread->td_oncpu == cpu,
	    ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
		cpu, curthread->td_oncpu));

	PMCDBG(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
}
/*
 * Force a context switch.
 *
 * We do this by pause'ing for 1 tick -- invoking mi_switch() is not
 * guaranteed to force a context switch.
 */

static void
pmc_force_context_switch(void)
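{
	/*
	 * A minimal sketch of the body, assuming pause(9) is
	 * available: sleeping for a single tick takes this thread off
	 * the CPU and forces a reschedule, which is all that is
	 * needed here.
	 */
	pause("pmcctx", 1);
}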
/*
 * Get the file name for an executable.  This is a simple wrapper
 * around vn_fullpath(9).
 */

static void
pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
{
	struct thread *td;

	td = curthread;
	*fullpath = "unknown";
	*freepath = NULL;
	vn_lock(v, LK_CANRECURSE | LK_EXCLUSIVE | LK_RETRY, td);
	vn_fullpath(td, v, fullpath, freepath);
	VOP_UNLOCK(v, 0, td);
}
/*
 * remove a process owning PMCs
 */

static void
pmc_remove_owner(struct pmc_owner *po)
{
	struct pmc *pm, *tmp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG(OWN,ORM,1, "remove-owner po=%p", po);

	/* Remove descriptor from the owner hash table */
	LIST_REMOVE(po, po_next);

	/* release all owned PMC descriptors */
	LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
		PMCDBG(OWN,ORM,2, "pmc=%p", pm);
		KASSERT(pm->pm_owner == po,
		    ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po));

		pmc_release_pmc_descriptor(pm);	/* will unlink from the list */
	}

	KASSERT(po->po_sscount == 0,
	    ("[pmc,%d] SS count not zero", __LINE__));
	KASSERT(LIST_EMPTY(&po->po_pmcs),
	    ("[pmc,%d] PMC list not empty", __LINE__));

	/* de-configure the log file if present */
	if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		pmclog_deconfigure_log(po);
}
/*
 * remove an owner process record if all conditions are met.
 */

static void
pmc_maybe_remove_owner(struct pmc_owner *po)
{
	PMCDBG(OWN,OMR,1, "maybe-remove-owner po=%p", po);

	/*
	 * Remove owner record if
	 * - this process does not own any PMCs
	 * - this process has not allocated a system-wide sampling buffer
	 */

	if (LIST_EMPTY(&po->po_pmcs) &&
	    ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) {
		pmc_remove_owner(po);
		pmc_destroy_owner_descriptor(po);
	}
}
/*
 * Add an association between a target process and a PMC.
 */

static void
pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
{
	int ri;
	struct pmc_target *pt;

	sx_assert(&pmc_sx, SX_XLOCKED);

	KASSERT(pm != NULL && pp != NULL,
	    ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
	KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
	    ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d",
		__LINE__, pm, pp->pp_proc->p_pid));
	KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < ((int) md->pmd_npmc - 1),
	    ("[pmc,%d] Illegal reference count %d for process record %p",
		__LINE__, pp->pp_refcnt, (void *) pp));

	ri = PMC_TO_ROWINDEX(pm);

	PMCDBG(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
	    pm, ri, pp);

	LIST_FOREACH(pt, &pm->pm_targets, pt_next)
	    if (pt->pt_process == pp)
		    KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
			__LINE__, pp, pm));

	MALLOC(pt, struct pmc_target *, sizeof(struct pmc_target),
	    M_PMC, M_ZERO|M_WAITOK);

	pt->pt_process = pp;

	LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);

	atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc,
	    (uintptr_t)pm);

	if (pm->pm_owner->po_owner == pp->pp_proc)
		pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;

	/*
	 * Initialize the per-process values at this row index.
	 */
	pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ?
	    pm->pm_sc.pm_reloadcount : 0;

	pp->pp_refcnt++;
}
/*
 * Removes the association between a target process and a PMC.
 */

static void
pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
{
	int ri;
	struct proc *p;
	struct pmc_target *ptgt;

	sx_assert(&pmc_sx, SX_XLOCKED);

	KASSERT(pm != NULL && pp != NULL,
	    ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));

	KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt < (int) md->pmd_npmc,
	    ("[pmc,%d] Illegal ref count %d on process record %p",
		__LINE__, pp->pp_refcnt, (void *) pp));

	ri = PMC_TO_ROWINDEX(pm);

	PMCDBG(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
	    pm, ri, pp);

	KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
	    ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
		ri, pm, pp->pp_pmcs[ri].pp_pmc));

	pp->pp_pmcs[ri].pp_pmc = NULL;
	pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;

	/* Remove owner-specific flags */
	if (pm->pm_owner->po_owner == pp->pp_proc) {
		pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
		pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
	}

	pp->pp_refcnt--;

	/* Remove the target process from the PMC structure */
	LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
		if (ptgt->pt_process == pp)
			break;

	KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
		    "in pmc %p", __LINE__, pp->pp_proc, pp, pm));

	LIST_REMOVE(ptgt, pt_next);
	FREE(ptgt, M_PMC);

	/* if the PMC now lacks targets, send the owner a SIGIO */
	if (LIST_EMPTY(&pm->pm_targets)) {
		p = pm->pm_owner->po_owner;
		PROC_LOCK(p);
		psignal(p, SIGIO);
		PROC_UNLOCK(p);

		PMCDBG(PRC,SIG,2, "signalling proc=%p signal=%d", p,
		    SIGIO);
	}
}
/*
 * Check if PMC 'pm' may be attached to target process 't'.
 */

static int
pmc_can_attach(struct pmc *pm, struct proc *t)
{
	struct proc *o;		/* pmc owner */
	struct ucred *oc, *tc;	/* owner, target credentials */
	int decline_attach, i;

	/*
	 * A PMC's owner can always attach that PMC to itself.
	 */

	if ((o = pm->pm_owner->po_owner) == t)
		return 0;

	PROC_LOCK(o);
	oc = o->p_ucred;
	crhold(oc);
	PROC_UNLOCK(o);

	PROC_LOCK(t);
	tc = t->p_ucred;
	crhold(tc);
	PROC_UNLOCK(t);

	/*
	 * The effective uid of the PMC owner should match at least one
	 * of the {effective,real,saved} uids of the target process.
	 */

	decline_attach = oc->cr_uid != tc->cr_uid &&
	    oc->cr_uid != tc->cr_svuid &&
	    oc->cr_uid != tc->cr_ruid;

	/*
	 * Every one of the target's group ids must be in the owner's
	 * group list.
	 */
	for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
		decline_attach = !groupmember(tc->cr_groups[i], oc);

	/* check the real and saved gids too */
	if (decline_attach == 0)
		decline_attach = !groupmember(tc->cr_rgid, oc) ||
		    !groupmember(tc->cr_svgid, oc);

	crfree(tc);
	crfree(oc);

	return !decline_attach;
}
/*
 * Attach a process to a PMC.
 */

static int
pmc_attach_one_process(struct proc *p, struct pmc *pm)
{
	int ri;
	char *fullpath, *freepath;
	struct pmc_process *pp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
	    PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

	/*
	 * Locate the process descriptor corresponding to process 'p',
	 * allocating space as needed.
	 *
	 * Verify that rowindex 'pm_rowindex' is free in the process
	 * descriptor.
	 *
	 * If not, allocate space for a descriptor and link the
	 * process descriptor and PMC.
	 */
	ri = PMC_TO_ROWINDEX(pm);

	if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL)
		return ENOMEM;

	if (pp->pp_pmcs[ri].pp_pmc == pm) /* already present at slot [ri] */
		return EEXIST;

	if (pp->pp_pmcs[ri].pp_pmc != NULL)
		return EBUSY;

	pmc_link_target_process(pm, pp);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) &&
	    (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0)
		pm->pm_flags |= PMC_F_NEEDS_LOGFILE;

	pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */

	/* issue an attach event to a configured log file */
	if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) {
		pmc_getfilename(p->p_textvp, &fullpath, &freepath);
		pmclog_process_pmcattach(pm, p->p_pid, fullpath);
		if (freepath)
			FREE(freepath, M_TEMP);
	}

	/* mark process as using HWPMCs */
	PROC_LOCK(p);
	p->p_flag |= P_HWPMC;
	PROC_UNLOCK(p);

	return 0;
}
/*
 * Attach a process and optionally its children.
 */

static int
pmc_attach_process(struct proc *p, struct pmc *pm)
{
	int error;
	struct proc *top;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
	    PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

	/*
	 * If this PMC successfully allowed a GETMSR operation
	 * in the past, disallow further ATTACHes.
	 */

	if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0)
		return EPERM;

	if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
		return pmc_attach_one_process(p, pm);

	/*
	 * Traverse all child processes, attaching them to
	 * this PMC.
	 */

	sx_slock(&proctree_lock);

	top = p;

	for (;;) {
		if ((error = pmc_attach_one_process(p, pm)) != 0)
			break;
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				goto done;
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}

	if (error)
		(void) pmc_detach_process(top, pm);

 done:
	sx_sunlock(&proctree_lock);

	return error;
}
/*
 * Detach a process from a PMC.  If there are no other PMCs tracking
 * this process, remove the process structure from its hash table.  If
 * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
 */

static int
pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
{
	int ri;
	struct pmc_process *pp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	KASSERT(pm != NULL,
	    ("[pmc,%d] null pm pointer", __LINE__));

	ri = PMC_TO_ROWINDEX(pm);

	PMCDBG(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
	    pm, ri, p, p->p_pid, p->p_comm, flags);

	if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
		return ESRCH;

	if (pp->pp_pmcs[ri].pp_pmc != pm)
		return EINVAL;

	pmc_unlink_target_process(pm, pp);

	/* Issue a detach entry if a log file is configured */
	if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE)
		pmclog_process_pmcdetach(pm, p->p_pid);

	/*
	 * If there are no PMCs targeting this process, we remove its
	 * descriptor from the target hash table and unset the P_HWPMC
	 * flag in the struct proc.
	 */
	KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
	    ("[pmc,%d] Illegal refcnt %d for process struct %p",
		__LINE__, pp->pp_refcnt, pp));

	if (pp->pp_refcnt != 0)	/* still a target of some PMC */
		return 0;

	pmc_remove_process_descriptor(pp);

	if (flags & PMC_FLAG_REMOVE)
		FREE(pp, M_PMC);

	PROC_LOCK(p);
	p->p_flag &= ~P_HWPMC;
	PROC_UNLOCK(p);

	return 0;
}
/*
 * Detach a process and optionally its descendants from a PMC.
 */

static int
pmc_detach_process(struct proc *p, struct pmc *pm)
{
	struct proc *top;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
	    PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

	if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
		return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);

	/*
	 * Traverse all children, detaching them from this PMC.  We
	 * ignore errors since we could be detaching a PMC from a
	 * partially attached proc tree.
	 */

	sx_slock(&proctree_lock);

	top = p;

	for (;;) {
		(void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);

		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				goto done;
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}

 done:
	sx_sunlock(&proctree_lock);

	if (LIST_EMPTY(&pm->pm_targets))
		pm->pm_flags &= ~PMC_F_ATTACH_DONE;

	return 0;
}
/*
 * Thread context switch IN
 */

static void
pmc_process_csw_in(struct thread *td)
{
	int cpu;
	unsigned int ri;
	struct pmc *pm;
	struct proc *p;
	struct pmc_cpu *pc;
	struct pmc_hw *phw;
	struct pmc_process *pp;
	pmc_value_t newvalue;

	p = td->td_proc;

	if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
		return;

	KASSERT(pp->pp_proc == td->td_proc,
	    ("[pmc,%d] not my thread state", __LINE__));

	critical_enter(); /* no preemption from this point */

	cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */

	PMCDBG(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
	    p->p_pid, p->p_comm, pp);

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));

	pc = pmc_pcpu[cpu];

	for (ri = 0; ri < md->pmd_npmc; ri++) {

		if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
			continue;

		KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
		    ("[pmc,%d] Target PMC in non-virtual mode (%d)",
			__LINE__, PMC_TO_MODE(pm)));

		KASSERT(PMC_TO_ROWINDEX(pm) == ri,
		    ("[pmc,%d] Row index mismatch pmc %d != ri %d",
			__LINE__, PMC_TO_ROWINDEX(pm), ri));

		/*
		 * Only PMCs that are marked as 'RUNNING' need
		 * be placed on hardware.
		 */

		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		/* increment PMC runcount */
		atomic_add_rel_32(&pm->pm_runcount, 1);

		/* configure the HWPMC we are going to use. */
		md->pmd_config_pmc(cpu, ri, pm);

		phw = pc->pc_hwpmcs[ri];

		KASSERT(phw != NULL,
		    ("[pmc,%d] null hw pointer", __LINE__));

		KASSERT(phw->phw_pmc == pm,
		    ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
			phw->phw_pmc, pm));

		/*
		 * Write out saved value and start the PMC.
		 *
		 * Sampling PMCs use a per-process value, while
		 * counting mode PMCs use a per-pmc value that is
		 * inherited across descendants.
		 */

		if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
			mtx_pool_lock_spin(pmc_mtxpool, pm);
			newvalue = PMC_PCPU_SAVED(cpu,ri) =
			    pp->pp_pmcs[ri].pp_pmcval;
			mtx_pool_unlock_spin(pmc_mtxpool, pm);
		} else {
			KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
			    ("[pmc,%d] illegal mode=%d", __LINE__,
				PMC_TO_MODE(pm)));
			mtx_pool_lock_spin(pmc_mtxpool, pm);
			newvalue = PMC_PCPU_SAVED(cpu, ri) =
			    pm->pm_gv.pm_savedvalue;
			mtx_pool_unlock_spin(pmc_mtxpool, pm);
		}

		PMCDBG(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);

		md->pmd_write_pmc(cpu, ri, newvalue);
		md->pmd_start_pmc(cpu, ri);
	}

	/*
	 * perform any other architecture/cpu dependent thread
	 * switch-in actions.
	 */

	(void) (*md->pmd_switch_in)(pc, pp);

	critical_exit();
}
/*
 * Thread context switch OUT.
 */

static void
pmc_process_csw_out(struct thread *td)
{
	int cpu;
	enum pmc_mode mode;
	unsigned int ri;
	struct pmc *pm;
	struct proc *p;
	struct pmc_cpu *pc;
	struct pmc_process *pp;
	int64_t tmp;
	pmc_value_t newvalue;

	/*
	 * Locate our process descriptor; this may be NULL if
	 * this process is exiting and we have already removed
	 * the process from the target process table.
	 *
	 * Note that due to kernel preemption, multiple
	 * context switches may happen while the process is
	 * exiting.
	 *
	 * Note also that if the target process cannot be
	 * found we still need to deconfigure any PMCs that
	 * are currently running on hardware.
	 */

	p = td->td_proc;
	pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);

	/*
	 * save PMCs
	 */

	critical_enter();

	cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */

	PMCDBG(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
	    p->p_pid, p->p_comm, pp);

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));

	pc = pmc_pcpu[cpu];

	/*
	 * When a PMC gets unlinked from a target PMC, it will
	 * be removed from the target's pp_pmc[] array.
	 *
	 * However, on a MP system, the target could have been
	 * executing on another CPU at the time of the unlink.
	 * So, at context switch OUT time, we need to look at
	 * the hardware to determine if a PMC is scheduled on
	 * the current CPU.
	 */

	for (ri = 0; ri < md->pmd_npmc; ri++) {

		(void) (*md->pmd_get_config)(cpu, ri, &pm);

		if (pm == NULL)	/* nothing at this row index */
			continue;

		mode = PMC_TO_MODE(pm);
		if (!PMC_IS_VIRTUAL_MODE(mode))
			continue; /* not a process virtual PMC */

		KASSERT(PMC_TO_ROWINDEX(pm) == ri,
		    ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
			__LINE__, PMC_TO_ROWINDEX(pm), ri));

		/* Stop hardware if not already stopped */
		if (pm->pm_stalled == 0)
			md->pmd_stop_pmc(cpu, ri);

		/* reduce this PMC's runcount */
		atomic_subtract_rel_32(&pm->pm_runcount, 1);

		/*
		 * If this PMC is associated with this process,
		 * save the reading.
		 */

		if (pp != NULL && pp->pp_pmcs[ri].pp_pmc != NULL) {

			KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
			    ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__,
				pm, ri, pp->pp_pmcs[ri].pp_pmc));

			KASSERT(pp->pp_refcnt > 0,
			    ("[pmc,%d] pp refcnt = %d", __LINE__,
				pp->pp_refcnt));

			md->pmd_read_pmc(cpu, ri, &newvalue);

			tmp = newvalue - PMC_PCPU_SAVED(cpu,ri);

			PMCDBG(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd", cpu, ri,
			    tmp);

			if (mode == PMC_MODE_TS) {

				/*
				 * For sampling process-virtual PMCs,
				 * we expect the count to be
				 * decreasing as the 'value'
				 * programmed into the PMC is the
				 * number of events to be seen till
				 * the next sampling interrupt.
				 */
				if (tmp < 0)
					tmp += pm->pm_sc.pm_reloadcount;
				mtx_pool_lock_spin(pmc_mtxpool, pm);
				pp->pp_pmcs[ri].pp_pmcval -= tmp;
				if ((int64_t) pp->pp_pmcs[ri].pp_pmcval < 0)
					pp->pp_pmcs[ri].pp_pmcval +=
					    pm->pm_sc.pm_reloadcount;
				mtx_pool_unlock_spin(pmc_mtxpool, pm);

			} else {

				/*
				 * For counting process-virtual PMCs,
				 * we expect the count to be
				 * increasing monotonically, modulo a 64
				 * bit wraparound.
				 */
				KASSERT((int64_t) tmp >= 0,
				    ("[pmc,%d] negative increment cpu=%d "
				     "ri=%d newvalue=%jx saved=%jx "
				     "incr=%jx", __LINE__, cpu, ri,
				     newvalue, PMC_PCPU_SAVED(cpu,ri), tmp));

				mtx_pool_lock_spin(pmc_mtxpool, pm);
				pm->pm_gv.pm_savedvalue += tmp;
				pp->pp_pmcs[ri].pp_pmcval += tmp;
				mtx_pool_unlock_spin(pmc_mtxpool, pm);

				if (pm->pm_flags & PMC_F_LOG_PROCCSW)
					pmclog_process_proccsw(pm, pp, tmp);
			}
		}

		/* mark hardware as free */
		md->pmd_config_pmc(cpu, ri, NULL);
	}

	/*
	 * perform any other architecture/cpu dependent thread
	 * switch out functions.
	 */

	(void) (*md->pmd_switch_out)(pc, pp);

	critical_exit();
}
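/*
 * A worked example of the switch-out arithmetic above (example
 * numbers): for a counting (TC) PMC with PMC_PCPU_SAVED(cpu,ri) ==
 * 1000 at switch-in and a hardware reading of 1500 at switch-out,
 * tmp == 500, and both pm_savedvalue and pp_pmcval advance by 500.
 * For a sampling (TS) PMC the delta is instead folded back into the
 * remaining-events count modulo pm_reloadcount.
 */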
/*
 * Log a KLD operation.
 */

static void
pmc_process_kld_load(struct pmckern_map_in *pkm)
{
	struct pmc_owner *po;

	sx_assert(&pmc_sx, SX_LOCKED);

	/*
	 * Notify owners of system sampling PMCs about KLD operations.
	 */

	LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
	    if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		    pmclog_process_map_in(po, (pid_t) -1, pkm->pm_address,
			(char *) pkm->pm_file);

	/*
	 * TODO: Notify owners of (all) process-sampling PMCs too.
	 */
}
static void
pmc_process_kld_unload(struct pmckern_map_out *pkm)
{
	struct pmc_owner *po;

	sx_assert(&pmc_sx, SX_LOCKED);

	LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
	    if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		    pmclog_process_map_out(po, (pid_t) -1,
			pkm->pm_address, pkm->pm_address + pkm->pm_size);

	/*
	 * TODO: Notify owners of process-sampling PMCs.
	 */
}
/*
 * A mapping change for a process.
 */

static void
pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm)
{
	int ri;
	pid_t pid;
	char *fullpath, *freepath;
	const struct pmc *pm;
	struct pmc_owner *po;
	const struct pmc_process *pp;

	freepath = fullpath = NULL;
	pmc_getfilename((struct vnode *) pkm->pm_file, &fullpath, &freepath);

	pid = td->td_proc->p_pid;

	/* Inform owners of all system-wide sampling PMCs. */
	LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
	    if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		    pmclog_process_map_in(po, pid, pkm->pm_address, fullpath);

	if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
		goto done;

	/*
	 * Inform sampling PMC owners tracking this process.
	 */
	for (ri = 0; ri < md->pmd_npmc; ri++)
		if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
		    PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			pmclog_process_map_in(pm->pm_owner,
			    pid, pkm->pm_address, fullpath);

 done:
	if (freepath)
		FREE(freepath, M_TEMP);
}
/*
 * Log an munmap request.
 */

static void
pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm)
{
	int ri;
	pid_t pid;
	struct pmc_owner *po;
	const struct pmc *pm;
	const struct pmc_process *pp;

	pid = td->td_proc->p_pid;

	LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
	    if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		    pmclog_process_map_out(po, pid, pkm->pm_address,
			pkm->pm_address + pkm->pm_size);

	if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
		return;

	for (ri = 0; ri < md->pmd_npmc; ri++)
		if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
		    PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			pmclog_process_map_out(pm->pm_owner, pid,
			    pkm->pm_address, pkm->pm_address + pkm->pm_size);
}
/*
 * The 'hook' invoked from the kernel proper.
 */

#ifdef	DEBUG
const char *pmc_hooknames[] = {
	/* these strings correspond to PMC_FN_* in <sys/pmckern.h> */
	"",
	"EXEC",
	"CSW-IN",
	"CSW-OUT",
	"SAMPLE",
	"KLDLOAD",
	"KLDUNLOAD",
	"MMAP",
	"MUNMAP"
};
#endif

static int
pmc_hook_handler(struct thread *td, int function, void *arg)
{

	PMCDBG(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
	    pmc_hooknames[function], arg);
	switch (function) {
	case PMC_FN_PROCESS_EXEC:
	{
		char *fullpath, *freepath;
		unsigned int ri;
		int is_using_hwpmcs;
		struct pmc *pm;
		struct proc *p;
		struct pmc_owner *po;
		struct pmc_process *pp;
		struct pmckern_procexec *pk;

		sx_assert(&pmc_sx, SX_XLOCKED);

		p = td->td_proc;
		pmc_getfilename(p->p_textvp, &fullpath, &freepath);

		pk = (struct pmckern_procexec *) arg;

		/* Inform owners of SS mode PMCs of the exec event. */
		LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
		    if (po->po_flags & PMC_PO_OWNS_LOGFILE)
			    pmclog_process_procexec(po, PMC_ID_INVALID,
				p->p_pid, pk->pm_entryaddr, fullpath);

		PROC_LOCK(p);
		is_using_hwpmcs = p->p_flag & P_HWPMC;
		PROC_UNLOCK(p);

		if (!is_using_hwpmcs) {
			if (freepath)
				FREE(freepath, M_TEMP);
			break;
		}

		/*
		 * PMCs are not inherited across an exec(): remove any
		 * PMCs that this process is the owner of.
		 */

		if ((po = pmc_find_owner_descriptor(p)) != NULL) {
			pmc_remove_owner(po);
			pmc_destroy_owner_descriptor(po);
		}

		/*
		 * If the process being exec'ed is not the target of any
		 * PMC, we are done.
		 */

		if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) {
			if (freepath)
				FREE(freepath, M_TEMP);
			break;
		}

		/*
		 * Log the exec event to all monitoring owners.  Skip
		 * owners who have already received the event because
		 * they had system sampling PMCs active.
		 */

		for (ri = 0; ri < md->pmd_npmc; ri++)
			if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
				po = pm->pm_owner;
				if (po->po_sscount == 0 &&
				    po->po_flags & PMC_PO_OWNS_LOGFILE)
					pmclog_process_procexec(po, pm->pm_id,
					    p->p_pid, pk->pm_entryaddr,
					    fullpath);
			}

		if (freepath)
			FREE(freepath, M_TEMP);

		PMCDBG(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
		    p, p->p_pid, p->p_comm, pk->pm_credentialschanged);

		if (pk->pm_credentialschanged == 0) /* no change */
			break;

		/*
		 * If the newly exec()'ed process has a different credential
		 * than before, allow it to be the target of a PMC only if
		 * the PMC's owner has sufficient privilege.
		 */

		for (ri = 0; ri < md->pmd_npmc; ri++)
			if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL)
				if (pmc_can_attach(pm, td->td_proc) != 0)
					pmc_detach_one_process(td->td_proc,
					    pm, PMC_FLAG_NONE);

		KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
		    ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__,
			pp->pp_refcnt, pp));

		/*
		 * If this process is no longer the target of any
		 * PMCs, we can remove the process entry and free
		 * up space.
		 */

		if (pp->pp_refcnt == 0) {
			pmc_remove_process_descriptor(pp);
			FREE(pp, M_PMC);
		}
	}
	break;
	case PMC_FN_CSW_IN:
		pmc_process_csw_in(td);
		break;

	case PMC_FN_CSW_OUT:
		pmc_process_csw_out(td);
		break;

	/*
	 * Process accumulated PC samples.
	 *
	 * This function is expected to be called by hardclock() for
	 * each CPU that has accumulated PC samples.
	 *
	 * This function is to be executed on the CPU whose samples
	 * are being processed.
	 */
	case PMC_FN_DO_SAMPLES:

		/*
		 * Clear the cpu specific bit in the CPU mask before
		 * doing the rest of the processing.  If the NMI handler
		 * gets invoked after the "atomic_clear_int()" call
		 * below but before "pmc_process_samples()" gets
		 * around to processing the interrupt, then we will
		 * come back here at the next hardclock() tick (and
		 * may find nothing to do if "pmc_process_samples()"
		 * had already processed the interrupt).  We don't
		 * lose the interrupt sample.
		 */
		atomic_clear_int(&pmc_cpumask, (1 << PCPU_GET(cpuid)));
		pmc_process_samples(PCPU_GET(cpuid));
		break;

	case PMC_FN_KLD_LOAD:
		sx_assert(&pmc_sx, SX_LOCKED);
		pmc_process_kld_load((struct pmckern_map_in *) arg);
		break;

	case PMC_FN_KLD_UNLOAD:
		sx_assert(&pmc_sx, SX_LOCKED);
		pmc_process_kld_unload((struct pmckern_map_out *) arg);
		break;

	case PMC_FN_MMAP:
		sx_assert(&pmc_sx, SX_LOCKED);
		pmc_process_mmap(td, (struct pmckern_map_in *) arg);
		break;

	case PMC_FN_MUNMAP:
		sx_assert(&pmc_sx, SX_LOCKED);
		pmc_process_munmap(td, (struct pmckern_map_out *) arg);
		break;

	default:
#ifdef	DEBUG
		KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
#endif
		break;
	}

	return 0;
}
/*
 * allocate a 'struct pmc_owner' descriptor in the owner hash table.
 */

static struct pmc_owner *
pmc_allocate_owner_descriptor(struct proc *p)
{
	uint32_t hindex;
	struct pmc_owner *po;
	struct pmc_ownerhash *poh;

	hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
	poh = &pmc_ownerhash[hindex];

	/* allocate space for N pointers and one descriptor struct */
	MALLOC(po, struct pmc_owner *, sizeof(struct pmc_owner),
	    M_PMC, M_ZERO|M_WAITOK);

	po->po_sscount = po->po_error = po->po_flags = 0;
	po->po_owner = p;
	po->po_kthread = NULL;
	LIST_INIT(&po->po_pmcs);
	LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */

	TAILQ_INIT(&po->po_logbuffers);
	mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN);

	PMCDBG(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
	    p, p->p_pid, p->p_comm, po);

	return po;
}

static void
pmc_destroy_owner_descriptor(struct pmc_owner *po)
{
	PMCDBG(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)",
	    po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm);

	mtx_destroy(&po->po_mtx);
	FREE(po, M_PMC);
}
/*
 * find the descriptor corresponding to process 'p', adding or removing it
 * as specified by 'mode'.
 */

static struct pmc_process *
pmc_find_process_descriptor(struct proc *p, uint32_t mode)
{
	uint32_t hindex;
	struct pmc_process *pp, *ppnew;
	struct pmc_processhash *pph;

	hindex = PMC_HASH_PTR(p, pmc_processhashmask);
	pph = &pmc_processhash[hindex];

	ppnew = NULL;

	/*
	 * Pre-allocate memory in the FIND_ALLOCATE case since we
	 * cannot call malloc(9) once we hold a spin lock.
	 */

	if (mode & PMC_FLAG_ALLOCATE) {
		/* allocate additional space for 'n' pmc pointers */
		MALLOC(ppnew, struct pmc_process *,
		    sizeof(struct pmc_process) + md->pmd_npmc *
		    sizeof(struct pmc_targetstate), M_PMC, M_ZERO|M_WAITOK);
	}

	mtx_lock_spin(&pmc_processhash_mtx);
	LIST_FOREACH(pp, pph, pp_next)
	    if (pp->pp_proc == p)
		    break;

	if ((mode & PMC_FLAG_REMOVE) && pp != NULL)
		LIST_REMOVE(pp, pp_next);

	if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL &&
	    ppnew != NULL) {
		ppnew->pp_proc = p;
		LIST_INSERT_HEAD(pph, ppnew, pp_next);
		pp = ppnew;
		ppnew = NULL;
	}
	mtx_unlock_spin(&pmc_processhash_mtx);

	if (pp != NULL && ppnew != NULL)
		FREE(ppnew, M_PMC);

	return pp;
}

/*
 * remove a process descriptor from the process hash table.
 */

static void
pmc_remove_process_descriptor(struct pmc_process *pp)
{
	KASSERT(pp->pp_refcnt == 0,
	    ("[pmc,%d] Removing process descriptor %p with count %d",
		__LINE__, pp, pp->pp_refcnt));

	mtx_lock_spin(&pmc_processhash_mtx);
	LIST_REMOVE(pp, pp_next);
	mtx_unlock_spin(&pmc_processhash_mtx);
}
/*
 * find an owner descriptor corresponding to proc 'p'.
 */

static struct pmc_owner *
pmc_find_owner_descriptor(struct proc *p)
{
	uint32_t hindex;
	struct pmc_owner *po;
	struct pmc_ownerhash *poh;

	hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
	poh = &pmc_ownerhash[hindex];

	po = NULL;
	LIST_FOREACH(po, poh, po_next)
	    if (po->po_owner == p)
		    break;

	PMCDBG(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
	    "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);

	return po;
}
/*
 * pmc_allocate_pmc_descriptor
 *
 * Allocate a pmc descriptor and initialize its
 * fields.
 */

static struct pmc *
pmc_allocate_pmc_descriptor(void)
{
	struct pmc *pmc;

	MALLOC(pmc, struct pmc *, sizeof(struct pmc), M_PMC, M_ZERO|M_WAITOK);

	pmc->pm_owner = NULL;
	LIST_INIT(&pmc->pm_targets);

	PMCDBG(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);

	return pmc;
}
/*
 * Destroy a pmc descriptor.
 */

static void
pmc_destroy_pmc_descriptor(struct pmc *pm)
{
	KASSERT(pm->pm_state == PMC_STATE_DELETED ||
	    pm->pm_state == PMC_STATE_FREE,
	    ("[pmc,%d] destroying non-deleted PMC", __LINE__));
	KASSERT(LIST_EMPTY(&pm->pm_targets),
	    ("[pmc,%d] destroying pmc with targets", __LINE__));
	KASSERT(pm->pm_owner == NULL,
	    ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
	KASSERT(pm->pm_runcount == 0,
	    ("[pmc,%d] pmc has non-zero run count %d", __LINE__,
		pm->pm_runcount));
}
static void
pmc_wait_for_pmc_idle(struct pmc *pm)
{
	volatile int maxloop;

	maxloop = 100 * mp_ncpus;

	/*
	 * Loop (with a forced context switch) till the PMC's runcount
	 * comes down to zero.
	 */
	while (atomic_load_acq_32(&pm->pm_runcount) > 0) {
		maxloop--;
		KASSERT(maxloop > 0,
		    ("[pmc,%d] (ri%d, rc%d) waiting too long for "
		     "pmc to be free", __LINE__,
		     PMC_TO_ROWINDEX(pm), pm->pm_runcount));
		pmc_force_context_switch();
	}
}
/*
 * This function does the following things:
 *
 *  - detaches the PMC from hardware
 *  - unlinks all target threads that were attached to it
 *  - removes the PMC from its owner's list
 *  - destroys the PMC private mutex
 *
 * Once this function completes, the given pmc pointer can be safely
 * FREE'd by the caller.
 */

static void
pmc_release_pmc_descriptor(struct pmc *pm)
{
	int ri, cpu;
	enum pmc_mode mode;
	struct pmc_hw *phw;
	struct pmc_owner *po;
	struct pmc_process *pp;
	struct pmc_target *ptgt, *tmp;
	struct pmc_binding pb;

	sx_assert(&pmc_sx, SX_XLOCKED);

	KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));

	ri   = PMC_TO_ROWINDEX(pm);
	mode = PMC_TO_MODE(pm);

	PMCDBG(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
	    mode);

	/*
	 * First, we take the PMC off hardware.
	 */

	cpu = 0;
	if (PMC_IS_SYSTEM_MODE(mode)) {

		/*
		 * A system mode PMC runs on a specific CPU.  Switch
		 * to this CPU and turn hardware off.
		 */
		pmc_save_cpu_binding(&pb);

		cpu = PMC_TO_CPU(pm);

		pmc_select_cpu(cpu);

		/* switch off non-stalled CPUs */
		if (pm->pm_state == PMC_STATE_RUNNING &&
		    pm->pm_stalled == 0) {

			phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];

			KASSERT(phw->phw_pmc == pm,
			    ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
				__LINE__, ri, phw->phw_pmc, pm));
			PMCDBG(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);

			md->pmd_stop_pmc(cpu, ri);
		}

		PMCDBG(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);

		md->pmd_config_pmc(cpu, ri, NULL);

		/* adjust the global and process count of SS mode PMCs */
		if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) {
			po = pm->pm_owner;
			po->po_sscount--;
			if (po->po_sscount == 0) {
				atomic_subtract_rel_int(&pmc_ss_count, 1);
				LIST_REMOVE(po, po_ssnext);
			}
		}

		pm->pm_state = PMC_STATE_DELETED;

		pmc_restore_cpu_binding(&pb);

		/*
		 * We could have references to this PMC structure in
		 * the per-cpu sample queues.  Wait for the queue to
		 * drain.
		 */
		pmc_wait_for_pmc_idle(pm);

	} else if (PMC_IS_VIRTUAL_MODE(mode)) {

		/*
		 * A virtual PMC could be running on multiple CPUs at
		 * a given instant.
		 *
		 * By marking its state as DELETED, we ensure that
		 * this PMC is never further scheduled on hardware.
		 *
		 * Then we wait till all CPUs are done with this PMC.
		 */
		pm->pm_state = PMC_STATE_DELETED;

		/* Wait for the PMC's runcount to come to zero. */
		pmc_wait_for_pmc_idle(pm);

		/*
		 * At this point the PMC is off all CPUs and cannot be
		 * freshly scheduled onto a CPU.  It is now safe to
		 * unlink all targets from this PMC.  If a
		 * process-record's refcount falls to zero, we remove
		 * it from the hash table.  The module-wide SX lock
		 * protects us from races.
		 */
		LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
			pp = ptgt->pt_process;
			pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */

			PMCDBG(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);

			/*
			 * If the target process record shows that no
			 * PMCs are attached to it, reclaim its space.
			 */

			if (pp->pp_refcnt == 0) {
				pmc_remove_process_descriptor(pp);
				FREE(pp, M_PMC);
			}
		}

		cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */
	}

	/*
	 * Release any MD resources.
	 */

	(void) md->pmd_release_pmc(cpu, ri, pm);

	/*
	 * Update row disposition.
	 */

	if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm)))
		PMC_UNMARK_ROW_STANDALONE(ri);
	else
		PMC_UNMARK_ROW_THREAD(ri);

	/* unlink from the owner's list */
	if (pm->pm_owner) {
		LIST_REMOVE(pm, pm_next);
		pm->pm_owner = NULL;
	}

	pmc_destroy_pmc_descriptor(pm);
}
/*
 * Register an owner and a pmc.
 */

static int
pmc_register_owner(struct proc *p, struct pmc *pmc)
{
	struct pmc_owner *po;

	sx_assert(&pmc_sx, SX_XLOCKED);

	if ((po = pmc_find_owner_descriptor(p)) == NULL)
		if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
			return ENOMEM;

	KASSERT(pmc->pm_owner == NULL,
	    ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
	pmc->pm_owner = po;

	LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next);

	PROC_LOCK(p);
	p->p_flag |= P_HWPMC;
	PROC_UNLOCK(p);

	if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		pmclog_process_pmcallocate(pmc);

	PMCDBG(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p",
	    po, pmc);

	return 0;
}
/*
 * Return the current row disposition:
 * == 0 => FREE
 *  > 0 => PROCESS MODE
 *  < 0 => SYSTEM MODE
 */

int
pmc_getrowdisp(int ri)
{
	return pmc_pmcdisp[ri];
}
/*
 * Check if a PMC at row index 'ri' can be allocated to the current
 * process.
 *
 * Allocation can fail if:
 *   - the current process is already being profiled by a PMC at index 'ri',
 *     attached to it via OP_PMCATTACH.
 *   - the current process has already allocated a PMC at index 'ri'
 *     via OP_PMCALLOCATE.
 */

static int
pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu)
{
	enum pmc_mode mode;
	struct pmc *pm;
	struct pmc_owner *po;
	struct pmc_process *pp;

	PMCDBG(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d "
	    "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu);

	/*
	 * We shouldn't have already allocated a process-mode PMC at
	 * row index 'ri'.
	 *
	 * We shouldn't have allocated a system-wide PMC on the same
	 * CPU and same RI.
	 */
	if ((po = pmc_find_owner_descriptor(p)) != NULL)
		LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
			if (PMC_TO_ROWINDEX(pm) == ri) {
				mode = PMC_TO_MODE(pm);
				if (PMC_IS_VIRTUAL_MODE(mode))
					return EEXIST;
				if (PMC_IS_SYSTEM_MODE(mode) &&
				    (int) PMC_TO_CPU(pm) == cpu)
					return EEXIST;
			}
		}

	/*
	 * We also shouldn't be the target of any PMC at this index
	 * since otherwise a PMC_ATTACH to ourselves will fail.
	 */
	if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
		if (pp->pp_pmcs[ri].pp_pmc)
			return EEXIST;

	PMCDBG(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
	    p, p->p_pid, p->p_comm, ri);

	return 0;
}
/*
 * Check if a given PMC at row index 'ri' can be currently used in
 * mode 'mode'.
 */

static int
pmc_can_allocate_row(int ri, enum pmc_mode mode)
{
	enum pmc_disp disp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);

	if (PMC_IS_SYSTEM_MODE(mode))
		disp = PMC_DISP_STANDALONE;
	else
		disp = PMC_DISP_THREAD;

	/*
	 * check disposition for PMC row 'ri':
	 *
	 * Expected disposition		Row-disposition		Result
	 *
	 * STANDALONE			STANDALONE or FREE	proceed
	 * STANDALONE			THREAD			fail
	 * THREAD			THREAD or FREE		proceed
	 * THREAD			STANDALONE		fail
	 */

	if (!PMC_ROW_DISP_IS_FREE(ri) &&
	    !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
	    !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
		return EBUSY;

	/*
	 * All OK
	 */

	PMCDBG(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);

	return 0;
}
/*
 * Find a PMC descriptor with user handle 'pmcid' for thread 'td'.
 */

static struct pmc *
pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
{
	struct pmc *pm;

	KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
	    ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
		PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));

	LIST_FOREACH(pm, &po->po_pmcs, pm_next)
	    if (pm->pm_id == pmcid)
		    return pm;

	return NULL;
}

static int
pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
{
	struct pmc *pm;
	struct pmc_owner *po;

	PMCDBG(PMC,FND,1, "find-pmc id=%d", pmcid);

	if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL)
		return ESRCH;

	if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
		return EINVAL;

	PMCDBG(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);

	*pmc = pm;
	return 0;
}
/*
 * Start a PMC.
 */

static int
pmc_start(struct pmc *pm)
{
	int error, cpu, ri;
	enum pmc_mode mode;
	struct pmc_owner *po;
	struct pmc_binding pb;

	KASSERT(pm != NULL,
	    ("[pmc,%d] null pm", __LINE__));

	mode = PMC_TO_MODE(pm);
	ri   = PMC_TO_ROWINDEX(pm);
	error = 0;

	PMCDBG(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);

	po = pm->pm_owner;

	if (PMC_IS_VIRTUAL_MODE(mode)) {

		/*
		 * If a PMCATTACH has never been done on this PMC,
		 * attach it to its owner process.
		 */

		if (LIST_EMPTY(&pm->pm_targets))
			error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH :
			    pmc_attach_process(po->po_owner, pm);

		/*
		 * Disallow PMCSTART if a logfile is required but has not
		 * been configured yet.
		 */

		if (error == 0 && (pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
		    (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
			error = EDOOFUS;

		/*
		 * If the PMC is attached to its owner, then force a context
		 * switch to ensure that the MD state gets set correctly.
		 */

		if (error == 0) {
			pm->pm_state = PMC_STATE_RUNNING;
			if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER)
				pmc_force_context_switch();
		}

		return error;
	}


	/*
	 * A system-wide PMC.
	 */

	if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
	    (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
		return EDOOFUS;	/* programming error */

	/*
	 * Add the owner to the global list if this is a system-wide
	 * sampling PMC.
	 */

	if (mode == PMC_MODE_SS) {
		if (po->po_sscount == 0) {
			LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
			atomic_add_rel_int(&pmc_ss_count, 1);
			PMCDBG(PMC,OPS,1, "po=%p in global list", po);
		}
		po->po_sscount++;
	}

	/* TODO: dump system wide process mappings to the log? */

	/*
	 * Move to the CPU associated with this
	 * PMC, and start the hardware.
	 */

	pmc_save_cpu_binding(&pb);

	cpu = PMC_TO_CPU(pm);

	if (pmc_cpu_is_disabled(cpu))
		return ENXIO;

	pmc_select_cpu(cpu);

	/*
	 * global PMCs are configured at allocation time
	 * so write out the initial value and start the PMC.
	 */

	pm->pm_state = PMC_STATE_RUNNING;

	if ((error = md->pmd_write_pmc(cpu, ri,
		 PMC_IS_SAMPLING_MODE(mode) ?
		 pm->pm_sc.pm_reloadcount :
		 pm->pm_sc.pm_initial)) == 0)
		error = md->pmd_start_pmc(cpu, ri);

	pmc_restore_cpu_binding(&pb);

	return error;
}
/*
 * Stop a PMC.
 */

static int
pmc_stop(struct pmc *pm)
{
	int cpu, error, ri;
	struct pmc_owner *po;
	struct pmc_binding pb;

	KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));

	PMCDBG(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm,
	    PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm));

	pm->pm_state = PMC_STATE_STOPPED;

	/*
	 * If the PMC is a virtual mode one, changing the state to
	 * non-RUNNING is enough to ensure that the PMC never gets
	 * scheduled.
	 *
	 * If this PMC is currently running on a CPU, then it will be
	 * handled correctly at the time its target process is context
	 * switched out.
	 */

	if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
		return 0;

	/*
	 * A system-mode PMC.  Move to the CPU associated with
	 * this PMC, and stop the hardware.  We update the
	 * 'initial count' so that a subsequent PMCSTART will
	 * resume counting from the current hardware count.
	 */

	pmc_save_cpu_binding(&pb);

	cpu = PMC_TO_CPU(pm);

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[pmc,%d] illegal cpu=%d", __LINE__, cpu));

	if (pmc_cpu_is_disabled(cpu))
		return ENXIO;

	pmc_select_cpu(cpu);

	ri = PMC_TO_ROWINDEX(pm);

	if ((error = md->pmd_stop_pmc(cpu, ri)) == 0)
		error = md->pmd_read_pmc(cpu, ri, &pm->pm_sc.pm_initial);

	pmc_restore_cpu_binding(&pb);

	/* remove this owner from the global list of SS PMC owners */
	if (PMC_TO_MODE(pm) == PMC_MODE_SS) {
		po = pm->pm_owner;
		po->po_sscount--;
		if (po->po_sscount == 0) {
			atomic_subtract_rel_int(&pmc_ss_count, 1);
			LIST_REMOVE(po, po_ssnext);
			PMCDBG(PMC,OPS,2,"po=%p removed from global list", po);
		}
	}

	return error;
}
2493 static const char *pmc_op_to_name[] = {
2495 #define __PMC_OP(N, D) #N ,
2502 * The syscall interface
2505 #define PMC_GET_SX_XLOCK(...) do { \
2506 sx_xlock(&pmc_sx); \
2507 if (pmc_hook == NULL) { \
2508 sx_xunlock(&pmc_sx); \
2509 return __VA_ARGS__; \
2513 #define PMC_DOWNGRADE_SX() do { \
2514 sx_downgrade(&pmc_sx); \
2515 is_sx_downgraded = 1; \
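/*
 * Editor's sketch (not driver code): a userland consumer marshals one
 * operation per trap through the dynamically allocated syscall slot.
 * Field names follow <sys/pmc.h>; treat the exact layout and wrapper
 * shown here as assumptions of this sketch.
 *
 *	struct pmc_op_simple sp = { .pm_pmcid = pmcid };
 *	struct pmc_syscall_args args;
 *
 *	args.pmop_code = PMC_OP_PMCSTART;  // selects a case below
 *	args.pmop_data = &sp;              // copyin()'d by the handler
 *	syscall(pmc_syscall_num, &args);   // lands in pmc_syscall_handler()
 *
 * In practice libpmc(3) hides this; pmc_start(pmcid) issues the
 * equivalent trap.
 */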
2519 pmc_syscall_handler(struct thread *td, void *syscall_args)
2521 int error, is_sx_downgraded, op;
2522 struct pmc_syscall_args *c;
2523 void *arg;
2525 PMC_GET_SX_XLOCK(ENOSYS);
2529 is_sx_downgraded = 0;
2531 c = (struct pmc_syscall_args *) syscall_args;
2533 op = c->pmop_code;
2534 arg = c->pmop_data;
2536 PMCDBG(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
2537 pmc_op_to_name[op], arg);
2540 atomic_add_int(&pmc_stats.pm_syscalls, 1);
2547 * Configure a log file.
2549 * XXX This OP will be reworked.
2552 case PMC_OP_CONFIGURELOG:
2556 struct pmc_owner *po;
2557 struct pmckern_map_in *km, *kmbase;
2558 struct pmc_op_configurelog cl;
2560 sx_assert(&pmc_sx, SX_XLOCKED);
2562 if ((error = copyin(arg, &cl, sizeof(cl))) != 0)
2565 /* mark this process as owning a log file */
2566 p = td->td_proc;
2567 if ((po = pmc_find_owner_descriptor(p)) == NULL)
2568 if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
2574 * If a valid fd was passed in, try to configure that,
2575 * otherwise if 'fd' was less than zero and there was
2576 * a log file configured, flush its buffers and
2577 * de-configure it.
2579 if (cl.pm_logfd >= 0)
2580 error = pmclog_configure_log(po, cl.pm_logfd);
2581 else if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
2582 pmclog_process_closelog(po);
2583 error = pmclog_flush(po);
2585 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
2586 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
2587 pm->pm_state == PMC_STATE_RUNNING)
2588 pmc_stop(pm);
2589 error = pmclog_deconfigure_log(po);
2598 * Log the current set of kernel modules.
2600 kmbase = linker_hwpmc_list_objects();
2601 for (km = kmbase; km->pm_file != NULL; km++) {
2602 PMCDBG(LOG,REG,1,"%s %p", (char *) km->pm_file,
2603 (void *) km->pm_address);
2604 pmclog_process_map_in(po, (pid_t) -1, km->pm_address,
2605 (char *) km->pm_file);
2607 FREE(kmbase, M_LINKER);
2616 case PMC_OP_FLUSHLOG:
2618 struct pmc_owner *po;
2620 sx_assert(&pmc_sx, SX_XLOCKED);
2622 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
2627 error = pmclog_flush(po);
2632 * Retrieve hardware configuration.
2635 case PMC_OP_GETCPUINFO: /* CPU information */
2637 struct pmc_op_getcpuinfo gci;
2639 gci.pm_cputype = md->pmd_cputype;
2640 gci.pm_ncpu = mp_ncpus;
2641 gci.pm_npmc = md->pmd_npmc;
2642 gci.pm_nclass = md->pmd_nclass;
2643 bcopy(md->pmd_classes, &gci.pm_classes,
2644 sizeof(gci.pm_classes));
2645 error = copyout(&gci, arg, sizeof(gci));
2651 * Get module statistics
2654 case PMC_OP_GETDRIVERSTATS:
2656 struct pmc_op_getdriverstats gms;
2658 bcopy(&pmc_stats, &gms, sizeof(gms));
2659 error = copyout(&gms, arg, sizeof(gms));
2665 * Retrieve module version number
2668 case PMC_OP_GETMODULEVERSION:
2672 /* retrieve the client's idea of the ABI version */
2673 if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0)
2675 /* don't service clients newer than our driver */
2676 modv = PMC_VERSION;
2677 if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) {
2678 error = EPROGMISMATCH;
2681 error = copyout(&modv, arg, sizeof(int));
2687 * Retrieve the state of all the PMCs on a given
2688 * CPU.
2691 case PMC_OP_GETPMCINFO:
2693 uint32_t cpu, n, npmc;
2694 size_t pmcinfo_size;
2696 struct pmc_info *p, *pmcinfo;
2697 struct pmc_op_getpmcinfo *gpi;
2698 struct pmc_owner *po;
2699 struct pmc_binding pb;
2703 gpi = (struct pmc_op_getpmcinfo *) arg;
2705 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
2708 if (cpu >= (unsigned int) mp_ncpus) {
2713 if (pmc_cpu_is_disabled(cpu)) {
2718 /* switch to CPU 'cpu' */
2719 pmc_save_cpu_binding(&pb);
2720 pmc_select_cpu(cpu);
2722 npmc = md->pmd_npmc;
2724 pmcinfo_size = npmc * sizeof(struct pmc_info);
2725 MALLOC(pmcinfo, struct pmc_info *, pmcinfo_size, M_PMC,
2726 M_WAITOK);
2728 p = pmcinfo;
2730 for (n = 0; n < md->pmd_npmc; n++, p++) {
2732 if ((error = md->pmd_describe(cpu, n, p, &pm)) != 0)
2735 if (PMC_ROW_DISP_IS_STANDALONE(n))
2736 p->pm_rowdisp = PMC_DISP_STANDALONE;
2737 else if (PMC_ROW_DISP_IS_THREAD(n))
2738 p->pm_rowdisp = PMC_DISP_THREAD;
2740 p->pm_rowdisp = PMC_DISP_FREE;
2742 p->pm_ownerpid = -1;
2744 if (pm == NULL) /* no PMC associated */
2745 continue;
2747 po = pm->pm_owner;
2749 KASSERT(po->po_owner != NULL,
2750 ("[pmc,%d] pmc_owner had a null proc pointer",
2753 p->pm_ownerpid = po->po_owner->p_pid;
2754 p->pm_mode = PMC_TO_MODE(pm);
2755 p->pm_event = pm->pm_event;
2756 p->pm_flags = pm->pm_flags;
2758 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
2759 p->pm_reloadcount =
2760 pm->pm_sc.pm_reloadcount;
2763 pmc_restore_cpu_binding(&pb);
2765 /* now copy out the PMC info collected */
2767 error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
2769 FREE(pmcinfo, M_PMC);
2775 * Set the administrative state of a PMC, i.e., whether
2776 * the PMC is to be used or not.
2779 case PMC_OP_PMCADMIN:
2782 enum pmc_state request;
2785 struct pmc_op_pmcadmin pma;
2786 struct pmc_binding pb;
2788 sx_assert(&pmc_sx, SX_XLOCKED);
2790 KASSERT(td == curthread,
2791 ("[pmc,%d] td != curthread", __LINE__));
2793 error = priv_check(td, PRIV_PMC_MANAGE);
2794 if (error)
2795 break;
2797 if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
2802 if (cpu < 0 || cpu >= mp_ncpus) {
2807 if (pmc_cpu_is_disabled(cpu)) {
2812 request = pma.pm_state;
2814 if (request != PMC_STATE_DISABLED &&
2815 request != PMC_STATE_FREE) {
2820 ri = pma.pm_pmc; /* pmc id == row index */
2821 if (ri < 0 || ri >= (int) md->pmd_npmc) {
2827 * We can't disable a PMC with a row-index allocated
2828 * for process virtual PMCs.
2831 if (PMC_ROW_DISP_IS_THREAD(ri) &&
2832 request == PMC_STATE_DISABLED) {
2838 * otherwise, this PMC on this CPU is either free or
2839 * in system-wide mode.
2842 pmc_save_cpu_binding(&pb);
2843 pmc_select_cpu(cpu);
2846 phw = pc->pc_hwpmcs[ri];
2849 * XXX do we need some kind of 'forced' disable?
2852 if (phw->phw_pmc == NULL) {
2853 if (request == PMC_STATE_DISABLED &&
2854 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
2855 phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
2856 PMC_MARK_ROW_STANDALONE(ri);
2857 } else if (request == PMC_STATE_FREE &&
2858 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
2859 phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
2860 PMC_UNMARK_ROW_STANDALONE(ri);
2862 /* other cases are a no-op */
2866 pmc_restore_cpu_binding(&pb);
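/*
 * Editor's note: userland normally reaches PMC_OP_PMCADMIN through
 * pmccontrol(8); e.g. "pmccontrol -c 2 -d 1" requests
 * PMC_STATE_DISABLED for row 1 on CPU 2, and "-e" requests
 * PMC_STATE_FREE (flag spellings per the manual page; illustrative).
 */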
2875 case PMC_OP_PMCALLOCATE:
2883 struct pmc_op_pmcallocate pa;
2884 struct pmc_binding pb;
2886 if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
2893 if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC &&
2894 mode != PMC_MODE_TS && mode != PMC_MODE_TC) ||
2895 (cpu != (u_int) PMC_CPU_ANY && cpu >= (u_int) mp_ncpus)) {
2901 * Virtual PMCs should only ask for a default CPU.
2902 * System mode PMCs need to specify a non-default CPU.
2905 if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
2906 (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
2912 * Check that a disabled CPU is not being asked for.
2915 if (PMC_IS_SYSTEM_MODE(mode) && pmc_cpu_is_disabled(cpu)) {
2921 * Refuse an allocation for a system-wide PMC if this
2922 * process has been jailed, or if this process lacks
2923 * super-user credentials and the sysctl tunable
2924 * 'security.bsd.unprivileged_syspmcs' is zero.
2927 if (PMC_IS_SYSTEM_MODE(mode)) {
2928 if (jailed(curthread->td_ucred)) {
2932 if (!pmc_unprivileged_syspmcs) {
2933 error = priv_check(curthread,
2944 * Look for valid values for 'pm_flags'
2947 if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW |
2948 PMC_F_LOG_PROCEXIT)) != 0) {
2953 /* process logging options are not allowed for system PMCs */
2954 if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags &
2955 (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) {
2961 * All sampling mode PMCs need to be able to interrupt the
2962 * CPU.
2964 if (PMC_IS_SAMPLING_MODE(mode))
2965 caps |= PMC_CAP_INTERRUPT;
2967 /* A valid class specifier should have been passed in. */
2968 for (n = 0; n < md->pmd_nclass; n++)
2969 if (md->pmd_classes[n].pm_class == pa.pm_class)
2970 break;
2971 if (n == md->pmd_nclass) {
2976 /* The requested PMC capabilities should be feasible. */
2977 if ((md->pmd_classes[n].pm_caps & caps) != caps) {
2982 PMCDBG(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
2983 pa.pm_ev, caps, mode, cpu);
2985 pmc = pmc_allocate_pmc_descriptor();
2986 pmc->pm_id = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class,
2988 pmc->pm_event = pa.pm_ev;
2989 pmc->pm_state = PMC_STATE_FREE;
2990 pmc->pm_caps = caps;
2991 pmc->pm_flags = pa.pm_flags;
2993 /* switch thread to CPU 'cpu' */
2994 pmc_save_cpu_binding(&pb);
2996 #define PMC_IS_SHAREABLE_PMC(cpu, n) \
2997 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \
2998 PMC_PHW_FLAG_IS_SHAREABLE)
2999 #define PMC_IS_UNALLOCATED(cpu, n) \
3000 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
3002 if (PMC_IS_SYSTEM_MODE(mode)) {
3003 pmc_select_cpu(cpu);
3004 for (n = 0; n < (int) md->pmd_npmc; n++)
3005 if (pmc_can_allocate_row(n, mode) == 0 &&
3006 pmc_can_allocate_rowindex(
3007 curthread->td_proc, n, cpu) == 0 &&
3008 (PMC_IS_UNALLOCATED(cpu, n) ||
3009 PMC_IS_SHAREABLE_PMC(cpu, n)) &&
3010 md->pmd_allocate_pmc(cpu, n, pmc,
3011 &pa) == 0)
3012 break;
3014 /* Process virtual mode */
3015 for (n = 0; n < (int) md->pmd_npmc; n++) {
3016 if (pmc_can_allocate_row(n, mode) == 0 &&
3017 pmc_can_allocate_rowindex(
3018 curthread->td_proc, n,
3019 PMC_CPU_ANY) == 0 &&
3020 md->pmd_allocate_pmc(curthread->td_oncpu,
3021 n, pmc, &pa) == 0)
3022 break;
3026 #undef PMC_IS_UNALLOCATED
3027 #undef PMC_IS_SHAREABLE_PMC
3029 pmc_restore_cpu_binding(&pb);
3031 if (n == (int) md->pmd_npmc) {
3032 pmc_destroy_pmc_descriptor(pmc);
3039 /* Fill in the correct value in the ID field */
3040 pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n);
3042 PMCDBG(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x",
3043 pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id);
3045 /* Process mode PMCs with logging enabled need log files */
3046 if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW))
3047 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
3049 /* All system mode sampling PMCs require a log file */
3050 if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode))
3051 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
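/*
 * Editor's note: together with the check in pmc_start(), this means a
 * consumer must issue PMC_OP_CONFIGURELOG before a system-wide
 * sampling PMC (or a process PMC using the PMC_F_LOG_* flags) can be
 * started.
 */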
3054 * Configure global PMCs immediately.
3057 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) {
3059 pmc_save_cpu_binding(&pb);
3060 pmc_select_cpu(cpu);
3062 phw = pmc_pcpu[cpu]->pc_hwpmcs[n];
3064 if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 ||
3065 (error = md->pmd_config_pmc(cpu, n, pmc)) != 0) {
3066 (void) md->pmd_release_pmc(cpu, n, pmc);
3067 pmc_destroy_pmc_descriptor(pmc);
3070 pmc_restore_cpu_binding(&pb);
3075 pmc_restore_cpu_binding(&pb);
3078 pmc->pm_state = PMC_STATE_ALLOCATED;
3081 * mark row disposition
3084 if (PMC_IS_SYSTEM_MODE(mode))
3085 PMC_MARK_ROW_STANDALONE(n);
3087 PMC_MARK_ROW_THREAD(n);
3090 * Register this PMC with the current thread as its owner.
3093 if ((error =
3094 pmc_register_owner(curthread->td_proc, pmc)) != 0) {
3095 pmc_release_pmc_descriptor(pmc);
3102 * Return the id of the allocated PMC.
3105 pa.pm_pmcid = pmc->pm_id;
3107 error = copyout(&pa, arg, sizeof(pa));
3113 * Attach a PMC to a process.
3116 case PMC_OP_PMCATTACH:
3120 struct pmc_op_pmcattach a;
3122 sx_assert(&pmc_sx, SX_XLOCKED);
3124 if ((error = copyin(arg, &a, sizeof(a))) != 0)
3125 break;
3126 if (a.pm_pid < 0) {
3127 error = EINVAL;
3128 break;
3130 } else if (a.pm_pid == 0)
3131 a.pm_pid = td->td_proc->p_pid;
3133 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
3136 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
3141 /* PMCs may be (re)attached only when allocated or stopped */
3142 if (pm->pm_state == PMC_STATE_RUNNING) {
3145 } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
3146 pm->pm_state != PMC_STATE_STOPPED) {
3152 if ((p = pfind(a.pm_pid)) == NULL) {
3158 * Ignore processes that are exiting.
3160 if (p->p_flag & P_WEXIT) {
3162 PROC_UNLOCK(p); /* pfind() returns a locked process */
3167 * we are allowed to attach a PMC to a process if
3168 * we can debug it.
3170 error = p_candebug(curthread, p);
3172 PROC_UNLOCK(p);
3174 if (error == 0)
3175 error = pmc_attach_process(p, pm);
3181 * Detach an attached PMC from a process.
3184 case PMC_OP_PMCDETACH:
3188 struct pmc_op_pmcattach a;
3190 if ((error = copyin(arg, &a, sizeof(a))) != 0)
3191 break;
3192 if (a.pm_pid < 0) {
3193 error = EINVAL;
3194 break;
3196 } else if (a.pm_pid == 0)
3197 a.pm_pid = td->td_proc->p_pid;
3199 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
3202 if ((p = pfind(a.pm_pid)) == NULL) {
3208 * Treat processes that are in the process of exiting
3209 * as if they were not present.
3212 if (p->p_flag & P_WEXIT)
3213 error = ESRCH;
3215 PROC_UNLOCK(p); /* pfind() returns a locked process */
3217 if (error == 0)
3218 error = pmc_detach_process(p, pm);
3224 * Retrieve the MSR number associated with the counter
3225 * 'pmc_id'. This allows processes to directly use RDPMC
3226 * instructions to read their PMCs, without the overhead of a
3227 * system call.
3230 case PMC_OP_PMCGETMSR:
3234 struct pmc_target *pt;
3235 struct pmc_op_getmsr gm;
3239 /* CPU has no 'GETMSR' support */
3240 if (md->pmd_get_msr == NULL) {
3245 if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
3248 if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
3252 * The allocated PMC has to be a process virtual PMC,
3253 * i.e., of type MODE_T[CS]. Global PMCs can only be
3254 * read using the PMCREAD operation since they may be
3255 * allocated on a different CPU than the one we could
3256 * be running on at the time of the RDPMC instruction.
3258 * The GETMSR operation is not allowed for PMCs that
3259 * are inherited across processes.
3262 if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) ||
3263 (pm->pm_flags & PMC_F_DESCENDANTS)) {
3269 * It only makes sense to use a RDPMC (or its
3270 * equivalent instruction on non-x86 architectures) on
3271 * a process that has allocated and attached a PMC to
3272 * itself. Conversely, the PMC is only allowed to have
3273 * one process attached to it -- its owner.
3276 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL ||
3277 LIST_NEXT(pt, pt_next) != NULL ||
3278 pt->pt_process->pp_proc != pm->pm_owner->po_owner) {
3283 ri = PMC_TO_ROWINDEX(pm);
3285 if ((error = (*md->pmd_get_msr)(ri, &gm.pm_msr)) < 0)
3288 if ((error = copyout(&gm, arg, sizeof(gm))) < 0)
3292 * Mark our process as using MSRs. Update machine
3293 * state using a forced context switch.
3296 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS;
3297 pmc_force_context_switch();
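/*
 * Editor's sketch of the userland side (illustrative, x86 only): once
 * PMC_OP_PMCGETMSR has returned gm.pm_msr, the owner process can read
 * its attached PMC without trapping into the kernel:
 *
 *	static inline uint64_t
 *	rdpmc(uint32_t ctr)
 *	{
 *		uint32_t lo, hi;
 *		__asm __volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (ctr));
 *		return ((uint64_t) hi << 32) | lo;
 *	}
 *
 * The forced context switch above lets the MD layer enable user-mode
 * RDPMC (CR4.PCE on i386/amd64) before the process runs again.
 */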
3303 * Release an allocated PMC
3306 case PMC_OP_PMCRELEASE:
3310 struct pmc_owner *po;
3311 struct pmc_op_simple sp;
3314 * Find PMC pointer for the named PMC.
3316 * Use pmc_release_pmc_descriptor() to switch off the
3317 * PMC, remove all its target threads, and remove the
3318 * PMC from its owner's list.
3320 * Remove the owner record if this is the last PMC
3321 * owned.
3326 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3329 pmcid = sp.pm_pmcid;
3331 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3333 po = pm->pm_owner;
3335 pmc_release_pmc_descriptor(pm);
3336 pmc_maybe_remove_owner(po);
3344 * Read and/or write a PMC.
3351 struct pmc_op_pmcrw *pprw;
3352 struct pmc_op_pmcrw prw;
3353 struct pmc_binding pb;
3354 pmc_value_t oldvalue;
3358 if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
3362 PMCDBG(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
3363 prw.pm_flags);
3365 /* must have at least one flag set */
3366 if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
3371 /* locate pmc descriptor */
3372 if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
3375 /* Can't read a PMC that hasn't been started. */
3376 if (pm->pm_state != PMC_STATE_ALLOCATED &&
3377 pm->pm_state != PMC_STATE_STOPPED &&
3378 pm->pm_state != PMC_STATE_RUNNING) {
3383 /* writing a new value is allowed only for 'STOPPED' PMCs */
3384 if (pm->pm_state == PMC_STATE_RUNNING &&
3385 (prw.pm_flags & PMC_F_NEWVALUE)) {
3390 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
3393 * If this PMC is attached to its owner (i.e.,
3394 * the process requesting this operation) and
3395 * is running, then attempt to get an
3396 * up-to-date reading from the hardware for a READ.
3397 * Writes are only allowed when the PMC is
3398 * stopped, so only update the saved value
3399 * field.
3401 * If the PMC is not running, or is not
3402 * attached to its owner, read/write to the
3403 * saved value field.
3406 ri = PMC_TO_ROWINDEX(pm);
3408 mtx_pool_lock_spin(pmc_mtxpool, pm);
3409 cpu = curthread->td_oncpu;
3411 if (prw.pm_flags & PMC_F_OLDVALUE) {
3412 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
3413 (pm->pm_state == PMC_STATE_RUNNING))
3414 error = (*md->pmd_read_pmc)(cpu, ri,
3415 &oldvalue);
3416 else
3417 oldvalue = pm->pm_gv.pm_savedvalue;
3419 if (prw.pm_flags & PMC_F_NEWVALUE)
3420 pm->pm_gv.pm_savedvalue = prw.pm_value;
3422 mtx_pool_unlock_spin(pmc_mtxpool, pm);
3424 } else { /* System mode PMCs */
3425 cpu = PMC_TO_CPU(pm);
3426 ri = PMC_TO_ROWINDEX(pm);
3428 if (pmc_cpu_is_disabled(cpu)) {
3433 /* move this thread to CPU 'cpu' */
3434 pmc_save_cpu_binding(&pb);
3435 pmc_select_cpu(cpu);
3438 /* save old value */
3439 if (prw.pm_flags & PMC_F_OLDVALUE)
3440 if ((error = (*md->pmd_read_pmc)(cpu, ri,
3443 /* write out new value */
3444 if (prw.pm_flags & PMC_F_NEWVALUE)
3445 error = (*md->pmd_write_pmc)(cpu, ri,
3446 prw.pm_value);
3449 pmc_restore_cpu_binding(&pb);
3454 pprw = (struct pmc_op_pmcrw *) arg;
3457 if (prw.pm_flags & PMC_F_NEWVALUE)
3458 PMCDBG(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
3459 ri, prw.pm_value, oldvalue);
3460 else if (prw.pm_flags & PMC_F_OLDVALUE)
3461 PMCDBG(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue);
3464 /* return old value if requested */
3465 if (prw.pm_flags & PMC_F_OLDVALUE)
3466 if ((error = copyout(&oldvalue, &pprw->pm_value,
3467 sizeof(prw.pm_value))))
3468 break;
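/*
 * Usage sketch (editor's illustration): passing both flags swaps the
 * PMC value atomically with respect to this operation.  The PMC must
 * not be running when a new value is written (see the check above):
 *
 *	prw.pm_pmcid = pmcid;
 *	prw.pm_flags = PMC_F_OLDVALUE | PMC_F_NEWVALUE;
 *	prw.pm_value = 0;		// new count to install
 *	// ...issue PMC_OP_PMCRW; on success the previous count has
 *	// been copied out into prw.pm_value...
 */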
3475 * Set the sampling rate for a sampling mode PMC and the
3476 * initial count for a counting mode PMC.
3479 case PMC_OP_PMCSETCOUNT:
3482 struct pmc_op_pmcsetcount sc;
3486 if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
3489 if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
3492 if (pm->pm_state == PMC_STATE_RUNNING) {
3497 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3498 pm->pm_sc.pm_reloadcount = sc.pm_count;
3500 pm->pm_sc.pm_initial = sc.pm_count;
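/*
 * Example (editor's note): sc.pm_count = 65536 makes a sampling PMC
 * interrupt once every 65536 events, whereas a counting PMC is simply
 * pre-loaded with 65536 and counts on from there.
 */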
3509 case PMC_OP_PMCSTART:
3513 struct pmc_op_simple sp;
3515 sx_assert(&pmc_sx, SX_XLOCKED);
3517 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3520 pmcid = sp.pm_pmcid;
3522 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3525 KASSERT(pmcid == pm->pm_id,
3526 ("[pmc,%d] pmcid %x != id %x", __LINE__,
3527 pm->pm_id, pmcid));
3529 if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
3530 break;
3531 else if (pm->pm_state != PMC_STATE_STOPPED &&
3532 pm->pm_state != PMC_STATE_ALLOCATED) {
3537 error = pmc_start(pm);
3546 case PMC_OP_PMCSTOP:
3550 struct pmc_op_simple sp;
3554 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3557 pmcid = sp.pm_pmcid;
3560 * Mark the PMC as inactive and invoke the MD stop
3561 * routines if needed.
3564 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3567 KASSERT(pmcid == pm->pm_id,
3568 ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
3569 pm->pm_id, pmcid));
3571 if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
3572 break;
3573 else if (pm->pm_state != PMC_STATE_RUNNING) {
3578 error = pmc_stop(pm);
3584 * Write a user-supplied value to the log file.
3587 case PMC_OP_WRITELOG:
3589 struct pmc_op_writelog wl;
3590 struct pmc_owner *po;
3594 if ((error = copyin(arg, &wl, sizeof(wl))) != 0)
3597 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3602 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
3607 error = pmclog_process_userlog(po, &wl);
3617 if (is_sx_downgraded)
3618 sx_sunlock(&pmc_sx);
3619 else
3620 sx_xunlock(&pmc_sx);
3622 if (error)
3623 atomic_add_int(&pmc_stats.pm_syscall_errors, 1);
3636 * Interrupt processing.
3638 * Find a free slot in the per-cpu array of PC samples and write the
3639 * current (PMC,PID,PC) triple to it. If an event was successfully
3640 * added, a bit is set in mask 'pmc_cpumask' denoting that the
3641 * DO_SAMPLES hook needs to be invoked from the clock handler.
3643 * This function is meant to be called from an NMI handler. It cannot
3644 * use any of the locking primitives supplied by the OS.
3648 pmc_process_interrupt(int cpu, struct pmc *pm, uintfptr_t pc, int usermode)
3650 int error, ri;
3651 struct thread *td;
3652 struct pmc_sample *ps;
3653 struct pmc_samplebuffer *psb;
3656 ri = PMC_TO_ROWINDEX(pm);
3658 psb = pmc_pcpu[cpu]->pc_sb;
3660 ps = psb->ps_write;
3661 if (ps->ps_pc) { /* in use, reader hasn't caught up */
3662 pm->pm_stalled = 1;
3663 atomic_add_int(&pmc_stats.pm_intr_bufferfull, 1);
3664 PMCDBG(SAM,INT,1,"(spc) cpu=%d pm=%p pc=%jx um=%d wr=%d rd=%d",
3665 cpu, pm, (uint64_t) pc, usermode,
3666 (int) (psb->ps_write - psb->ps_samples),
3667 (int) (psb->ps_read - psb->ps_samples));
3673 PMCDBG(SAM,INT,1,"cpu=%d pm=%p pc=%jx um=%d wr=%d rd=%d", cpu, pm,
3674 (uint64_t) pc, usermode,
3675 (int) (psb->ps_write - psb->ps_samples),
3676 (int) (psb->ps_read - psb->ps_samples));
3678 atomic_add_rel_32(&pm->pm_runcount, 1); /* hold onto PMC */
3680 if ((td = curthread) && td->td_proc)
3681 ps->ps_pid = td->td_proc->p_pid;
3682 else
3683 ps->ps_pid = -1;
3684 ps->ps_usermode = usermode;
3685 ps->ps_pc = pc; /* mark entry as in use */
3687 /* increment write pointer, modulo ring buffer size */
3688 ps++;
3689 if (ps == psb->ps_fence)
3690 psb->ps_write = psb->ps_samples;
3691 else
3692 psb->ps_write = ps;
3695 /* mark CPU as needing processing */
3696 atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
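/*
 * Editor's summary of the ring's lock-free contract: ps_pc doubles as
 * the "slot in use" flag, so the NMI-context producer above and the
 * hardclock-context consumer in pmc_process_samples() never need a
 * shared lock on a given CPU:
 *
 *	producer (NMI):                   consumer (hardclock):
 *	  if (ps->ps_pc) stall PMC;         if (ps->ps_pc == 0) stop;
 *	  fill sample; ps->ps_pc = pc;      log sample; ps->ps_pc = 0;
 *	  advance ps_write;                 advance ps_read;
 */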
3703 * Process saved PC samples.
3707 pmc_process_samples(int cpu)
3712 struct pmc_owner *po;
3713 struct pmc_sample *ps;
3714 struct pmc_samplebuffer *psb;
3716 KASSERT(PCPU_GET(cpuid) == cpu,
3717 ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__,
3718 PCPU_GET(cpuid), cpu));
3720 psb = pmc_pcpu[cpu]->pc_sb;
3722 for (n = 0; n < pmc_nsamples; n++) { /* bound on #iterations */
3723 ps = psb->ps_read;
3725 if (ps->ps_pc == (uintfptr_t) 0) /* no data */
3726 break;
3731 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
3732 ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__,
3733 pm, PMC_TO_MODE(pm)));
3735 /* Ignore PMCs that have been switched off */
3736 if (pm->pm_state != PMC_STATE_RUNNING)
3739 PMCDBG(SAM,OPS,1,"cpu=%d pm=%p pc=%jx um=%d wr=%d rd=%d", cpu,
3740 pm, (uint64_t) ps->ps_pc, ps->ps_usermode,
3741 (int) (psb->ps_write - psb->ps_samples),
3742 (int) (psb->ps_read - psb->ps_samples));
3745 * If this is a process-mode PMC that is attached to
3746 * its owner, and if the PC is in user mode, update
3747 * profiling statistics like timer-based profiling
3748 * would have done.
3750 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
3751 if (ps->ps_usermode) {
3752 td = FIRST_THREAD_IN_PROC(po->po_owner);
3753 addupc_intr(td, ps->ps_pc, 1);
3759 * Otherwise, this is either a sampling mode PMC that
3760 * is attached to a different process than its owner,
3761 * or a system-wide sampling PMC. Dispatch a log
3762 * entry to the PMC's owner process.
3765 pmclog_process_pcsample(pm, ps);
3768 ps->ps_pc = (uintfptr_t) 0; /* mark entry as free */
3769 atomic_subtract_rel_32(&pm->pm_runcount, 1);
3771 /* increment read pointer, modulo ring buffer size */
3772 if (++ps == psb->ps_fence)
3773 psb->ps_read = psb->ps_samples;
3774 else
3775 psb->ps_read = ps;
3778 atomic_add_int(&pmc_stats.pm_log_sweeps, 1);
3780 /* Do not re-enable stalled PMCs if we failed to process any samples */
3781 if (n == 0)
3782 return;
3785 * Restart any stalled sampling PMCs on this CPU.
3787 * If the NMI handler sets the pm_stalled field of a PMC after
3788 * the check below, we'll end up processing the stalled PMC at
3789 * the next hardclock tick.
3791 for (n = 0; n < md->pmd_npmc; n++) {
3792 (void) (*md->pmd_get_config)(cpu,n,&pm);
3793 if (pm == NULL || /* !cfg'ed */
3794 pm->pm_state != PMC_STATE_RUNNING || /* !active */
3795 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */
3796 pm->pm_stalled == 0) /* !stalled */
3797 continue;
3799 pm->pm_stalled = 0;
3800 ri = PMC_TO_ROWINDEX(pm);
3801 (*md->pmd_start_pmc)(cpu, ri);
3810 * Handle a process exit.
3812 * Remove this process from all hash tables. If this process
3813 * owned any PMCs, turn off those PMCs and deallocate them,
3814 * removing any associations with target processes.
3816 * This function will be called by the last 'thread' of a
3817 * process.
3819 * XXX This eventhandler gets called early in the exit process.
3820 * Consider using a 'hook' invocation from thread_exit() or equivalent
3821 * spot. Another negative is that kse_exit doesn't seem to call
3822 * exit1() [??].
3827 pmc_process_exit(void *arg __unused, struct proc *p)
3829 int is_using_hwpmcs;
3833 struct pmc_process *pp;
3834 struct pmc_owner *po;
3835 pmc_value_t newvalue, tmp;
3838 is_using_hwpmcs = p->p_flag & P_HWPMC;
3842 * Log a sysexit event to all SS PMC owners.
3844 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
3845 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
3846 pmclog_process_sysexit(po, p->p_pid);
3848 if (!is_using_hwpmcs)
3849 return;
3852 PMCDBG(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
3856 * Since this code is invoked by the last thread in an exiting
3857 * process, we would have context switched IN at some prior
3858 * point. However, with PREEMPTION, kernel mode context
3859 * switches may happen any time, so we want to disable a
3860 * context switch OUT till we get any PMCs targeting this
3861 * process off the hardware.
3863 * We also need to atomically remove this process'
3864 * entry from our target process hash table, using
3865 * PMC_FLAG_REMOVE.
3870 critical_enter(); /* no preemption */
3872 cpu = curthread->td_oncpu;
3874 if ((pp = pmc_find_process_descriptor(p,
3875 PMC_FLAG_REMOVE)) != NULL) {
3878 "process-exit proc=%p pmc-process=%p", p, pp);
3881 * The exiting process could be the target of
3882 * some PMCs which will be running on the
3883 * currently executing CPU.
3885 * We need to turn these PMCs off like we
3886 * would do at context switch OUT time.
3888 for (ri = 0; ri < md->pmd_npmc; ri++) {
3891 * Pick up the pmc pointer from hardware
3892 * state similar to the CSW_OUT code.
3895 (void) (*md->pmd_get_config)(cpu, ri, &pm);
3897 PMCDBG(PRC,EXT,2, "ri=%d pm=%p", ri, pm);
3899 if (pm == NULL ||
3900 !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
3901 continue;
3903 PMCDBG(PRC,EXT,2, "ppmcs[%d]=%p pm=%p "
3904 "state=%d", ri, pp->pp_pmcs[ri].pp_pmc,
3907 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
3908 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
3909 __LINE__, PMC_TO_ROWINDEX(pm), ri));
3911 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
3912 ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
3913 __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc));
3915 (void) md->pmd_stop_pmc(cpu, ri);
3917 KASSERT(pm->pm_runcount > 0,
3918 ("[pmc,%d] bad runcount ri %d rc %d",
3919 __LINE__, ri, pm->pm_runcount));
3921 /* Fold the current count into the saved value only if the PMC was actually running */
3922 if (pm->pm_state == PMC_STATE_RUNNING &&
3923 pm->pm_stalled == 0) {
3924 md->pmd_read_pmc(cpu, ri, &newvalue);
3925 tmp = newvalue -
3926 PMC_PCPU_SAVED(cpu,ri);
3928 mtx_pool_lock_spin(pmc_mtxpool, pm);
3929 pm->pm_gv.pm_savedvalue += tmp;
3930 pp->pp_pmcs[ri].pp_pmcval += tmp;
3931 mtx_pool_unlock_spin(pmc_mtxpool, pm);
3934 atomic_subtract_rel_32(&pm->pm_runcount,1);
3936 KASSERT((int) pm->pm_runcount >= 0,
3937 ("[pmc,%d] runcount is %d", __LINE__,
3938 (int) pm->pm_runcount));
3939 (void) md->pmd_config_pmc(cpu, ri, NULL);
3944 * Inform the MD layer of this pseudo "context switch
3945 * out".
3946 (void) md->pmd_switch_out(pmc_pcpu[cpu], pp);
3948 critical_exit(); /* ok to be pre-empted now */
3951 * Unlink this process from the PMCs that are
3952 * targeting it. This will send a signal to
3953 * all PMC owners whose PMCs are orphaned.
3955 * Log PMC value at exit time if requested.
3957 for (ri = 0; ri < md->pmd_npmc; ri++)
3958 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
3959 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
3960 PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)))
3961 pmclog_process_procexit(pm, pp);
3962 pmc_unlink_target_process(pm, pp);
3967 critical_exit(); /* pp == NULL */
3971 * If the process owned PMCs, free them up and free up
3972 * the associated owner descriptor.
3974 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
3975 pmc_remove_owner(po);
3976 pmc_destroy_owner_descriptor(po);
3979 sx_xunlock(&pmc_sx);
3983 * Handle a process fork.
3985 * If the parent process 'p1' is under HWPMC monitoring, then copy
3986 * over any attached PMCs that have 'do_descendants' semantics.
3990 pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc,
3993 int is_using_hwpmcs;
3995 uint32_t do_descendants;
3997 struct pmc_owner *po;
3998 struct pmc_process *ppnew, *ppold;
4000 (void) flags; /* unused parameter */
4003 is_using_hwpmcs = p1->p_flag & P_HWPMC;
4007 * If there are system-wide sampling PMCs active, we need to
4008 * log all fork events to their owners' logs.
4011 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
4012 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
4013 pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);
4015 if (!is_using_hwpmcs)
4016 return;
4019 PMCDBG(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1,
4020 p1->p_pid, p1->p_comm, newproc);
4023 * If the parent process (curthread->td_proc) is a
4024 * target of any PMCs, look for PMCs that are to be
4025 * inherited, and link these into the new process
4026 * descriptor.
4028 if ((ppold = pmc_find_process_descriptor(curthread->td_proc,
4029 PMC_FLAG_NONE)) == NULL)
4030 goto done; /* nothing to do */
4032 do_descendants = 0;
4033 for (ri = 0; ri < md->pmd_npmc; ri++)
4034 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL)
4035 do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS;
4036 if (do_descendants == 0) /* nothing to do */
4037 goto done;
4039 /* allocate a descriptor for the new process */
4040 if ((ppnew = pmc_find_process_descriptor(newproc,
4041 PMC_FLAG_ALLOCATE)) == NULL)
4042 goto done;
4045 * Run through all PMCs that were targeting the old process
4046 * and which specified F_DESCENDANTS and attach them to the
4047 * new process.
4049 * Log the fork event to all owners of PMCs attached to this
4050 * process, if not already logged.
4052 for (ri = 0; ri < md->pmd_npmc; ri++)
4053 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
4054 (pm->pm_flags & PMC_F_DESCENDANTS)) {
4055 pmc_link_target_process(pm, ppnew);
4056 po = pm->pm_owner;
4057 if (po->po_sscount == 0 &&
4058 po->po_flags & PMC_PO_OWNS_LOGFILE)
4059 pmclog_process_procfork(po, p1->p_pid,
4060 newproc->p_pid);
4064 * Now mark the new process as being tracked by this driver.
4066 PROC_LOCK(newproc);
4067 newproc->p_flag |= P_HWPMC;
4068 PROC_UNLOCK(newproc);
4070 done:
4071 sx_xunlock(&pmc_sx);
4079 static const char *pmc_name_of_pmcclass[] = {
4080 #undef __PMC_CLASS
4081 #define __PMC_CLASS(N) #N ,
4082 __PMC_CLASSES
4083 };
4086 pmc_initialize(void)
4089 struct pmc_binding pb;
4090 struct pmc_samplebuffer *sb;
4096 /* parse debug flags first */
4097 if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
4098 pmc_debugstr, sizeof(pmc_debugstr)))
4099 pmc_debugflags_parse(pmc_debugstr,
4100 pmc_debugstr+strlen(pmc_debugstr));
4103 PMCDBG(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);
4105 /* check kernel version */
4106 if (pmc_kernel_version != PMC_VERSION) {
4107 if (pmc_kernel_version == 0)
4108 printf("hwpmc: this kernel has not been compiled with "
4109 "'options HWPMC_HOOKS'.\n");
4111 printf("hwpmc: kernel version (0x%x) does not match "
4112 "module version (0x%x).\n", pmc_kernel_version,
4113 PMC_VERSION);
4114 return EPROGMISMATCH;
4118 * check sysctl parameters
4121 if (pmc_hashsize <= 0) {
4122 (void) printf("hwpmc: tunable hashsize=%d must be greater "
4123 "than zero.\n", pmc_hashsize);
4124 pmc_hashsize = PMC_HASH_SIZE;
4127 if (pmc_nsamples <= 0 || pmc_nsamples > 65535) {
4128 (void) printf("hwpmc: tunable nsamples=%d out of range.\n",
4129 pmc_nsamples);
4130 pmc_nsamples = PMC_NSAMPLES;
4133 md = pmc_md_initialize();
4135 if (md == NULL || md->pmd_init == NULL)
4136 return ENOSYS;
4138 /* allocate space for the per-cpu array */
4139 MALLOC(pmc_pcpu, struct pmc_cpu **, mp_ncpus * sizeof(struct pmc_cpu *),
4140 M_PMC, M_WAITOK|M_ZERO);
4142 /* per-cpu 'saved values' for managing process-mode PMCs */
4143 MALLOC(pmc_pcpu_saved, pmc_value_t *,
4144 sizeof(pmc_value_t) * mp_ncpus * md->pmd_npmc, M_PMC, M_WAITOK);
4146 /* perform cpu dependent initialization */
4147 pmc_save_cpu_binding(&pb);
4148 for (cpu = 0; cpu < mp_ncpus; cpu++) {
4149 if (pmc_cpu_is_disabled(cpu))
4150 continue;
4151 pmc_select_cpu(cpu);
4152 if ((error = md->pmd_init(cpu)) != 0)
4155 pmc_restore_cpu_binding(&pb);
4160 /* allocate space for the sample array */
4161 for (cpu = 0; cpu < mp_ncpus; cpu++) {
4162 if (pmc_cpu_is_disabled(cpu))
4163 continue;
4164 MALLOC(sb, struct pmc_samplebuffer *,
4165 sizeof(struct pmc_samplebuffer) +
4166 pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
4167 M_WAITOK|M_ZERO);
4169 sb->ps_read = sb->ps_write = sb->ps_samples;
4170 sb->ps_fence = sb->ps_samples + pmc_nsamples;
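/*
 * Editor's note: ps_samples[0 .. pmc_nsamples-1] is the ring proper;
 * ps_fence points one past the last slot and is what the producer and
 * consumer compare against when wrapping.
 */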
4171 KASSERT(pmc_pcpu[cpu] != NULL,
4172 ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
4174 pmc_pcpu[cpu]->pc_sb = sb;
4177 /* allocate space for the row disposition array */
4178 pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
4179 M_PMC, M_WAITOK|M_ZERO);
4181 KASSERT(pmc_pmcdisp != NULL,
4182 ("[pmc,%d] pmcdisp allocation returned NULL", __LINE__));
4184 /* mark all PMCs as available */
4185 for (n = 0; n < (int) md->pmd_npmc; n++)
4186 PMC_MARK_ROW_FREE(n);
4188 /* allocate thread hash tables */
4189 pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
4190 &pmc_ownerhashmask);
4192 pmc_processhash = hashinit(pmc_hashsize, M_PMC,
4193 &pmc_processhashmask);
4194 mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",
4195 MTX_SPIN);
4197 LIST_INIT(&pmc_ss_owners);
4200 /* allocate a pool of spin mutexes */
4201 pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size,
4202 MTX_SPIN);
4204 PMCDBG(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
4205 "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
4206 pmc_processhash, pmc_processhashmask);
4208 /* register process {exit,fork} handlers */
4209 pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
4210 pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
4211 pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
4212 pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);
4214 /* initialize logging */
4215 pmclog_initialize();
4217 /* set hook functions */
4218 pmc_intr = md->pmd_intr;
4219 pmc_hook = pmc_hook_handler;
4222 printf(PMC_MODULE_NAME ":");
4223 for (n = 0; n < (int) md->pmd_nclass; n++) {
4224 printf(" %s/%d/0x%b",
4225 pmc_name_of_pmcclass[md->pmd_classes[n].pm_class],
4226 md->pmd_nclasspmcs[n],
4227 md->pmd_classes[n].pm_caps,
4228 "\20"
4229 "\1INT\2USR\3SYS\4EDG\5THR"
4230 "\6REA\7WRI\10INV\11QUA\12PRC"
4231 "\13TAG\14CSC");
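/*
 * Editor's note: the %b conversion prints pm_caps symbolically, so the
 * boot banner looks roughly like
 *
 *	hwpmc: TSC/1/0x20<REA> P6/2/0x3ff<INT,USR,SYS,EDG,THR,...>
 *
 * (values illustrative).
 */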
4239 /* prepare to be unloaded */
4241 static void
4242 pmc_cleanup(void)
4244 struct pmc_ownerhash *ph;
4245 struct pmc_owner *po, *tmp;
4246 struct pmc_binding pb;
4248 struct pmc_processhash *prh;
4251 PMCDBG(MOD,INI,0, "%s", "cleanup");
4253 /* switch off sampling */
4254 atomic_store_rel_int(&pmc_cpumask, 0);
4258 if (pmc_hook == NULL) { /* being unloaded already */
4259 sx_xunlock(&pmc_sx);
4260 return;
4261 }
4263 pmc_hook = NULL; /* prevent new threads from entering module */
4265 /* deregister event handlers */
4266 EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
4267 EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
4269 /* send SIGBUS to all owner threads, free up allocations */
4271 for (ph = pmc_ownerhash;
4272 ph <= &pmc_ownerhash[pmc_ownerhashmask];
4273 ph++)
4274 LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
4275 pmc_remove_owner(po);
4277 /* send SIGBUS to owner processes */
4278 PMCDBG(MOD,INI,2, "cleanup signal proc=%p "
4279 "(%d, %s)", po->po_owner,
4280 po->po_owner->p_pid,
4281 po->po_owner->p_comm);
4283 PROC_LOCK(po->po_owner);
4284 psignal(po->po_owner, SIGBUS);
4285 PROC_UNLOCK(po->po_owner);
4287 pmc_destroy_owner_descriptor(po);
4291 /* reclaim allocated data structures */
4293 mtx_pool_destroy(&pmc_mtxpool);
4295 mtx_destroy(&pmc_processhash_mtx);
4296 if (pmc_processhash) {
4298 struct pmc_process *pp;
4300 PMCDBG(MOD,INI,3, "%s", "destroy process hash");
4301 for (prh = pmc_processhash;
4302 prh <= &pmc_processhash[pmc_processhashmask];
4303 prh++)
4304 LIST_FOREACH(pp, prh, pp_next)
4305 PMCDBG(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);
4308 hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
4309 pmc_processhash = NULL;
4312 if (pmc_ownerhash) {
4313 PMCDBG(MOD,INI,3, "%s", "destroy owner hash");
4314 hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
4315 pmc_ownerhash = NULL;
4318 KASSERT(LIST_EMPTY(&pmc_ss_owners),
4319 ("[pmc,%d] Global SS owner list not empty", __LINE__));
4320 KASSERT(pmc_ss_count == 0,
4321 ("[pmc,%d] Global SS count not zero", __LINE__));
4323 /* free the per-cpu sample buffers */
4324 for (cpu = 0; cpu < mp_ncpus; cpu++) {
4325 if (pmc_cpu_is_disabled(cpu))
4326 continue;
4327 KASSERT(pmc_pcpu[cpu]->pc_sb != NULL,
4328 ("[pmc,%d] Null cpu sample buffer cpu=%d", __LINE__,
4330 FREE(pmc_pcpu[cpu]->pc_sb, M_PMC);
4331 pmc_pcpu[cpu]->pc_sb = NULL;
4334 /* do processor dependent cleanup */
4335 PMCDBG(MOD,INI,3, "%s", "md cleanup");
4337 pmc_save_cpu_binding(&pb);
4338 for (cpu = 0; cpu < mp_ncpus; cpu++) {
4339 PMCDBG(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
4340 cpu, pmc_pcpu[cpu]);
4341 if (pmc_cpu_is_disabled(cpu))
4342 continue;
4343 pmc_select_cpu(cpu);
4345 (void) md->pmd_cleanup(cpu);
4349 pmc_restore_cpu_binding(&pb);
4352 /* deallocate per-cpu structures */
4353 FREE(pmc_pcpu, M_PMC);
4354 pmc_pcpu = NULL;
4356 FREE(pmc_pcpu_saved, M_PMC);
4357 pmc_pcpu_saved = NULL;
4360 FREE(pmc_pmcdisp, M_PMC);
4361 pmc_pmcdisp = NULL;
4366 sx_xunlock(&pmc_sx); /* we are done */
4370 * The function called at load/unload.
4373 static int
4374 load (struct module *module __unused, int cmd, void *arg __unused)
4382 /* initialize the subsystem */
4383 error = pmc_initialize();
4386 PMCDBG(MOD,INI,1, "syscall=%d ncpus=%d",
4387 pmc_syscall_num, mp_ncpus);
4394 PMCDBG(MOD,INI,1, "%s", "unloaded");
4398 error = EINVAL; /* XXX should panic(9) */
4406 MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module");