1 /*-
2  * Copyright (c) 2003-2008 Joseph Koshy
3  * Copyright (c) 2007 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by A. Joseph Koshy under
7  * sponsorship from the FreeBSD Foundation and Google, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/eventhandler.h>
37 #include <sys/jail.h>
38 #include <sys/kernel.h>
39 #include <sys/kthread.h>
40 #include <sys/limits.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/mount.h>
45 #include <sys/mutex.h>
46 #include <sys/pmc.h>
47 #include <sys/pmckern.h>
48 #include <sys/pmclog.h>
49 #include <sys/priv.h>
50 #include <sys/proc.h>
51 #include <sys/queue.h>
52 #include <sys/resourcevar.h>
53 #include <sys/sched.h>
54 #include <sys/signalvar.h>
55 #include <sys/smp.h>
56 #include <sys/sx.h>
57 #include <sys/sysctl.h>
58 #include <sys/sysent.h>
59 #include <sys/systm.h>
60 #include <sys/vnode.h>
61
62 #include <sys/linker.h>         /* needs to be after <sys/malloc.h> */
63
64 #include <machine/atomic.h>
65 #include <machine/md_var.h>
66
67 #include <vm/vm.h>
68 #include <vm/vm_extern.h>
69 #include <vm/pmap.h>
70 #include <vm/vm_map.h>
71 #include <vm/vm_object.h>
72
73 #include "hwpmc_soft.h"
74
75 /*
76  * Types
77  */
78
79 enum pmc_flags {
80         PMC_FLAG_NONE     = 0x00, /* do nothing */
81         PMC_FLAG_REMOVE   = 0x01, /* atomically remove entry from hash */
82         PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
83 };
84
85 /*
86  * The offset in sysent where the syscall is allocated.
87  */
88
89 static int pmc_syscall_num = NO_SYSCALL;
90 struct pmc_cpu          **pmc_pcpu;      /* per-cpu state */
91 pmc_value_t             *pmc_pcpu_saved; /* saved PMC values: CSW handling */
92
93 #define PMC_PCPU_SAVED(C,R)     pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
94
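/*
 * For example, with md->pmd_npmc == 4, the saved value for PMC row
 * index 2 on CPU 3 lives at pmc_pcpu_saved[2 + 4*3]; the array is an
 * [ncpu][npmc] matrix flattened in row-major order.  (The npmc value
 * here is illustrative; the real count is set by the MD layer.)
 */
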
95 struct mtx_pool         *pmc_mtxpool;
96 static int              *pmc_pmcdisp;    /* PMC row dispositions */
97
98 #define PMC_ROW_DISP_IS_FREE(R)         (pmc_pmcdisp[(R)] == 0)
99 #define PMC_ROW_DISP_IS_THREAD(R)       (pmc_pmcdisp[(R)] > 0)
100 #define PMC_ROW_DISP_IS_STANDALONE(R)   (pmc_pmcdisp[(R)] < 0)
101
102 #define PMC_MARK_ROW_FREE(R) do {                                         \
103         pmc_pmcdisp[(R)] = 0;                                             \
104 } while (0)
105
106 #define PMC_MARK_ROW_STANDALONE(R) do {                                   \
107         KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
108                     __LINE__));                                           \
109         atomic_add_int(&pmc_pmcdisp[(R)], -1);                            \
110         KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()),              \
111                 ("[pmc,%d] row disposition error", __LINE__));            \
112 } while (0)
113
114 #define PMC_UNMARK_ROW_STANDALONE(R) do {                                 \
115         atomic_add_int(&pmc_pmcdisp[(R)], 1);                             \
116         KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
117                     __LINE__));                                           \
118 } while (0)
119
120 #define PMC_MARK_ROW_THREAD(R) do {                                       \
121         KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
122                     __LINE__));                                           \
123         atomic_add_int(&pmc_pmcdisp[(R)], 1);                             \
124 } while (0)
125
126 #define PMC_UNMARK_ROW_THREAD(R) do {                                     \
127         atomic_add_int(&pmc_pmcdisp[(R)], -1);                            \
128         KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
129                     __LINE__));                                           \
130 } while (0)
131
132
133 /* various event handlers */
134 static eventhandler_tag pmc_exit_tag, pmc_fork_tag;
135
136 /* Module statistics */
137 struct pmc_op_getdriverstats pmc_stats;
138
139 /* Machine/processor dependent operations */
140 static struct pmc_mdep  *md;
141
142 /*
143  * Hash tables mapping owner processes and target threads to PMCs.
144  */
145
146 struct mtx pmc_processhash_mtx;         /* spin mutex */
147 static u_long pmc_processhashmask;
148 static LIST_HEAD(pmc_processhash, pmc_process)  *pmc_processhash;
149
150 /*
151  * Hash table of PMC owner descriptors.  This table is protected by
152  * the shared PMC "sx" lock.
153  */
154
155 static u_long pmc_ownerhashmask;
156 static LIST_HEAD(pmc_ownerhash, pmc_owner)      *pmc_ownerhash;
157
158 /*
159  * List of PMC owners with system-wide sampling PMCs.
160  */
161
162 static LIST_HEAD(, pmc_owner)                   pmc_ss_owners;
163
164
165 /*
166  * A map of row indices to classdep structures.
167  */
168 static struct pmc_classdep **pmc_rowindex_to_classdep;
169
170 /*
171  * Prototypes
172  */
173
174 #ifdef  DEBUG
175 static int      pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
176 static int      pmc_debugflags_parse(char *newstr, char *fence);
177 #endif
178
179 static int      load(struct module *module, int cmd, void *arg);
180 static int      pmc_attach_process(struct proc *p, struct pmc *pm);
181 static struct pmc *pmc_allocate_pmc_descriptor(void);
182 static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p);
183 static int      pmc_attach_one_process(struct proc *p, struct pmc *pm);
184 static int      pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
185     int cpu);
186 static int      pmc_can_attach(struct pmc *pm, struct proc *p);
187 static void     pmc_capture_user_callchain(int cpu, int soft, struct trapframe *tf);
188 static void     pmc_cleanup(void);
189 static int      pmc_detach_process(struct proc *p, struct pmc *pm);
190 static int      pmc_detach_one_process(struct proc *p, struct pmc *pm,
191     int flags);
192 static void     pmc_destroy_owner_descriptor(struct pmc_owner *po);
193 static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
194 static int      pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
195 static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
196     pmc_id_t pmc);
197 static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
198     uint32_t mode);
199 static void     pmc_force_context_switch(void);
200 static void     pmc_link_target_process(struct pmc *pm,
201     struct pmc_process *pp);
202 static void     pmc_log_all_process_mappings(struct pmc_owner *po);
203 static void     pmc_log_kernel_mappings(struct pmc *pm);
204 static void     pmc_log_process_mappings(struct pmc_owner *po, struct proc *p);
205 static void     pmc_maybe_remove_owner(struct pmc_owner *po);
206 static void     pmc_process_csw_in(struct thread *td);
207 static void     pmc_process_csw_out(struct thread *td);
208 static void     pmc_process_exit(void *arg, struct proc *p);
209 static void     pmc_process_fork(void *arg, struct proc *p1,
210     struct proc *p2, int n);
211 static void     pmc_process_samples(int cpu, int soft);
212 static void     pmc_release_pmc_descriptor(struct pmc *pmc);
213 static void     pmc_remove_owner(struct pmc_owner *po);
214 static void     pmc_remove_process_descriptor(struct pmc_process *pp);
215 static void     pmc_restore_cpu_binding(struct pmc_binding *pb);
216 static void     pmc_save_cpu_binding(struct pmc_binding *pb);
217 static void     pmc_select_cpu(int cpu);
218 static int      pmc_start(struct pmc *pm);
219 static int      pmc_stop(struct pmc *pm);
220 static int      pmc_syscall_handler(struct thread *td, void *syscall_args);
221 static void     pmc_unlink_target_process(struct pmc *pmc,
222     struct pmc_process *pp);
223 static int generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp);
224 static int generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp);
225 static struct pmc_mdep *pmc_generic_cpu_initialize(void);
226 static void pmc_generic_cpu_finalize(struct pmc_mdep *md);
227
228 /*
229  * Kernel tunables and sysctl(8) interface.
230  */
231
232 SYSCTL_DECL(_kern_hwpmc);
233
234 static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
235 TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "callchaindepth", &pmc_callchaindepth);
236 SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_TUN|CTLFLAG_RD,
237     &pmc_callchaindepth, 0, "depth of call chain records");
238
239 #ifdef  DEBUG
240 struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
241 char    pmc_debugstr[PMC_DEBUG_STRSIZE];
242 TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
243     sizeof(pmc_debugstr));
244 SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
245     CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN,
246     0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
247 #endif
248
249 /*
250  * kern.hwpmc.hashsize -- determines the number of rows in the
251  * hash tables used to look up target and owner processes.
252  */
253
254 static int pmc_hashsize = PMC_HASH_SIZE;
255 TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize);
256 SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
257     &pmc_hashsize, 0, "rows in hash tables");
258
259 /*
260  * kern.hwpmc.nsamples -- number of PC samples/callchain stacks per CPU
261  */
262
263 static int pmc_nsamples = PMC_NSAMPLES;
264 TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nsamples", &pmc_nsamples);
265 SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_TUN|CTLFLAG_RD,
266     &pmc_nsamples, 0, "number of PC samples per CPU");
267
268
269 /*
270  * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
271  */
272
273 static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
274 TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size);
275 SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
276     &pmc_mtxpool_size, 0, "size of spin mutex pool");
277
278
279 /*
280  * security.bsd.unprivileged_syspmcs -- allow non-root processes to
281  * allocate system-wide PMCs.
282  *
283  * Allowing unprivileged processes to allocate system PMCs is convenient
284  * if system-wide measurements need to be taken concurrently with other
285  * per-process measurements.  This feature is turned off by default.
286  */
287
288 static int pmc_unprivileged_syspmcs = 0;
289 TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
290 SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
291     &pmc_unprivileged_syspmcs, 0,
292     "allow unprivileged process to allocate system PMCs");
293
294 /*
295  * Hash function.  Discard the lower 2 bits of the pointer since
296  * these are always zero for our uses.  The hash multiplier is
297  * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
298  */
299
300 #if     LONG_BIT == 64
301 #define _PMC_HM         11400714819323198486u
302 #elif   LONG_BIT == 32
303 #define _PMC_HM         2654435769u
304 #else
305 #error  Must know the size of 'long' to compile
306 #endif
307
308 #define PMC_HASH_PTR(P,M)       ((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
309
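/*
 * A minimal userland sketch of the multiplicative (Fibonacci) hashing
 * scheme above, for illustration only; it is not part of the module
 * and assumes a 64-bit 'long':
 */
#if 0
#include <stdio.h>

static unsigned long
hash_ptr_example(const void *p, unsigned long mask)
{
	/* Low 2 bits of an aligned pointer are zero; discard them. */
	return ((((unsigned long) p >> 2) * 11400714819323198486u) & mask);
}

int
main(void)
{
	int x;

	/* 16 buckets: the mask is 2^4 - 1, as with hashinit(9) masks. */
	printf("bucket = %lu\n", hash_ptr_example(&x, 15));
	return (0);
}
#endif
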
310 /*
311  * Syscall structures
312  */
313
314 /* The `sysent' for the new syscall */
315 static struct sysent pmc_sysent = {
316         2,                      /* sy_narg */
317         pmc_syscall_handler     /* sy_call */
318 };
319
320 static struct syscall_module_data pmc_syscall_mod = {
321         load,
322         NULL,
323         &pmc_syscall_num,
324         &pmc_sysent,
325         { 0, NULL }
326 };
327
328 static moduledata_t pmc_mod = {
329         PMC_MODULE_NAME,
330         syscall_module_handler,
331         &pmc_syscall_mod
332 };
333
334 DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
335 MODULE_VERSION(pmc, PMC_VERSION);
336
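/*
 * Because the syscall slot is allocated dynamically, userland must
 * discover its number at runtime.  A sketch of the lookup performed
 * by libpmc(3), illustrative only, with error handling elided:
 */
#if 0
#include <sys/param.h>
#include <sys/module.h>
#include <unistd.h>

static int
pmc_find_syscall_example(void)
{
	struct module_stat ms;
	int modid;

	modid = modfind("pmc");		/* PMC_MODULE_NAME */
	ms.version = sizeof(ms);
	modstat(modid, &ms);
	return (ms.data.intval);	/* the allocated syscall number */
}
/* e.g.: syscall(pmc_find_syscall_example(), op, arg); */
#endif
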
337 #ifdef  DEBUG
338 enum pmc_dbgparse_state {
339         PMCDS_WS,               /* in whitespace */
340         PMCDS_MAJOR,            /* seen a major keyword */
341         PMCDS_MINOR
342 };
343
344 static int
345 pmc_debugflags_parse(char *newstr, char *fence)
346 {
347         char c, *p, *q;
348         struct pmc_debugflags *tmpflags;
349         int error, found, *newbits, tmp;
350         size_t kwlen;
351
352         tmpflags = malloc(sizeof(*tmpflags), M_PMC, M_WAITOK|M_ZERO);
353
354         p = newstr;
355         error = 0;
356
357         for (; p < fence && (c = *p); p++) {
358
359                 /* skip white space */
360                 if (c == ' ' || c == '\t')
361                         continue;
362
363                 /* look for a keyword followed by "=" */
364                 for (q = p; p < fence && (c = *p) && c != '='; p++)
365                         ;
366                 if (c != '=') {
367                         error = EINVAL;
368                         goto done;
369                 }
370
371                 kwlen = p - q;
372                 newbits = NULL;
373
374                 /* lookup flag group name */
375 #define DBG_SET_FLAG_MAJ(S,F)                                           \
376                 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0)  \
377                         newbits = &tmpflags->pdb_ ## F;
378
379                 DBG_SET_FLAG_MAJ("cpu",         CPU);
380                 DBG_SET_FLAG_MAJ("csw",         CSW);
381                 DBG_SET_FLAG_MAJ("logging",     LOG);
382                 DBG_SET_FLAG_MAJ("module",      MOD);
383                 DBG_SET_FLAG_MAJ("md",          MDP);
384                 DBG_SET_FLAG_MAJ("owner",       OWN);
385                 DBG_SET_FLAG_MAJ("pmc",         PMC);
386                 DBG_SET_FLAG_MAJ("process",     PRC);
387                 DBG_SET_FLAG_MAJ("sampling",    SAM);
388
389                 if (newbits == NULL) {
390                         error = EINVAL;
391                         goto done;
392                 }
393
394                 p++;            /* skip the '=' */
395
396                 /* Now parse the individual flags */
397                 tmp = 0;
398         newflag:
399                 for (q = p; p < fence && (c = *p); p++)
400                         if (c == ' ' || c == '\t' || c == ',')
401                                 break;
402
403                 /* p == fence or c == ws or c == "," or c == 0 */
404
405                 if ((kwlen = p - q) == 0) {
406                         *newbits = tmp;
407                         continue;
408                 }
409
410                 found = 0;
411 #define DBG_SET_FLAG_MIN(S,F)                                           \
412                 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0)  \
413                         tmp |= found = (1 << PMC_DEBUG_MIN_ ## F)
414
415                 /* a '*' denotes all possible flags in the group */
416                 if (kwlen == 1 && *q == '*')
417                         tmp = found = ~0;
418                 /* look for individual flag names */
419                 DBG_SET_FLAG_MIN("allocaterow", ALR);
420                 DBG_SET_FLAG_MIN("allocate",    ALL);
421                 DBG_SET_FLAG_MIN("attach",      ATT);
422                 DBG_SET_FLAG_MIN("bind",        BND);
423                 DBG_SET_FLAG_MIN("config",      CFG);
424                 DBG_SET_FLAG_MIN("exec",        EXC);
425                 DBG_SET_FLAG_MIN("exit",        EXT);
426                 DBG_SET_FLAG_MIN("find",        FND);
427                 DBG_SET_FLAG_MIN("flush",       FLS);
428                 DBG_SET_FLAG_MIN("fork",        FRK);
429                 DBG_SET_FLAG_MIN("getbuf",      GTB);
430                 DBG_SET_FLAG_MIN("hook",        PMH);
431                 DBG_SET_FLAG_MIN("init",        INI);
432                 DBG_SET_FLAG_MIN("intr",        INT);
433                 DBG_SET_FLAG_MIN("linktarget",  TLK);
434                 DBG_SET_FLAG_MIN("mayberemove", OMR);
435                 DBG_SET_FLAG_MIN("ops",         OPS);
436                 DBG_SET_FLAG_MIN("read",        REA);
437                 DBG_SET_FLAG_MIN("register",    REG);
438                 DBG_SET_FLAG_MIN("release",     REL);
439                 DBG_SET_FLAG_MIN("remove",      ORM);
440                 DBG_SET_FLAG_MIN("sample",      SAM);
441                 DBG_SET_FLAG_MIN("scheduleio",  SIO);
442                 DBG_SET_FLAG_MIN("select",      SEL);
443                 DBG_SET_FLAG_MIN("signal",      SIG);
444                 DBG_SET_FLAG_MIN("swi",         SWI);
445                 DBG_SET_FLAG_MIN("swo",         SWO);
446                 DBG_SET_FLAG_MIN("start",       STA);
447                 DBG_SET_FLAG_MIN("stop",        STO);
448                 DBG_SET_FLAG_MIN("syscall",     PMS);
449                 DBG_SET_FLAG_MIN("unlinktarget", TUL);
450                 DBG_SET_FLAG_MIN("write",       WRI);
451                 if (found == 0) {
452                         /* unrecognized flag name */
453                         error = EINVAL;
454                         goto done;
455                 }
456
457                 if (c == 0 || c == ' ' || c == '\t') {  /* end of flag group */
458                         *newbits = tmp;
459                         continue;
460                 }
461
462                 p++;
463                 goto newflag;
464         }
465
466         /* save the new flag set */
467         bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags));
468
469  done:
470         free(tmpflags, M_PMC);
471         return error;
472 }
473
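/*
 * As parsed above, a flag specification is a whitespace-separated
 * list of "major=minor[,minor...]" groups, with '*' standing for
 * every minor flag in a group.  A hypothetical setting:
 *
 *	kern.hwpmc.debugflags="cpu=* process=exec,fork"
 *
 * would enable all CPU-related messages plus the exec and fork
 * events of the process group.
 */
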
474 static int
475 pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
476 {
477         char *fence, *newstr;
478         int error;
479         unsigned int n;
480
481         (void) arg1; (void) arg2; /* unused parameters */
482
483         n = sizeof(pmc_debugstr);
484         newstr = malloc(n, M_PMC, M_WAITOK|M_ZERO);
485         (void) strlcpy(newstr, pmc_debugstr, n);
486
487         error = sysctl_handle_string(oidp, newstr, n, req);
488
489         /* if there is a new string, parse and copy it */
490         if (error == 0 && req->newptr != NULL) {
491                 fence = newstr + (n < req->newlen ? n : req->newlen + 1);
492                 if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
493                         (void) strlcpy(pmc_debugstr, newstr,
494                             sizeof(pmc_debugstr));
495         }
496
497         free(newstr, M_PMC);
498
499         return error;
500 }
501 #endif
502
503 /*
504  * Map a row index to a classdep structure and return the adjusted row
505  * index for the PMC class index.
506  */
507 static struct pmc_classdep *
508 pmc_ri_to_classdep(struct pmc_mdep *md, int ri, int *adjri)
509 {
510         struct pmc_classdep *pcd;
511
512         (void) md;
513
514         KASSERT(ri >= 0 && ri < md->pmd_npmc,
515             ("[pmc,%d] illegal row-index %d", __LINE__, ri));
516
517         pcd = pmc_rowindex_to_classdep[ri];
518
519         KASSERT(pcd != NULL,
520             ("[pmc,%d] ri %d null pcd", __LINE__, ri));
521
522         *adjri = ri - pcd->pcd_ri;
523
524         KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num,
525             ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri));
526
527         return (pcd);
528 }
529
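/*
 * For example, if the MD layer registered a one-counter class at
 * global row index 0 followed by a four-counter class whose pcd_ri
 * is 1, then ri == 3 resolves to the second class with *adjri == 2.
 * (This layout is hypothetical; actual layouts are machine dependent.)
 */
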
530 /*
531  * Concurrency Control
532  *
533  * The driver manages the following data structures:
534  *
535  *   - target process descriptors, one per target process
536  *   - owner process descriptors (and attached lists), one per owner process
537  *   - lookup hash tables for owner and target processes
538  *   - PMC descriptors (and attached lists)
539  *   - per-cpu hardware state
540  *   - the 'hook' variable through which the kernel calls into
541  *     this module
542  *   - the machine hardware state (managed by the MD layer)
543  *
544  * These data structures are accessed from:
545  *
546  * - thread context-switch code
547  * - interrupt handlers (possibly on multiple cpus)
548  * - kernel threads on multiple cpus running on behalf of user
549  *   processes doing system calls
550  * - this driver's private kernel threads
551  *
552  * = Locks and Locking strategy =
553  *
554  * The driver uses four locking strategies for its operation:
555  *
556  * - The global SX lock "pmc_sx" is used to protect internal
557  *   data structures.
558  *
559  *   Calls into the module by syscall() start with this lock being
560  *   held in exclusive mode.  Depending on the requested operation,
561  *   the lock may be downgraded to 'shared' mode to allow more
562  *   concurrent readers into the module.  Calls into the module from
563  *   other parts of the kernel acquire the lock in shared mode.
564  *
565  *   This SX lock is held in exclusive mode for any operations that
566  *   modify the linkages between the driver's internal data structures.
567  *
568  *   The 'pmc_hook' function pointer is also protected by this lock.
569  *   It is only examined with the sx lock held in exclusive mode.  The
570  *   kernel module is allowed to be unloaded only with the sx lock held
571  *   in exclusive mode.  In normal syscall handling, after acquiring the
572  *   pmc_sx lock we first check that 'pmc_hook' is non-null before
573  *   proceeding.  This prevents races between the thread unloading the module
574  *   and other threads seeking to use the module.
575  *
576  * - Lookups of target process structures and owner process structures
577  *   cannot use the global "pmc_sx" SX lock because these lookups need
578  *   to happen during context switches and in other critical sections
579  *   where sleeping is not allowed.  We protect these lookup tables
580  *   with their own private spin-mutexes, "pmc_processhash_mtx" and
581  *   "pmc_ownerhash_mtx".
582  *
583  * - Interrupt handlers work in a lock free manner.  At interrupt
584  *   time, handlers look at the PMC pointer (phw->phw_pmc) configured
585  *   when the PMC was started.  If this pointer is NULL, the interrupt
586  *   is ignored after updating driver statistics.  We ensure that this
587  *   pointer is set (using an atomic operation if necessary) before the
588  *   PMC hardware is started.  Conversely, this pointer is unset atomically
589  *   only after the PMC hardware is stopped.
590  *
591  *   We ensure that everything needed for the operation of an
592  *   interrupt handler is available without it needing to acquire any
593  *   locks.  We also ensure that a PMC's software state is destroyed only
594  *   after the PMC is taken off hardware (on all CPUs).
595  *
596  * - Context-switch handling with process-private PMCs needs more
597  *   care.
598  *
599  *   A given process may be the target of multiple PMCs.  For example,
600  *   PMCATTACH and PMCDETACH may be requested by a process on one CPU
601  *   while the target process is running on another.  A PMC could also
602  *   be getting released because its owner is exiting.  We tackle
603  *   these situations in the following manner:
604  *
605  *   - each target process structure 'pmc_process' has an array
606  *     of 'struct pmc *' pointers, one for each hardware PMC.
607  *
608  *   - At context switch IN time, each "target" PMC in RUNNING state
609  *     gets started on hardware and a pointer to each PMC is copied into
610  *     the per-cpu phw array.  The 'runcount' for the PMC is
611  *     incremented.
612  *
613  *   - At context switch OUT time, all process-virtual PMCs are stopped
614  *     on hardware.  The saved value is added to the PMCs value field
615  *     only if the PMC is in a non-deleted state (the PMCs state could
616  *     have changed during the current time slice).
617  *
618  *     Note that in between a switch IN on a processor and a switch
619  *     OUT, the PMC could have been released on another CPU.  Therefore
620  *     context switch OUT always looks at the hardware state to turn
621  *     OFF PMCs and will update a PMC's saved value only if reachable
622  *     from the target process record.
623  *
624  *   - OP PMCRELEASE could be called on a PMC at any time (the PMC could
625  *     be attached to many processes at the time of the call and could
626  *     be active on multiple CPUs).
627  *
628  *     We prevent further scheduling of the PMC by marking it as in
629  *     state 'DELETED'.  If the runcount of the PMC is non-zero then
630  *     this PMC is currently running on a CPU somewhere.  The thread
631  *     doing the PMCRELEASE operation waits by repeatedly doing a
632  *     pause() till the runcount comes to zero.
633  *
634  * The contents of a PMC descriptor (struct pmc) are protected using
635  * a spin-mutex.  In order to save space, we use a mutex pool.
636  *
637  * In terms of lock types used by witness(4), we use:
638  * - Type "pmc-sx", used by the global SX lock.
639  * - Type "pmc-sleep", for sleep mutexes used by logger threads.
640  * - Type "pmc-per-proc", for protecting PMC owner descriptors.
641  * - Type "pmc-leaf", used for all other spin mutexes.
642  */
643
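/*
 * A condensed sketch of the PMCRELEASE drain protocol described
 * above; the real logic in pmc_release_pmc_descriptor() and its
 * callees differs in detail, so treat this as illustration only:
 */
#if 0
	pm->pm_state = PMC_STATE_DELETED; /* block further scheduling */
	while (pm->pm_runcount > 0)	  /* still live on some CPU */
		pause("pmcrel", 1);	  /* wait for it to switch out */
	/* now safe to tear down the descriptor's software state */
#endif
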
644 /*
645  * save the cpu binding of the current kthread
646  */
647
648 static void
649 pmc_save_cpu_binding(struct pmc_binding *pb)
650 {
651         PMCDBG(CPU,BND,2, "%s", "save-cpu");
652         thread_lock(curthread);
653         pb->pb_bound = sched_is_bound(curthread);
654         pb->pb_cpu   = curthread->td_oncpu;
655         thread_unlock(curthread);
656         PMCDBG(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
657 }
658
659 /*
660  * restore the cpu binding of the current thread
661  */
662
663 static void
664 pmc_restore_cpu_binding(struct pmc_binding *pb)
665 {
666         PMCDBG(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
667             curthread->td_oncpu, pb->pb_cpu);
668         thread_lock(curthread);
669         if (pb->pb_bound)
670                 sched_bind(curthread, pb->pb_cpu);
671         else
672                 sched_unbind(curthread);
673         thread_unlock(curthread);
674         PMCDBG(CPU,BND,2, "%s", "restore-cpu done");
675 }
676
677 /*
678  * move execution to the specified cpu and bind it there.
679  */
680
681 static void
682 pmc_select_cpu(int cpu)
683 {
684         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
685             ("[pmc,%d] bad cpu number %d", __LINE__, cpu));
686
687         /* Never move to an inactive CPU. */
688         KASSERT(pmc_cpu_is_active(cpu), ("[pmc,%d] selecting inactive "
689             "CPU %d", __LINE__, cpu));
690
691         PMCDBG(CPU,SEL,2, "select-cpu cpu=%d", cpu);
692         thread_lock(curthread);
693         sched_bind(curthread, cpu);
694         thread_unlock(curthread);
695
696         KASSERT(curthread->td_oncpu == cpu,
697             ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
698                 cpu, curthread->td_oncpu));
699
700         PMCDBG(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
701 }
702
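/*
 * The three helpers above are used together whenever the driver must
 * touch a specific CPU's PMC hardware: save the current binding, bind
 * to the target CPU, perform the MD operation, then restore.  In
 * outline:
 */
#if 0
	struct pmc_binding pb;

	pmc_save_cpu_binding(&pb);
	pmc_select_cpu(cpu);
	/* ... MD operations on this CPU's PMC hardware ... */
	pmc_restore_cpu_binding(&pb);
#endif
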
703 /*
704  * Force a context switch.
705  *
706  * We do this by pausing for 1 tick -- invoking mi_switch() is not
707  * guaranteed to force a context switch.
708  */
709
710 static void
711 pmc_force_context_switch(void)
712 {
713
714         pause("pmcctx", 1);
715 }
716
717 /*
718  * Get the file name for an executable.  This is a simple wrapper
719  * around vn_fullpath(9).
720  */
721
722 static void
723 pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
724 {
725
726         *fullpath = "unknown";
727         *freepath = NULL;
728         vn_fullpath(curthread, v, fullpath, freepath);
729 }
730
731 /*
732  * remove a process owning PMCs
733  */
734
735 void
736 pmc_remove_owner(struct pmc_owner *po)
737 {
738         struct pmc *pm, *tmp;
739
740         sx_assert(&pmc_sx, SX_XLOCKED);
741
742         PMCDBG(OWN,ORM,1, "remove-owner po=%p", po);
743
744         /* Remove descriptor from the owner hash table */
745         LIST_REMOVE(po, po_next);
746
747         /* release all owned PMC descriptors */
748         LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
749                 PMCDBG(OWN,ORM,2, "pmc=%p", pm);
750                 KASSERT(pm->pm_owner == po,
751                     ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po));
752
753                 pmc_release_pmc_descriptor(pm); /* will unlink from the list */
754         }
755
756         KASSERT(po->po_sscount == 0,
757             ("[pmc,%d] SS count not zero", __LINE__));
758         KASSERT(LIST_EMPTY(&po->po_pmcs),
759             ("[pmc,%d] PMC list not empty", __LINE__));
760
761         /* de-configure the log file if present */
762         if (po->po_flags & PMC_PO_OWNS_LOGFILE)
763                 pmclog_deconfigure_log(po);
764 }
765
766 /*
767  * remove an owner process record if all conditions are met.
768  */
769
770 static void
771 pmc_maybe_remove_owner(struct pmc_owner *po)
772 {
773
774         PMCDBG(OWN,OMR,1, "maybe-remove-owner po=%p", po);
775
776         /*
777          * Remove the owner record if
778          * - this process does not own any PMCs, and
779          * - this process has not configured a log file.
780          */
781
782         if (LIST_EMPTY(&po->po_pmcs) &&
783             ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) {
784                 pmc_remove_owner(po);
785                 pmc_destroy_owner_descriptor(po);
786         }
787 }
788
789 /*
790  * Add an association between a target process and a PMC.
791  */
792
793 static void
794 pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
795 {
796         int ri;
797         struct pmc_target *pt;
798
799         sx_assert(&pmc_sx, SX_XLOCKED);
800
801         KASSERT(pm != NULL && pp != NULL,
802             ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
803         KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
804             ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d",
805                 __LINE__, pm, pp->pp_proc->p_pid));
806         KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= ((int) md->pmd_npmc - 1),
807             ("[pmc,%d] Illegal reference count %d for process record %p",
808                 __LINE__, pp->pp_refcnt, (void *) pp));
809
810         ri = PMC_TO_ROWINDEX(pm);
811
812         PMCDBG(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
813             pm, ri, pp);
814
815 #ifdef  DEBUG
816         LIST_FOREACH(pt, &pm->pm_targets, pt_next)
817             if (pt->pt_process == pp)
818                     KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
819                                 __LINE__, pp, pm));
820 #endif
821
822         pt = malloc(sizeof(struct pmc_target), M_PMC, M_WAITOK|M_ZERO);
823         pt->pt_process = pp;
824
825         LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);
826
827         atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc,
828             (uintptr_t)pm);
829
830         if (pm->pm_owner->po_owner == pp->pp_proc)
831                 pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;
832
833         /*
834          * Initialize the per-process values at this row index.
835          */
836         pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ?
837             pm->pm_sc.pm_reloadcount : 0;
838
839         pp->pp_refcnt++;
840
841 }
842
843 /*
844  * Removes the association between a target process and a PMC.
845  */
846
847 static void
848 pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
849 {
850         int ri;
851         struct proc *p;
852         struct pmc_target *ptgt;
853
854         sx_assert(&pmc_sx, SX_XLOCKED);
855
856         KASSERT(pm != NULL && pp != NULL,
857             ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
858
859         KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc,
860             ("[pmc,%d] Illegal ref count %d on process record %p",
861                 __LINE__, pp->pp_refcnt, (void *) pp));
862
863         ri = PMC_TO_ROWINDEX(pm);
864
865         PMCDBG(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
866             pm, ri, pp);
867
868         KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
869             ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
870                 ri, pm, pp->pp_pmcs[ri].pp_pmc));
871
872         pp->pp_pmcs[ri].pp_pmc = NULL;
873         pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;
874
875         /* Remove owner-specific flags */
876         if (pm->pm_owner->po_owner == pp->pp_proc) {
877                 pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
878                 pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
879         }
880
881         pp->pp_refcnt--;
882
883         /* Remove the target process from the PMC structure */
884         LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
885                 if (ptgt->pt_process == pp)
886                         break;
887
888         KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
889                     "in pmc %p", __LINE__, pp->pp_proc, pp, pm));
890
891         LIST_REMOVE(ptgt, pt_next);
892         free(ptgt, M_PMC);
893
894         /* if the PMC now lacks targets, send the owner a SIGIO */
895         if (LIST_EMPTY(&pm->pm_targets)) {
896                 p = pm->pm_owner->po_owner;
897                 PROC_LOCK(p);
898                 kern_psignal(p, SIGIO);
899                 PROC_UNLOCK(p);
900
901                 PMCDBG(PRC,SIG,2, "signalling proc=%p signal=%d", p,
902                     SIGIO);
903         }
904 }
905
906 /*
907  * Check if PMC 'pm' may be attached to target process 't'.
908  */
909
910 static int
911 pmc_can_attach(struct pmc *pm, struct proc *t)
912 {
913         struct proc *o;         /* pmc owner */
914         struct ucred *oc, *tc;  /* owner, target credentials */
915         int decline_attach, i;
916
917         /*
918          * A PMC's owner can always attach that PMC to itself.
919          */
920
921         if ((o = pm->pm_owner->po_owner) == t)
922                 return 0;
923
924         PROC_LOCK(o);
925         oc = o->p_ucred;
926         crhold(oc);
927         PROC_UNLOCK(o);
928
929         PROC_LOCK(t);
930         tc = t->p_ucred;
931         crhold(tc);
932         PROC_UNLOCK(t);
933
934         /*
935          * The effective uid of the PMC owner should match at least one
936          * of the {effective,real,saved} uids of the target process.
937          */
938
939         decline_attach = oc->cr_uid != tc->cr_uid &&
940             oc->cr_uid != tc->cr_svuid &&
941             oc->cr_uid != tc->cr_ruid;
942
943         /*
944          * Every one of the target's group ids must be in the owner's
945          * group list.
946          */
947         for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
948                 decline_attach = !groupmember(tc->cr_groups[i], oc);
949
950         /* check the real and saved gids too */
951         if (decline_attach == 0)
952                 decline_attach = !groupmember(tc->cr_rgid, oc) ||
953                     !groupmember(tc->cr_svgid, oc);
954
955         crfree(tc);
956         crfree(oc);
957
958         return !decline_attach;
959 }
960
961 /*
962  * Attach a process to a PMC.
963  */
964
965 static int
966 pmc_attach_one_process(struct proc *p, struct pmc *pm)
967 {
968         int ri;
969         char *fullpath, *freepath;
970         struct pmc_process      *pp;
971
972         sx_assert(&pmc_sx, SX_XLOCKED);
973
974         PMCDBG(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
975             PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
976
977         /*
978          * Locate the process descriptor corresponding to process 'p',
979          * allocating space as needed.
980          *
981          * Verify that rowindex 'pm_rowindex' is free in the process
982          * descriptor.
983          *
984          * If it is free, link the process descriptor and the
985          * PMC.
986          */
987         ri = PMC_TO_ROWINDEX(pm);
988
989         if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL)
990                 return ENOMEM;
991
992         if (pp->pp_pmcs[ri].pp_pmc == pm) /* already present at slot [ri] */
993                 return EEXIST;
994
995         if (pp->pp_pmcs[ri].pp_pmc != NULL)
996                 return EBUSY;
997
998         pmc_link_target_process(pm, pp);
999
1000         if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) &&
1001             (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0)
1002                 pm->pm_flags |= PMC_F_NEEDS_LOGFILE;
1003
1004         pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */
1005
1006         /* issue an attach event to a configured log file */
1007         if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) {
1008                 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
1009                 if (p->p_flag & P_KTHREAD) {
1010                         fullpath = kernelname;
1011                         freepath = NULL;
1012                 } else
1013                         pmclog_process_pmcattach(pm, p->p_pid, fullpath);
1014                 if (freepath)
1015                         free(freepath, M_TEMP);
1016                 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1017                         pmc_log_process_mappings(pm->pm_owner, p);
1018         }
1019         /* mark process as using HWPMCs */
1020         PROC_LOCK(p);
1021         p->p_flag |= P_HWPMC;
1022         PROC_UNLOCK(p);
1023
1024         return 0;
1025 }
1026
1027 /*
1028  * Attach a process and optionally its children
1029  */
1030
1031 static int
1032 pmc_attach_process(struct proc *p, struct pmc *pm)
1033 {
1034         int error;
1035         struct proc *top;
1036
1037         sx_assert(&pmc_sx, SX_XLOCKED);
1038
1039         PMCDBG(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
1040             PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
1041
1042
1043         /*
1044          * If this PMC successfully allowed a GETMSR operation
1045          * in the past, disallow further ATTACHes.
1046          */
1047
1048         if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0)
1049                 return EPERM;
1050
1051         if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
1052                 return pmc_attach_one_process(p, pm);
1053
1054         /*
1055          * Traverse all child processes, attaching them to
1056          * this PMC.
1057          */
1058
1059         sx_slock(&proctree_lock);
1060
1061         top = p;
1062
1063         for (;;) {
1064                 if ((error = pmc_attach_one_process(p, pm)) != 0)
1065                         break;
1066                 if (!LIST_EMPTY(&p->p_children))
1067                         p = LIST_FIRST(&p->p_children);
1068                 else for (;;) {
1069                         if (p == top)
1070                                 goto done;
1071                         if (LIST_NEXT(p, p_sibling)) {
1072                                 p = LIST_NEXT(p, p_sibling);
1073                                 break;
1074                         }
1075                         p = p->p_pptr;
1076                 }
1077         }
1078
1079         if (error)
1080                 (void) pmc_detach_process(top, pm);
1081
1082  done:
1083         sx_sunlock(&proctree_lock);
1084         return error;
1085 }
1086
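/*
 * The loop above performs a pre-order walk of the process tree rooted
 * at 'top': visit a process, descend to its first child, and once a
 * subtree is exhausted step to the next sibling, backing up through
 * p_pptr.  For a tree top -> {A -> {C}, B} the attach order is
 * top, A, C, B.
 */
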
1087 /*
1088  * Detach a process from a PMC.  If there are no other PMCs tracking
1089  * this process, remove the process structure from its hash table.  If
1090  * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
1091  */
1092
1093 static int
1094 pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
1095 {
1096         int ri;
1097         struct pmc_process *pp;
1098
1099         sx_assert(&pmc_sx, SX_XLOCKED);
1100
1101         KASSERT(pm != NULL,
1102             ("[pmc,%d] null pm pointer", __LINE__));
1103
1104         ri = PMC_TO_ROWINDEX(pm);
1105
1106         PMCDBG(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
1107             pm, ri, p, p->p_pid, p->p_comm, flags);
1108
1109         if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
1110                 return ESRCH;
1111
1112         if (pp->pp_pmcs[ri].pp_pmc != pm)
1113                 return EINVAL;
1114
1115         pmc_unlink_target_process(pm, pp);
1116
1117         /* Issue a detach entry if a log file is configured */
1118         if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE)
1119                 pmclog_process_pmcdetach(pm, p->p_pid);
1120
1121         /*
1122          * If there are no PMCs targeting this process, we remove its
1123          * descriptor from the target hash table and unset the P_HWPMC
1124          * flag in the struct proc.
1125          */
1126         KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc,
1127             ("[pmc,%d] Illegal refcnt %d for process struct %p",
1128                 __LINE__, pp->pp_refcnt, pp));
1129
1130         if (pp->pp_refcnt != 0) /* still a target of some PMC */
1131                 return 0;
1132
1133         pmc_remove_process_descriptor(pp);
1134
1135         if (flags & PMC_FLAG_REMOVE)
1136                 free(pp, M_PMC);
1137
1138         PROC_LOCK(p);
1139         p->p_flag &= ~P_HWPMC;
1140         PROC_UNLOCK(p);
1141
1142         return 0;
1143 }
1144
1145 /*
1146  * Detach a process and optionally its descendants from a PMC.
1147  */
1148
1149 static int
1150 pmc_detach_process(struct proc *p, struct pmc *pm)
1151 {
1152         struct proc *top;
1153
1154         sx_assert(&pmc_sx, SX_XLOCKED);
1155
1156         PMCDBG(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
1157             PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
1158
1159         if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
1160                 return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1161
1162         /*
1163          * Traverse all children, detaching them from this PMC.  We
1164          * ignore errors since we could be detaching a PMC from a
1165          * partially attached proc tree.
1166          */
1167
1168         sx_slock(&proctree_lock);
1169
1170         top = p;
1171
1172         for (;;) {
1173                 (void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1174
1175                 if (!LIST_EMPTY(&p->p_children))
1176                         p = LIST_FIRST(&p->p_children);
1177                 else for (;;) {
1178                         if (p == top)
1179                                 goto done;
1180                         if (LIST_NEXT(p, p_sibling)) {
1181                                 p = LIST_NEXT(p, p_sibling);
1182                                 break;
1183                         }
1184                         p = p->p_pptr;
1185                 }
1186         }
1187
1188  done:
1189         sx_sunlock(&proctree_lock);
1190
1191         if (LIST_EMPTY(&pm->pm_targets))
1192                 pm->pm_flags &= ~PMC_F_ATTACH_DONE;
1193
1194         return 0;
1195 }
1196
1197
1198 /*
1199  * Thread context switch IN
1200  */
1201
1202 static void
1203 pmc_process_csw_in(struct thread *td)
1204 {
1205         int cpu;
1206         unsigned int adjri, ri;
1207         struct pmc *pm;
1208         struct proc *p;
1209         struct pmc_cpu *pc;
1210         struct pmc_hw *phw;
1211         pmc_value_t newvalue;
1212         struct pmc_process *pp;
1213         struct pmc_classdep *pcd;
1214
1215         p = td->td_proc;
1216
1217         if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
1218                 return;
1219
1220         KASSERT(pp->pp_proc == td->td_proc,
1221             ("[pmc,%d] not my thread state", __LINE__));
1222
1223         critical_enter(); /* no preemption from this point */
1224
1225         cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1226
1227         PMCDBG(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1228             p->p_pid, p->p_comm, pp);
1229
1230         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
1231             ("[pmc,%d] wierd CPU id %d", __LINE__, cpu));
1232
1233         pc = pmc_pcpu[cpu];
1234
1235         for (ri = 0; ri < md->pmd_npmc; ri++) {
1236
1237                 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
1238                         continue;
1239
1240                 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
1241                     ("[pmc,%d] Target PMC in non-virtual mode (%d)",
1242                         __LINE__, PMC_TO_MODE(pm)));
1243
1244                 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1245                     ("[pmc,%d] Row index mismatch pmc %d != ri %d",
1246                         __LINE__, PMC_TO_ROWINDEX(pm), ri));
1247
1248                 /*
1249                  * Only PMCs that are marked as 'RUNNING' need
1250                  * be placed on hardware.
1251                  */
1252
1253                 if (pm->pm_state != PMC_STATE_RUNNING)
1254                         continue;
1255
1256                 /* increment PMC runcount */
1257                 atomic_add_rel_int(&pm->pm_runcount, 1);
1258
1259                 /* configure the HWPMC we are going to use. */
1260                 pcd = pmc_ri_to_classdep(md, ri, &adjri);
1261                 pcd->pcd_config_pmc(cpu, adjri, pm);
1262
1263                 phw = pc->pc_hwpmcs[ri];
1264
1265                 KASSERT(phw != NULL,
1266                     ("[pmc,%d] null hw pointer", __LINE__));
1267
1268                 KASSERT(phw->phw_pmc == pm,
1269                     ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
1270                         phw->phw_pmc, pm));
1271
1272                 /*
1273                  * Write out saved value and start the PMC.
1274                  *
1275                  * Sampling PMCs use a per-process value, while
1276                  * counting mode PMCs use a per-pmc value that is
1277                  * inherited across descendants.
1278                  */
1279                 if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
1280                         mtx_pool_lock_spin(pmc_mtxpool, pm);
1281                         newvalue = PMC_PCPU_SAVED(cpu,ri) =
1282                             pp->pp_pmcs[ri].pp_pmcval;
1283                         mtx_pool_unlock_spin(pmc_mtxpool, pm);
1284                 } else {
1285                         KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
1286                             ("[pmc,%d] illegal mode=%d", __LINE__,
1287                             PMC_TO_MODE(pm)));
1288                         mtx_pool_lock_spin(pmc_mtxpool, pm);
1289                         newvalue = PMC_PCPU_SAVED(cpu, ri) =
1290                             pm->pm_gv.pm_savedvalue;
1291                         mtx_pool_unlock_spin(pmc_mtxpool, pm);
1292                 }
1293
1294                 PMCDBG(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);
1295
1296                 pcd->pcd_write_pmc(cpu, adjri, newvalue);
1297                 pcd->pcd_start_pmc(cpu, adjri);
1298         }
1299
1300         /*
1301          * perform any other architecture/cpu dependent thread
1302          * switch-in actions.
1303          */
1304
1305         (void) (*md->pmd_switch_in)(pc, pp);
1306
1307         critical_exit();
1308
1309 }
1310
1311 /*
1312  * Thread context switch OUT.
1313  */
1314
1315 static void
1316 pmc_process_csw_out(struct thread *td)
1317 {
1318         int cpu;
1319         int64_t tmp;
1320         struct pmc *pm;
1321         struct proc *p;
1322         enum pmc_mode mode;
1323         struct pmc_cpu *pc;
1324         pmc_value_t newvalue;
1325         unsigned int adjri, ri;
1326         struct pmc_process *pp;
1327         struct pmc_classdep *pcd;
1328
1329
1330         /*
1331          * Locate our process descriptor; this may be NULL if
1332          * this process is exiting and we have already removed
1333          * the process from the target process table.
1334          *
1335          * Note that due to kernel preemption, multiple
1336          * context switches may happen while the process is
1337          * exiting.
1338          *
1339          * Note also that if the target process cannot be
1340          * found we still need to deconfigure any PMCs that
1341          * are currently running on hardware.
1342          */
1343
1344         p = td->td_proc;
1345         pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);
1346
1347         /*
1348          * save PMCs
1349          */
1350
1351         critical_enter();
1352
1353         cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1354
1355         PMCDBG(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1356             p->p_pid, p->p_comm, pp);
1357
1358         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
1359             ("[pmc,%d wierd CPU id %d", __LINE__, cpu));
1360
1361         pc = pmc_pcpu[cpu];
1362
1363         /*
1364          * When a PMC gets unlinked from a target process, it will
1365          * be removed from the target's pp_pmcs[] array.
1366          *
1367          * However, on a MP system, the target could have been
1368          * executing on another CPU at the time of the unlink.
1369          * So, at context switch OUT time, we need to look at
1370          * the hardware to determine if a PMC is scheduled on
1371          * it.
1372          */
1373
1374         for (ri = 0; ri < md->pmd_npmc; ri++) {
1375
1376                 pcd = pmc_ri_to_classdep(md, ri, &adjri);
1377                 pm  = NULL;
1378                 (void) (*pcd->pcd_get_config)(cpu, adjri, &pm);
1379
1380                 if (pm == NULL) /* nothing at this row index */
1381                         continue;
1382
1383                 mode = PMC_TO_MODE(pm);
1384                 if (!PMC_IS_VIRTUAL_MODE(mode))
1385                         continue; /* not a process virtual PMC */
1386
1387                 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1388                     ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
1389                         __LINE__, PMC_TO_ROWINDEX(pm), ri));
1390
1391                 /* Stop hardware if not already stopped */
1392                 if (pm->pm_stalled == 0)
1393                         pcd->pcd_stop_pmc(cpu, adjri);
1394
1395                 /* reduce this PMC's runcount */
1396                 atomic_subtract_rel_int(&pm->pm_runcount, 1);
1397
1398                 /*
1399                  * If this PMC is associated with this process,
1400                  * save the reading.
1401                  */
1402
1403                 if (pp != NULL && pp->pp_pmcs[ri].pp_pmc != NULL) {
1404
1405                         KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
1406                             ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__,
1407                                 pm, ri, pp->pp_pmcs[ri].pp_pmc));
1408
1409                         KASSERT(pp->pp_refcnt > 0,
1410                             ("[pmc,%d] pp refcnt = %d", __LINE__,
1411                                 pp->pp_refcnt));
1412
1413                         pcd->pcd_read_pmc(cpu, adjri, &newvalue);
1414
1415                         tmp = newvalue - PMC_PCPU_SAVED(cpu,ri);
1416
1417                         PMCDBG(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd", cpu, ri,
1418                             tmp);
1419
1420                         if (mode == PMC_MODE_TS) {
1421
1422                                 /*
1423                                  * For sampling process-virtual PMCs,
1424                                  * we expect the count to be
1425                                  * decreasing as the 'value'
1426                                  * programmed into the PMC is the
1427                                  * number of events to be seen till
1428                                  * the next sampling interrupt.
1429                                  */
1430                                 if (tmp < 0)
1431                                         tmp += pm->pm_sc.pm_reloadcount;
1432                                 mtx_pool_lock_spin(pmc_mtxpool, pm);
1433                                 pp->pp_pmcs[ri].pp_pmcval -= tmp;
1434                                 if ((int64_t) pp->pp_pmcs[ri].pp_pmcval < 0)
1435                                         pp->pp_pmcs[ri].pp_pmcval +=
1436                                             pm->pm_sc.pm_reloadcount;
1437                                 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1438
1439                         } else {
1440
1441                                 /*
1442                                  * For counting process-virtual PMCs,
1443                                  * we expect the count to be
1444                                  * increasing monotonically, modulo a 64
1445                                  * bit wraparound.
1446                                  */
1447                                 KASSERT((int64_t) tmp >= 0,
1448                                     ("[pmc,%d] negative increment cpu=%d "
1449                                      "ri=%d newvalue=%jx saved=%jx "
1450                                      "incr=%jx", __LINE__, cpu, ri,
1451                                      newvalue, PMC_PCPU_SAVED(cpu,ri), tmp));
1452
1453                                 mtx_pool_lock_spin(pmc_mtxpool, pm);
1454                                 pm->pm_gv.pm_savedvalue += tmp;
1455                                 pp->pp_pmcs[ri].pp_pmcval += tmp;
1456                                 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1457
1458                                 if (pm->pm_flags & PMC_F_LOG_PROCCSW)
1459                                         pmclog_process_proccsw(pm, pp, tmp);
1460                         }
1461                 }
1462
1463                 /* mark hardware as free */
1464                 pcd->pcd_config_pmc(cpu, adjri, NULL);
1465         }
1466
1467         /*
1468          * perform any other architecture/cpu dependent thread
1469          * switch out functions.
1470          */
1471
1472         (void) (*md->pmd_switch_out)(pc, pp);
1473
1474         critical_exit();
1475 }
1476
1477 /*
1478  * Log a KLD operation.
1479  */
1480
1481 static void
1482 pmc_process_kld_load(struct pmckern_map_in *pkm)
1483 {
1484         struct pmc_owner *po;
1485
1486         sx_assert(&pmc_sx, SX_LOCKED);
1487
1488         /*
1489          * Notify owners of system sampling PMCs about KLD operations.
1490          */
1491
1492         LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1493             if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1494                 pmclog_process_map_in(po, (pid_t) -1, pkm->pm_address,
1495                     (char *) pkm->pm_file);
1496
1497         /*
1498          * TODO: Notify owners of (all) process-sampling PMCs too.
1499          */
1500
1501         return;
1502 }
1503
1504 static void
1505 pmc_process_kld_unload(struct pmckern_map_out *pkm)
1506 {
1507         struct pmc_owner *po;
1508
1509         sx_assert(&pmc_sx, SX_LOCKED);
1510
1511         LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1512             if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1513                 pmclog_process_map_out(po, (pid_t) -1,
1514                     pkm->pm_address, pkm->pm_address + pkm->pm_size);
1515
1516         /*
1517          * TODO: Notify owners of process-sampling PMCs.
1518          */
1519 }
1520
1521 /*
1522  * A mapping change for a process.
1523  */
1524
1525 static void
1526 pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm)
1527 {
1528         int ri;
1529         pid_t pid;
1530         char *fullpath, *freepath;
1531         const struct pmc *pm;
1532         struct pmc_owner *po;
1533         const struct pmc_process *pp;
1534
1535         freepath = fullpath = NULL;
1536         pmc_getfilename((struct vnode *) pkm->pm_file, &fullpath, &freepath);
1537
1538         pid = td->td_proc->p_pid;
1539
1540         /* Inform owners of all system-wide sampling PMCs. */
1541         LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1542             if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1543                 pmclog_process_map_in(po, pid, pkm->pm_address, fullpath);
1544
1545         if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1546                 goto done;
1547
1548         /*
1549          * Inform sampling PMC owners tracking this process.
1550          */
1551         for (ri = 0; ri < md->pmd_npmc; ri++)
1552                 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
1553                     PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1554                         pmclog_process_map_in(pm->pm_owner,
1555                             pid, pkm->pm_address, fullpath);
1556
1557   done:
1558         if (freepath)
1559                 free(freepath, M_TEMP);
1560 }
1561
1562
1563 /*
1564  * Log an munmap request.
1565  */
1566
1567 static void
1568 pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm)
1569 {
1570         int ri;
1571         pid_t pid;
1572         struct pmc_owner *po;
1573         const struct pmc *pm;
1574         const struct pmc_process *pp;
1575
1576         pid = td->td_proc->p_pid;
1577
1578         LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1579             if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1580                 pmclog_process_map_out(po, pid, pkm->pm_address,
1581                     pkm->pm_address + pkm->pm_size);
1582
1583         if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1584                 return;
1585
1586         for (ri = 0; ri < md->pmd_npmc; ri++)
1587                 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
1588                     PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1589                         pmclog_process_map_out(pm->pm_owner, pid,
1590                             pkm->pm_address, pkm->pm_address + pkm->pm_size);
1591 }
1592
1593 /*
1594  * Log mapping information about the kernel.
1595  */
1596
1597 static void
1598 pmc_log_kernel_mappings(struct pmc *pm)
1599 {
1600         struct pmc_owner *po;
1601         struct pmckern_map_in *km, *kmbase;
1602
1603         sx_assert(&pmc_sx, SX_LOCKED);
1604         KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
1605             ("[pmc,%d] non-sampling PMC (%p) desires mapping information",
1606                 __LINE__, (void *) pm));
1607
1608         po = pm->pm_owner;
1609
1610         if (po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE)
1611                 return;
1612
1613         /*
1614          * Log the current set of kernel modules.
1615          */
1616         kmbase = linker_hwpmc_list_objects();
1617         for (km = kmbase; km->pm_file != NULL; km++) {
1618                 PMCDBG(LOG,REG,1,"%s %p", (char *) km->pm_file,
1619                     (void *) km->pm_address);
1620                 pmclog_process_map_in(po, (pid_t) -1, km->pm_address,
1621                     km->pm_file);
1622         }
1623         free(kmbase, M_LINKER);
1624
1625         po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE;
1626 }
1627
1628 /*
1629  * Log the mappings for a single process.
1630  */
1631
1632 static void
1633 pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
1634 {
1635         int locked;
1636         vm_map_t map;
1637         struct vnode *vp;
1638         struct vmspace *vm;
1639         vm_map_entry_t entry;
1640         vm_offset_t last_end;
1641         u_int last_timestamp;
1642         struct vnode *last_vp;
1643         vm_offset_t start_addr;
1644         vm_object_t obj, lobj, tobj;
1645         char *fullpath, *freepath;
1646
1647         last_vp = NULL;
1648         last_end = (vm_offset_t) 0;
1649         fullpath = freepath = NULL;
1650
1651         if ((vm = vmspace_acquire_ref(p)) == NULL)
1652                 return;
1653
1654         map = &vm->vm_map;
1655         vm_map_lock_read(map);
1656
1657         for (entry = map->header.next; entry != &map->header; entry = entry->next) {
1658
1659                 if (entry == NULL) {
1660                         PMCDBG(LOG,OPS,2, "hwpmc: vm_map entry unexpectedly "
1661                             "NULL! pid=%d vm_map=%p\n", p->p_pid, map);
1662                         break;
1663                 }
1664
1665                 /*
1666                  * We only care about executable map entries.
1667                  */
1668                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
1669                     !(entry->protection & VM_PROT_EXECUTE) ||
1670                     (entry->object.vm_object == NULL)) {
1671                         continue;
1672                 }
1673
1674                 obj = entry->object.vm_object;
1675                 VM_OBJECT_LOCK(obj);
1676
1677                 /* 
1678                  * Walk the backing_object list to find the base
1679                  * (non-shadowed) vm_object.
1680                  */
1681                 for (lobj = tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
1682                         if (tobj != obj)
1683                                 VM_OBJECT_LOCK(tobj);
1684                         if (lobj != obj)
1685                                 VM_OBJECT_UNLOCK(lobj);
1686                         lobj = tobj;
1687                 }
1688
1689                 /*
1690                  * At this point lobj is the base vm_object and it is locked.
1691                  */
1692                 if (lobj == NULL) {
1693                         PMCDBG(LOG,OPS,2, "hwpmc: lobj unexpectedly NULL! pid=%d "
1694                             "vm_map=%p vm_obj=%p\n", p->p_pid, map, obj);
1695                         VM_OBJECT_UNLOCK(obj);
1696                         continue;
1697                 }
1698
1699                 if (lobj->type != OBJT_VNODE || lobj->handle == NULL) {
1700                         if (lobj != obj)
1701                                 VM_OBJECT_UNLOCK(lobj);
1702                         VM_OBJECT_UNLOCK(obj);
1703                         continue;
1704                 }
1705
1706                 /*
1707                  * Skip contiguous regions that point to the same
1708                  * vnode, so we don't emit redundant MAP-IN
1709                  * directives.
1710                  */
1711                 if (entry->start == last_end && lobj->handle == last_vp) {
1712                         last_end = entry->end;
1713                         if (lobj != obj)
1714                                 VM_OBJECT_UNLOCK(lobj);
1715                         VM_OBJECT_UNLOCK(obj);
1716                         continue;
1717                 }
1718
1719                 /* 
1720                  * We don't want to keep the proc's vm_map or this
1721                  * vm_object locked while we walk the pathname, since
1722                  * vn_fullpath() can sleep.  However, if we drop the
1723                  * lock, it's possible for concurrent activity to
1724                  * modify the vm_map list.  To protect against this,
1725                  * we save the vm_map timestamp before we release the
1726                  * lock, and check it after we reacquire the lock
1727                  * below.
1728                  */
1729                 start_addr = entry->start;
1730                 last_end = entry->end;
1731                 last_timestamp = map->timestamp;
1732                 vm_map_unlock_read(map);
1733
1734                 vp = lobj->handle;
1735                 vref(vp);
1736                 if (lobj != obj)
1737                         VM_OBJECT_UNLOCK(lobj);
1738
1739                 VM_OBJECT_UNLOCK(obj);
1740
1741                 freepath = NULL;
1742                 pmc_getfilename(vp, &fullpath, &freepath);
1743                 last_vp = vp;
1744
1745                 locked = VFS_LOCK_GIANT(vp->v_mount);
1746                 vrele(vp);
1747                 VFS_UNLOCK_GIANT(locked);
1748
1749                 vp = NULL;
1750                 pmclog_process_map_in(po, p->p_pid, start_addr, fullpath);
1751                 if (freepath)
1752                         free(freepath, M_TEMP);
1753
1754                 vm_map_lock_read(map);
1755
1756                 /*
1757                  * If our saved timestamp doesn't match, this means
1758                  * that the vm_map was modified out from under us and
1759                  * we can't trust our current "entry" pointer.  Do a
1760                  * new lookup for this entry.  If there is no entry
1761                  * for this address range, vm_map_lookup_entry() will
1762                  * return the previous one, so we always want to go to
1763                  * entry->next on the next loop iteration.
1764                  * 
1765                  * There is an edge condition here that can occur if
1766                  * there is no entry at or before this address.  In
1767                  * this situation, vm_map_lookup_entry returns
1768                  * &map->header, which would cause our loop to abort
1769                  * without processing the rest of the map.  However,
1770                  * in practice this will never happen for process
1771                  * vm_map.  This is because the executable's text
1772                  * segment is the first mapping in the proc's address
1773                  * space, and this mapping is never removed until the
1774                  * process exits, so there will always be a non-header
1775                  * entry at or before the requested address for
1776                  * vm_map_lookup_entry to return.
1777                  */
1778                 if (map->timestamp != last_timestamp)
1779                         vm_map_lookup_entry(map, last_end - 1, &entry);
1780         }
1781
1782         vm_map_unlock_read(map);
1783         vmspace_free(vm);
1784         return;
1785 }
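
/*
 * Illustrative sketch (not part of this driver): the unlock,
 * sleepable-work, relock-and-revalidate pattern used above, reduced
 * to its essentials.  The 'example_walk' name is hypothetical.
 */
#if 0
static void
example_walk(vm_map_t map)
{
	vm_map_entry_t entry;
	vm_offset_t end;
	u_int ts;

	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		end = entry->end;
		ts = map->timestamp;		/* snapshot the map version */
		vm_map_unlock_read(map);

		/* ... do work that may sleep, e.g. vn_fullpath() ... */

		vm_map_lock_read(map);
		if (map->timestamp != ts)	/* map changed underneath us */
			vm_map_lookup_entry(map, end - 1, &entry);
	}
	vm_map_unlock_read(map);
}
#endif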
1786
1787 /*
1788  * Log mappings for all processes in the system.
1789  */
1790
1791 static void
1792 pmc_log_all_process_mappings(struct pmc_owner *po)
1793 {
1794         struct proc *p, *top;
1795
1796         sx_assert(&pmc_sx, SX_XLOCKED);
1797
1798         if ((p = pfind(1)) == NULL)
1799                 panic("[pmc,%d] Cannot find init", __LINE__);
1800
1801         PROC_UNLOCK(p);
1802
1803         sx_slock(&proctree_lock);
1804
1805         top = p;
1806
1807         for (;;) {
1808                 pmc_log_process_mappings(po, p);
1809                 if (!LIST_EMPTY(&p->p_children))
1810                         p = LIST_FIRST(&p->p_children);
1811                 else for (;;) {
1812                         if (p == top)
1813                                 goto done;
1814                         if (LIST_NEXT(p, p_sibling)) {
1815                                 p = LIST_NEXT(p, p_sibling);
1816                                 break;
1817                         }
1818                         p = p->p_pptr;
1819                 }
1820         }
1821  done:
1822         sx_sunlock(&proctree_lock);
1823 }
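
/*
 * Illustrative sketch (not part of this driver): the recursion-free
 * pre-order walk used by pmc_log_all_process_mappings() above.  Visit
 * a node, then descend to its first child; when a subtree is
 * exhausted, climb until an ancestor with an unvisited sibling is
 * found.  The 'example_visit' callback is hypothetical.
 */
#if 0
static void
example_walk_proctree(struct proc *top)
{
	struct proc *p;

	p = top;
	for (;;) {
		example_visit(p);
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);		/* descend */
		else for (;;) {
			if (p == top)
				return;				/* done */
			if (LIST_NEXT(p, p_sibling) != NULL) {
				p = LIST_NEXT(p, p_sibling);	/* sibling */
				break;
			}
			p = p->p_pptr;				/* ascend */
		}
	}
}
#endif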
1824
1825 /*
1826  * The 'hook' invoked from the kernel proper
1827  */
1828
1829
1830 #ifdef  DEBUG
1831 const char *pmc_hooknames[] = {
1832         /* these strings correspond to PMC_FN_* in <sys/pmckern.h> */
1833         "",
1834         "EXEC",
1835         "CSW-IN",
1836         "CSW-OUT",
1837         "SAMPLE",
1838         "KLDLOAD",
1839         "KLDUNLOAD",
1840         "MMAP",
1841         "MUNMAP",
1842         "CALLCHAIN-NMI",
1843         "CALLCHAIN-SOFT",
1844         "SOFTSAMPLING"
1845 };
1846 #endif
1847
1848 static int
1849 pmc_hook_handler(struct thread *td, int function, void *arg)
1850 {
1851
1852         PMCDBG(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
1853             pmc_hooknames[function], arg);
1854
1855         switch (function)
1856         {
1857
1858         /*
1859          * Process exec()
1860          */
1861
1862         case PMC_FN_PROCESS_EXEC:
1863         {
1864                 char *fullpath, *freepath;
1865                 unsigned int ri;
1866                 int is_using_hwpmcs;
1867                 struct pmc *pm;
1868                 struct proc *p;
1869                 struct pmc_owner *po;
1870                 struct pmc_process *pp;
1871                 struct pmckern_procexec *pk;
1872
1873                 sx_assert(&pmc_sx, SX_XLOCKED);
1874
1875                 p = td->td_proc;
1876                 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
1877
1878                 pk = (struct pmckern_procexec *) arg;
1879
1880                 /* Inform owners of SS mode PMCs of the exec event. */
1881                 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1882                     if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1883                             pmclog_process_procexec(po, PMC_ID_INVALID,
1884                                 p->p_pid, pk->pm_entryaddr, fullpath);
1885
1886                 PROC_LOCK(p);
1887                 is_using_hwpmcs = p->p_flag & P_HWPMC;
1888                 PROC_UNLOCK(p);
1889
1890                 if (!is_using_hwpmcs) {
1891                         if (freepath)
1892                                 free(freepath, M_TEMP);
1893                         break;
1894                 }
1895
1896                 /*
1897                  * PMCs are not inherited across an exec():  remove any
1898                  * PMCs that this process is the owner of.
1899                  */
1900
1901                 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
1902                         pmc_remove_owner(po);
1903                         pmc_destroy_owner_descriptor(po);
1904                 }
1905
1906                 /*
1907                  * If the process being exec'ed is not the target of any
1908                  * PMC, we are done.
1909                  */
1910                 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) {
1911                         if (freepath)
1912                                 free(freepath, M_TEMP);
1913                         break;
1914                 }
1915
1916                 /*
1917                  * Log the exec event to all monitoring owners.  Skip
1918                  * owners who have already received the event because
1919                  * they had system sampling PMCs active.
1920                  */
1921                 for (ri = 0; ri < md->pmd_npmc; ri++)
1922                         if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
1923                                 po = pm->pm_owner;
1924                                 if (po->po_sscount == 0 &&
1925                                     po->po_flags & PMC_PO_OWNS_LOGFILE)
1926                                         pmclog_process_procexec(po, pm->pm_id,
1927                                             p->p_pid, pk->pm_entryaddr,
1928                                             fullpath);
1929                         }
1930
1931                 if (freepath)
1932                         free(freepath, M_TEMP);
1933
1934
1935                 PMCDBG(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
1936                     p, p->p_pid, p->p_comm, pk->pm_credentialschanged);
1937
1938                 if (pk->pm_credentialschanged == 0) /* no change */
1939                         break;
1940
1941                 /*
1942                  * If the newly exec()'ed process has a different credential
1943                  * than before, allow it to be the target of a PMC only if
1944                  * the PMC's owner has sufficient privilege.
1945                  */
1946
1947                 for (ri = 0; ri < md->pmd_npmc; ri++)
1948                         if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL)
1949                                 if (pmc_can_attach(pm, td->td_proc) != 0)
1950                                         pmc_detach_one_process(td->td_proc,
1951                                             pm, PMC_FLAG_NONE);
1952
1953                 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc,
1954                     ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__,
1955                         pp->pp_refcnt, pp));
1956
1957                 /*
1958                  * If this process is no longer the target of any
1959                  * PMCs, we can remove the process entry and free
1960                  * up space.
1961                  */
1962
1963                 if (pp->pp_refcnt == 0) {
1964                         pmc_remove_process_descriptor(pp);
1965                         free(pp, M_PMC);
1966                         break;
1967                 }
1968
1969         }
1970         break;
1971
1972         case PMC_FN_CSW_IN:
1973                 pmc_process_csw_in(td);
1974                 break;
1975
1976         case PMC_FN_CSW_OUT:
1977                 pmc_process_csw_out(td);
1978                 break;
1979
1980         /*
1981          * Process accumulated PC samples.
1982          *
1983          * This function is expected to be called by hardclock() for
1984          * each CPU that has accumulated PC samples.
1985          *
1986          * This function is to be executed on the CPU whose samples
1987          * are being processed.
1988          */
1989         case PMC_FN_DO_SAMPLES:
1990
1991                 /*
1992                  * Clear the CPU-specific bit in the CPU mask before
1993                  * doing the rest of the processing.  If the NMI handler
1994                  * gets invoked after the "atomic_clear_int()" call
1995                  * below but before "pmc_process_samples()" gets
1996                  * around to processing the interrupt, then we will
1997                  * come back here at the next hardclock() tick (and
1998                  * may find nothing to do if "pmc_process_samples()"
1999                  * had already processed the interrupt).  We don't
2000                  * lose the interrupt sample.
2001                  */
2002                 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmc_cpumask);
2003                 pmc_process_samples(PCPU_GET(cpuid), PMC_HR);
2004                 pmc_process_samples(PCPU_GET(cpuid), PMC_SR);
2005                 break;
2006
2007
2008         case PMC_FN_KLD_LOAD:
2009                 sx_assert(&pmc_sx, SX_LOCKED);
2010                 pmc_process_kld_load((struct pmckern_map_in *) arg);
2011                 break;
2012
2013         case PMC_FN_KLD_UNLOAD:
2014                 sx_assert(&pmc_sx, SX_LOCKED);
2015                 pmc_process_kld_unload((struct pmckern_map_out *) arg);
2016                 break;
2017
2018         case PMC_FN_MMAP:
2019                 sx_assert(&pmc_sx, SX_LOCKED);
2020                 pmc_process_mmap(td, (struct pmckern_map_in *) arg);
2021                 break;
2022
2023         case PMC_FN_MUNMAP:
2024                 sx_assert(&pmc_sx, SX_LOCKED);
2025                 pmc_process_munmap(td, (struct pmckern_map_out *) arg);
2026                 break;
2027
2028         case PMC_FN_USER_CALLCHAIN:
2029                 /*
2030                  * Record a call chain.
2031                  */
2032                 KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2033                     __LINE__));
2034
2035                 pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_HR,
2036                     (struct trapframe *) arg);
2037                 td->td_pflags &= ~TDP_CALLCHAIN;
2038                 break;
2039
2040         case PMC_FN_USER_CALLCHAIN_SOFT:
2041                 /*
2042                  * Record a call chain.
2043                  */
2044                 KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2045                     __LINE__));
2046                 pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_SR,
2047                     (struct trapframe *) arg);
2048                 td->td_pflags &= ~TDP_CALLCHAIN;
2049                 break;
2050
2051         case PMC_FN_SOFT_SAMPLING:
2052                 /*
2053                  * Call soft PMC sampling intr.
2054                  */
2055                 pmc_soft_intr((struct pmckern_soft *) arg);
2056                 break;
2057
2058         default:
2059 #ifdef  DEBUG
2060                 KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
2061 #endif
2062                 break;
2063
2064         }
2065
2066         return 0;
2067 }
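
/*
 * For context, a hedged sketch (not part of this driver) of how a
 * call site in the kernel proper reaches pmc_hook_handler(): the
 * kernel tests process/thread state and dispatches through the
 * 'pmc_hook' function pointer, typically via the PMC_CALL_HOOK()
 * family of macros in <sys/pmckern.h>.  The exact macro variant and
 * argument structure depend on the call site; 'pe' is hypothetical.
 */
#if 0
	if (PMC_PROC_IS_USING_PMCS(p)) {
		/* 'pe' would be a filled-in struct pmckern_procexec */
		PMC_CALL_HOOK(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
	}
#endif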
2068
2069 /*
2070  * allocate a 'struct pmc_owner' descriptor in the owner hash table.
2071  */
2072
2073 static struct pmc_owner *
2074 pmc_allocate_owner_descriptor(struct proc *p)
2075 {
2076         uint32_t hindex;
2077         struct pmc_owner *po;
2078         struct pmc_ownerhash *poh;
2079
2080         hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2081         poh = &pmc_ownerhash[hindex];
2082
2083         /* allocate space for the owner descriptor */
2084         po = malloc(sizeof(struct pmc_owner), M_PMC, M_WAITOK|M_ZERO);
2085         po->po_sscount = po->po_error = po->po_flags = po->po_logprocmaps = 0;
2086         po->po_file  = NULL;
2087         po->po_owner = p;
2088         po->po_kthread = NULL;
2089         LIST_INIT(&po->po_pmcs);
2090         LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
2091
2092         TAILQ_INIT(&po->po_logbuffers);
2093         mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN);
2094
2095         PMCDBG(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
2096             p, p->p_pid, p->p_comm, po);
2097
2098         return po;
2099 }
2100
2101 static void
2102 pmc_destroy_owner_descriptor(struct pmc_owner *po)
2103 {
2104
2105         PMCDBG(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)",
2106             po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm);
2107
2108         mtx_destroy(&po->po_mtx);
2109         free(po, M_PMC);
2110 }
2111
2112 /*
2113  * find the descriptor corresponding to process 'p', adding or removing it
2114  * as specified by 'mode'.
2115  */
2116
2117 static struct pmc_process *
2118 pmc_find_process_descriptor(struct proc *p, uint32_t mode)
2119 {
2120         uint32_t hindex;
2121         struct pmc_process *pp, *ppnew;
2122         struct pmc_processhash *pph;
2123
2124         hindex = PMC_HASH_PTR(p, pmc_processhashmask);
2125         pph = &pmc_processhash[hindex];
2126
2127         ppnew = NULL;
2128
2129         /*
2130          * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we
2131          * cannot call malloc(9) once we hold a spin lock.
2132          */
2133         if (mode & PMC_FLAG_ALLOCATE)
2134                 ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc *
2135                     sizeof(struct pmc_targetstate), M_PMC, M_WAITOK|M_ZERO);
2136
2137         mtx_lock_spin(&pmc_processhash_mtx);
2138         LIST_FOREACH(pp, pph, pp_next)
2139             if (pp->pp_proc == p)
2140                     break;
2141
2142         if ((mode & PMC_FLAG_REMOVE) && pp != NULL)
2143                 LIST_REMOVE(pp, pp_next);
2144
2145         if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL &&
2146             ppnew != NULL) {
2147                 ppnew->pp_proc = p;
2148                 LIST_INSERT_HEAD(pph, ppnew, pp_next);
2149                 pp = ppnew;
2150                 ppnew = NULL;
2151         }
2152         mtx_unlock_spin(&pmc_processhash_mtx);
2153
2154         if (pp != NULL && ppnew != NULL)
2155                 free(ppnew, M_PMC);
2156
2157         return pp;
2158 }
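
/*
 * Illustrative sketch (not part of this driver): the pre-allocate,
 * lock, insert-or-discard pattern used above, in generic form.  We
 * cannot malloc(9) with M_WAITOK while holding a spin mutex, so the
 * allocation is done up front and thrown away if a racing thread
 * inserted the entry first.  The 'example_*' names are hypothetical.
 */
#if 0
	new = malloc(sizeof(*new), M_PMC, M_WAITOK|M_ZERO); /* may sleep */

	mtx_lock_spin(&example_mtx);		/* no sleeping past here */
	found = example_lookup(key);
	if (found == NULL) {
		example_insert(new);
		found = new;
		new = NULL;			/* ownership transferred */
	}
	mtx_unlock_spin(&example_mtx);

	if (new != NULL)
		free(new, M_PMC);		/* lost the race; discard */
#endif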
2159
2160 /*
2161  * remove a process descriptor from the process hash table.
2162  */
2163
2164 static void
2165 pmc_remove_process_descriptor(struct pmc_process *pp)
2166 {
2167         KASSERT(pp->pp_refcnt == 0,
2168             ("[pmc,%d] Removing process descriptor %p with count %d",
2169                 __LINE__, pp, pp->pp_refcnt));
2170
2171         mtx_lock_spin(&pmc_processhash_mtx);
2172         LIST_REMOVE(pp, pp_next);
2173         mtx_unlock_spin(&pmc_processhash_mtx);
2174 }
2175
2176
2177 /*
2178  * find an owner descriptor corresponding to proc 'p'
2179  */
2180
2181 static struct pmc_owner *
2182 pmc_find_owner_descriptor(struct proc *p)
2183 {
2184         uint32_t hindex;
2185         struct pmc_owner *po;
2186         struct pmc_ownerhash *poh;
2187
2188         hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2189         poh = &pmc_ownerhash[hindex];
2190
2191         po = NULL;
2192         LIST_FOREACH(po, poh, po_next)
2193             if (po->po_owner == p)
2194                     break;
2195
2196         PMCDBG(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
2197             "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);
2198
2199         return po;
2200 }
2201
2202 /*
2203  * pmc_allocate_pmc_descriptor
2204  *
2205  * Allocate a pmc descriptor and initialize its
2206  * fields.
2207  */
2208
2209 static struct pmc *
2210 pmc_allocate_pmc_descriptor(void)
2211 {
2212         struct pmc *pmc;
2213
2214         pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK|M_ZERO);
2215
2216         if (pmc != NULL) {
2217                 pmc->pm_owner = NULL;
2218                 LIST_INIT(&pmc->pm_targets);
2219         }
2220
2221         PMCDBG(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
2222
2223         return pmc;
2224 }
2225
2226 /*
2227  * Destroy a pmc descriptor.
2228  */
2229
2230 static void
2231 pmc_destroy_pmc_descriptor(struct pmc *pm)
2232 {
2233         (void) pm;
2234
2235 #ifdef  DEBUG
2236         KASSERT(pm->pm_state == PMC_STATE_DELETED ||
2237             pm->pm_state == PMC_STATE_FREE,
2238             ("[pmc,%d] destroying non-deleted PMC", __LINE__));
2239         KASSERT(LIST_EMPTY(&pm->pm_targets),
2240             ("[pmc,%d] destroying pmc with targets", __LINE__));
2241         KASSERT(pm->pm_owner == NULL,
2242             ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
2243         KASSERT(pm->pm_runcount == 0,
2244             ("[pmc,%d] pmc has non-zero run count %d", __LINE__,
2245                 pm->pm_runcount));
2246 #endif
2247 }
2248
2249 static void
2250 pmc_wait_for_pmc_idle(struct pmc *pm)
2251 {
2252 #ifdef DEBUG
2253         volatile int maxloop;
2254
2255         maxloop = 100 * pmc_cpu_max();
2256 #endif
2257         /*
2258          * Loop (with a forced context switch) until the PMC's runcount
2259          * comes down to zero.
2260          */
2261         while (atomic_load_acq_32(&pm->pm_runcount) > 0) {
2262 #ifdef DEBUG
2263                 maxloop--;
2264                 KASSERT(maxloop > 0,
2265                     ("[pmc,%d] (ri%d, rc%d) waiting too long for "
2266                         "pmc to be free", __LINE__,
2267                         PMC_TO_ROWINDEX(pm), pm->pm_runcount));
2268 #endif
2269                 pmc_force_context_switch();
2270         }
2271 }
2272
2273 /*
2274  * This function does the following things:
2275  *
2276  *  - detaches the PMC from hardware
2277  *  - unlinks all target processes that were attached to it
2278  *  - removes the PMC from its owner's list
2279  *  - destroys the PMC private mutex
2280  *
2281  * Once this function completes, the given pmc pointer can be safely
2282  * freed by the caller.
2283  */
2284
2285 static void
2286 pmc_release_pmc_descriptor(struct pmc *pm)
2287 {
2288         enum pmc_mode mode;
2289         struct pmc_hw *phw;
2290         u_int adjri, ri, cpu;
2291         struct pmc_owner *po;
2292         struct pmc_binding pb;
2293         struct pmc_process *pp;
2294         struct pmc_classdep *pcd;
2295         struct pmc_target *ptgt, *tmp;
2296
2297         sx_assert(&pmc_sx, SX_XLOCKED);
2298
2299         KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));
2300
2301         ri   = PMC_TO_ROWINDEX(pm);
2302         pcd  = pmc_ri_to_classdep(md, ri, &adjri);
2303         mode = PMC_TO_MODE(pm);
2304
2305         PMCDBG(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
2306             mode);
2307
2308         /*
2309          * First, we take the PMC off hardware.
2310          */
2311         cpu = 0;
2312         if (PMC_IS_SYSTEM_MODE(mode)) {
2313
2314                 /*
2315                  * A system mode PMC runs on a specific CPU.  Switch
2316                  * to this CPU and turn hardware off.
2317                  */
2318                 pmc_save_cpu_binding(&pb);
2319
2320                 cpu = PMC_TO_CPU(pm);
2321
2322                 pmc_select_cpu(cpu);
2323
2324                 /* switch off non-stalled CPUs */
2325                 if (pm->pm_state == PMC_STATE_RUNNING &&
2326                     pm->pm_stalled == 0) {
2327
2328                         phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
2329
2330                         KASSERT(phw->phw_pmc == pm,
2331                             ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
2332                                 __LINE__, ri, phw->phw_pmc, pm));
2333                         PMCDBG(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);
2334
2335                         critical_enter();
2336                         pcd->pcd_stop_pmc(cpu, adjri);
2337                         critical_exit();
2338                 }
2339
2340                 PMCDBG(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);
2341
2342                 critical_enter();
2343                 pcd->pcd_config_pmc(cpu, adjri, NULL);
2344                 critical_exit();
2345
2346                 /* adjust the global and process count of SS mode PMCs */
2347                 if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) {
2348                         po = pm->pm_owner;
2349                         po->po_sscount--;
2350                         if (po->po_sscount == 0) {
2351                                 atomic_subtract_rel_int(&pmc_ss_count, 1);
2352                                 LIST_REMOVE(po, po_ssnext);
2353                         }
2354                 }
2355
2356                 pm->pm_state = PMC_STATE_DELETED;
2357
2358                 pmc_restore_cpu_binding(&pb);
2359
2360                 /*
2361                  * We could have references to this PMC structure in
2362                  * the per-cpu sample queues.  Wait for the queue to
2363                  * drain.
2364                  */
2365                 pmc_wait_for_pmc_idle(pm);
2366
2367         } else if (PMC_IS_VIRTUAL_MODE(mode)) {
2368
2369                 /*
2370                  * A virtual PMC could be running on multiple CPUs at
2371                  * a given instant.
2372                  *
2373                  * By marking its state as DELETED, we ensure that
2374                  * this PMC is never further scheduled on hardware.
2375                  *
2376                  * Then we wait until all CPUs are done with this PMC.
2377                  */
2378                 pm->pm_state = PMC_STATE_DELETED;
2379
2380
2381                 /* Wait for the PMC's runcount to come to zero. */
2382                 pmc_wait_for_pmc_idle(pm);
2383
2384                 /*
2385                  * At this point the PMC is off all CPUs and cannot be
2386                  * freshly scheduled onto a CPU.  It is now safe to
2387                  * unlink all targets from this PMC.  If a
2388                  * process-record's refcount falls to zero, we remove
2389                  * it from the hash table.  The module-wide SX lock
2390                  * protects us from races.
2391                  */
2392                 LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
2393                         pp = ptgt->pt_process;
2394                         pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */
2395
2396                         PMCDBG(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);
2397
2398                         /*
2399                          * If the target process record shows that no
2400                          * PMCs are attached to it, reclaim its space.
2401                          */
2402
2403                         if (pp->pp_refcnt == 0) {
2404                                 pmc_remove_process_descriptor(pp);
2405                                 free(pp, M_PMC);
2406                         }
2407                 }
2408
2409                 cpu = curthread->td_oncpu; /* setup cpu for pcd_release_pmc() */
2410
2411         }
2412
2413         /*
2414          * Release any MD resources
2415          */
2416         (void) pcd->pcd_release_pmc(cpu, adjri, pm);
2417
2418         /*
2419          * Update row disposition
2420          */
2421
2422         if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm)))
2423                 PMC_UNMARK_ROW_STANDALONE(ri);
2424         else
2425                 PMC_UNMARK_ROW_THREAD(ri);
2426
2427         /* unlink from the owner's list */
2428         if (pm->pm_owner) {
2429                 LIST_REMOVE(pm, pm_next);
2430                 pm->pm_owner = NULL;
2431         }
2432
2433         pmc_destroy_pmc_descriptor(pm);
2434 }
2435
2436 /*
2437  * Register an owner and a pmc.
2438  */
2439
2440 static int
2441 pmc_register_owner(struct proc *p, struct pmc *pmc)
2442 {
2443         struct pmc_owner *po;
2444
2445         sx_assert(&pmc_sx, SX_XLOCKED);
2446
2447         if ((po = pmc_find_owner_descriptor(p)) == NULL)
2448                 if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
2449                         return ENOMEM;
2450
2451         KASSERT(pmc->pm_owner == NULL,
2452             ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
2453         pmc->pm_owner  = po;
2454
2455         LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next);
2456
2457         PROC_LOCK(p);
2458         p->p_flag |= P_HWPMC;
2459         PROC_UNLOCK(p);
2460
2461         if (po->po_flags & PMC_PO_OWNS_LOGFILE)
2462                 pmclog_process_pmcallocate(pmc);
2463
2464         PMCDBG(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p",
2465             po, pmc);
2466
2467         return 0;
2468 }
2469
2470 /*
2471  * Return the current row disposition:
2472  * == 0 => FREE
2473  *  > 0 => PROCESS MODE
2474  *  < 0 => SYSTEM MODE
2475  */
2476
2477 int
2478 pmc_getrowdisp(int ri)
2479 {
2480         return pmc_pmcdisp[ri];
2481 }
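
/*
 * Illustrative sketch (not part of this driver): rows in
 * pmc_pmcdisp[] behave as signed reference counts, so a caller can
 * decode the disposition returned above as follows.  The
 * 'example_rowdisp_name' helper is hypothetical.
 */
#if 0
static const char *
example_rowdisp_name(int ri)
{
	int disp;

	disp = pmc_getrowdisp(ri);
	if (disp == 0)
		return ("FREE");
	return (disp > 0 ? "THREAD (process mode)" :
	    "STANDALONE (system mode)");
}
#endif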
2482
2483 /*
2484  * Check if a PMC at row index 'ri' can be allocated to the current
2485  * process.
2486  *
2487  * Allocation can fail if:
2488  *   - the current process is already being profiled by a PMC at index 'ri',
2489  *     attached to it via OP_PMCATTACH.
2490  *   - the current process has already allocated a PMC at index 'ri'
2491  *     via OP_ALLOCATE.
2492  */
2493
2494 static int
2495 pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu)
2496 {
2497         enum pmc_mode mode;
2498         struct pmc *pm;
2499         struct pmc_owner *po;
2500         struct pmc_process *pp;
2501
2502         PMCDBG(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d "
2503             "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu);
2504
2505         /*
2506          * We shouldn't have already allocated a process-mode PMC at
2507          * row index 'ri'.
2508          *
2509          * We shouldn't have allocated a system-wide PMC on the same
2510          * CPU and same RI.
2511          */
2512         if ((po = pmc_find_owner_descriptor(p)) != NULL)
2513                 LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
2514                     if (PMC_TO_ROWINDEX(pm) == ri) {
2515                             mode = PMC_TO_MODE(pm);
2516                             if (PMC_IS_VIRTUAL_MODE(mode))
2517                                     return EEXIST;
2518                             if (PMC_IS_SYSTEM_MODE(mode) &&
2519                                 (int) PMC_TO_CPU(pm) == cpu)
2520                                     return EEXIST;
2521                     }
2522                 }
2523
2524         /*
2525          * We also shouldn't be the target of any PMC at this index
2526          * since otherwise a PMC_ATTACH to ourselves will fail.
2527          */
2528         if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
2529                 if (pp->pp_pmcs[ri].pp_pmc)
2530                         return EEXIST;
2531
2532         PMCDBG(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
2533             p, p->p_pid, p->p_comm, ri);
2534
2535         return 0;
2536 }
2537
2538 /*
2539  * Check if a given PMC at row index 'ri' can be currently used in
2540  * mode 'mode'.
2541  */
2542
2543 static int
2544 pmc_can_allocate_row(int ri, enum pmc_mode mode)
2545 {
2546         enum pmc_disp   disp;
2547
2548         sx_assert(&pmc_sx, SX_XLOCKED);
2549
2550         PMCDBG(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
2551
2552         if (PMC_IS_SYSTEM_MODE(mode))
2553                 disp = PMC_DISP_STANDALONE;
2554         else
2555                 disp = PMC_DISP_THREAD;
2556
2557         /*
2558          * check disposition for PMC row 'ri':
2559          *
2560          * Expected disposition         Row-disposition         Result
2561          *
2562          * STANDALONE                   STANDALONE or FREE      proceed
2563          * STANDALONE                   THREAD                  fail
2564          * THREAD                       THREAD or FREE          proceed
2565          * THREAD                       STANDALONE              fail
2566          */
2567
2568         if (!PMC_ROW_DISP_IS_FREE(ri) &&
2569             !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
2570             !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
2571                 return EBUSY;
2572
2573         /*
2574          * All OK
2575          */
2576
2577         PMCDBG(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
2578
2579         return 0;
2580
2581 }
2582
2583 /*
2584  * Find a PMC descriptor with user handle 'pmcid' for owner 'po'.
2585  */
2586
2587 static struct pmc *
2588 pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
2589 {
2590         struct pmc *pm;
2591
2592         KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
2593             ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
2594                 PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));
2595
2596         LIST_FOREACH(pm, &po->po_pmcs, pm_next)
2597             if (pm->pm_id == pmcid)
2598                     return pm;
2599
2600         return NULL;
2601 }
2602
2603 static int
2604 pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
2605 {
2606
2607         struct pmc *pm;
2608         struct pmc_owner *po;
2609
2610         PMCDBG(PMC,FND,1, "find-pmc id=%d", pmcid);
2611
2612         if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL)
2613                 return ESRCH;
2614
2615         if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
2616                 return EINVAL;
2617
2618         PMCDBG(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
2619
2620         *pmc = pm;
2621         return 0;
2622 }
2623
2624 /*
2625  * Start a PMC.
2626  */
2627
2628 static int
2629 pmc_start(struct pmc *pm)
2630 {
2631         enum pmc_mode mode;
2632         struct pmc_owner *po;
2633         struct pmc_binding pb;
2634         struct pmc_classdep *pcd;
2635         int adjri, error, cpu, ri;
2636
2637         KASSERT(pm != NULL,
2638             ("[pmc,%d] null pm", __LINE__));
2639
2640         mode = PMC_TO_MODE(pm);
2641         ri   = PMC_TO_ROWINDEX(pm);
2642         pcd  = pmc_ri_to_classdep(md, ri, &adjri);
2643
2644         error = 0;
2645
2646         PMCDBG(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);
2647
2648         po = pm->pm_owner;
2649
2650         /*
2651          * Disallow PMCSTART if a logfile is required but has not been
2652          * configured yet.
2653          */
2654         if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
2655             (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
2656                 return (EDOOFUS);       /* programming error */
2657
2658         /*
2659          * If this is a sampling mode PMC, log mapping information for
2660          * the kernel modules that are currently loaded.
2661          */
2662         if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
2663             pmc_log_kernel_mappings(pm);
2664
2665         if (PMC_IS_VIRTUAL_MODE(mode)) {
2666
2667                 /*
2668                  * If a PMCATTACH has never been done on this PMC,
2669                  * attach it to its owner process.
2670                  */
2671
2672                 if (LIST_EMPTY(&pm->pm_targets))
2673                         error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH :
2674                             pmc_attach_process(po->po_owner, pm);
2675
2676                 /*
2677                  * If the PMC is attached to its owner, then force a context
2678                  * switch to ensure that the MD state gets set correctly.
2679                  */
2680
2681                 if (error == 0) {
2682                         pm->pm_state = PMC_STATE_RUNNING;
2683                         if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER)
2684                                 pmc_force_context_switch();
2685                 }
2686
2687                 return (error);
2688         }
2689
2690
2691         /*
2692          * A system-wide PMC.
2693          *
2694          * Add the owner to the global list if this is a system-wide
2695          * sampling PMC.
2696          */
2697
2698         if (mode == PMC_MODE_SS) {
2699                 if (po->po_sscount == 0) {
2700                         LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
2701                         atomic_add_rel_int(&pmc_ss_count, 1);
2702                         PMCDBG(PMC,OPS,1, "po=%p in global list", po);
2703                 }
2704                 po->po_sscount++;
2705
2706                 /*
2707                  * Log mapping information for all existing processes in the
2708                  * system.  Subsequent mappings are logged as they happen;
2709                  * see pmc_process_mmap().
2710                  */
2711                 if (po->po_logprocmaps == 0) {
2712                         pmc_log_all_process_mappings(po);
2713                         po->po_logprocmaps = 1;
2714                 }
2715         }
2716
2717         /*
2718          * Move to the CPU associated with this
2719          * PMC, and start the hardware.
2720          */
2721
2722         pmc_save_cpu_binding(&pb);
2723
2724         cpu = PMC_TO_CPU(pm);
2725
2726         if (!pmc_cpu_is_active(cpu))
2727                 return (ENXIO);
2728
2729         pmc_select_cpu(cpu);
2730
2731         /*
2732          * Global PMCs are configured at allocation time, so write
2733          * out the initial value and start the PMC.
2734          */
2735
2736         pm->pm_state = PMC_STATE_RUNNING;
2737
2738         critical_enter();
2739         if ((error = pcd->pcd_write_pmc(cpu, adjri,
2740                  PMC_IS_SAMPLING_MODE(mode) ?
2741                  pm->pm_sc.pm_reloadcount :
2742                  pm->pm_sc.pm_initial)) == 0)
2743                 error = pcd->pcd_start_pmc(cpu, adjri);
2744         critical_exit();
2745
2746         pmc_restore_cpu_binding(&pb);
2747
2748         return (error);
2749 }
2750
2751 /*
2752  * Stop a PMC.
2753  */
2754
2755 static int
2756 pmc_stop(struct pmc *pm)
2757 {
2758         struct pmc_owner *po;
2759         struct pmc_binding pb;
2760         struct pmc_classdep *pcd;
2761         int adjri, cpu, error, ri;
2762
2763         KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
2764
2765         PMCDBG(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm,
2766             PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm));
2767
2768         pm->pm_state = PMC_STATE_STOPPED;
2769
2770         /*
2771          * If the PMC is a virtual mode one, changing the state to
2772          * non-RUNNING is enough to ensure that the PMC never gets
2773          * scheduled.
2774          *
2775          * If this PMC is currently running on a CPU, then it will be
2776          * handled correctly at the time its target process is context
2777          * switched out.
2778          */
2779
2780         if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
2781                 return 0;
2782
2783         /*
2784          * A system-mode PMC.  Move to the CPU associated with
2785          * this PMC, and stop the hardware.  We update the
2786          * 'initial count' so that a subsequent PMCSTART will
2787          * resume counting from the current hardware count.
2788          */
2789
2790         pmc_save_cpu_binding(&pb);
2791
2792         cpu = PMC_TO_CPU(pm);
2793
2794         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
2795             ("[pmc,%d] illegal cpu=%d", __LINE__, cpu));
2796
2797         if (!pmc_cpu_is_active(cpu))
2798                 return ENXIO;
2799
2800         pmc_select_cpu(cpu);
2801
2802         ri = PMC_TO_ROWINDEX(pm);
2803         pcd = pmc_ri_to_classdep(md, ri, &adjri);
2804
2805         critical_enter();
2806         if ((error = pcd->pcd_stop_pmc(cpu, adjri)) == 0)
2807                 error = pcd->pcd_read_pmc(cpu, adjri, &pm->pm_sc.pm_initial);
2808         critical_exit();
2809
2810         pmc_restore_cpu_binding(&pb);
2811
2812         po = pm->pm_owner;
2813
2814         /* remove this owner from the global list of SS PMC owners */
2815         if (PMC_TO_MODE(pm) == PMC_MODE_SS) {
2816                 po->po_sscount--;
2817                 if (po->po_sscount == 0) {
2818                         atomic_subtract_rel_int(&pmc_ss_count, 1);
2819                         LIST_REMOVE(po, po_ssnext);
2820                         PMCDBG(PMC,OPS,2,"po=%p removed from global list", po);
2821                 }
2822         }
2823
2824         return (error);
2825 }
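
/*
 * For context, a hedged userland sketch (not part of this driver) of
 * the lifecycle that ultimately drives pmc_start() and pmc_stop()
 * above, using the libpmc(3) wrappers.  The "instructions" event
 * alias is an example; error handling is elided for brevity.
 */
#if 0
#include <pmc.h>

static void
example_count_instructions(void)
{
	pmc_id_t id;
	pmc_value_t v;

	if (pmc_init() < 0)
		return;
	if (pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY,
	    &id) < 0)
		return;
	(void) pmc_start(id);
	/* ... run the workload being measured ... */
	(void) pmc_stop(id);
	(void) pmc_read(id, &v);
	(void) pmc_release(id);
}
#endif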
2826
2827
2828 #ifdef  DEBUG
2829 static const char *pmc_op_to_name[] = {
2830 #undef  __PMC_OP
2831 #define __PMC_OP(N, D)  #N ,
2832         __PMC_OPS()
2833         NULL
2834 };
2835 #endif
2836
2837 /*
2838  * The syscall interface
2839  */
2840
2841 #define PMC_GET_SX_XLOCK(...) do {              \
2842         sx_xlock(&pmc_sx);                      \
2843         if (pmc_hook == NULL) {                 \
2844                 sx_xunlock(&pmc_sx);            \
2845                 return __VA_ARGS__;             \
2846         }                                       \
2847 } while (0)
2848
2849 #define PMC_DOWNGRADE_SX() do {                 \
2850         sx_downgrade(&pmc_sx);                  \
2851         is_sx_downgraded = 1;                   \
2852 } while (0)
2853
2854 static int
2855 pmc_syscall_handler(struct thread *td, void *syscall_args)
2856 {
2857         int error, is_sx_downgraded, is_sx_locked, op;
2858         struct pmc_syscall_args *c;
2859         void *arg;
2860
2861         PMC_GET_SX_XLOCK(ENOSYS);
2862
2863         DROP_GIANT();
2864
2865         is_sx_downgraded = 0;
2866         is_sx_locked = 1;
2867
2868         c = (struct pmc_syscall_args *) syscall_args;
2869
2870         op = c->pmop_code;
2871         arg = c->pmop_data;
2872
2873         PMCDBG(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
2874             pmc_op_to_name[op], arg);
2875
2876         error = 0;
2877         atomic_add_int(&pmc_stats.pm_syscalls, 1);
2878
2879         switch(op)
2880         {
2881
2882
2883         /*
2884          * Configure a log file.
2885          *
2886          * XXX This OP will be reworked.
2887          */
2888
2889         case PMC_OP_CONFIGURELOG:
2890         {
2891                 struct proc *p;
2892                 struct pmc *pm;
2893                 struct pmc_owner *po;
2894                 struct pmc_op_configurelog cl;
2895
2896                 sx_assert(&pmc_sx, SX_XLOCKED);
2897
2898                 if ((error = copyin(arg, &cl, sizeof(cl))) != 0)
2899                         break;
2900
2901                 /* mark this process as owning a log file */
2902                 p = td->td_proc;
2903                 if ((po = pmc_find_owner_descriptor(p)) == NULL)
2904                         if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
2905                                 error = ENOMEM;
2906                                 break;
2907                         }
2908
2909                 /*
2910                  * If a valid fd was passed in, try to configure it;
2911                  * otherwise, if 'fd' was less than zero and a log file
2912                  * was configured, flush its buffers and de-configure
2913                  * it.
2914                  */
2915                 if (cl.pm_logfd >= 0) {
2916                         sx_xunlock(&pmc_sx);
2917                         is_sx_locked = 0;
2918                         error = pmclog_configure_log(md, po, cl.pm_logfd);
2919                 } else if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
2920                         pmclog_process_closelog(po);
2921                         error = pmclog_close(po);
2922                         if (error == 0) {
2923                                 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
2924                                     if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
2925                                         pm->pm_state == PMC_STATE_RUNNING)
2926                                             pmc_stop(pm);
2927                                 error = pmclog_deconfigure_log(po);
2928                         }
2929                 } else
2930                         error = EINVAL;
2931
2932                 if (error)
2933                         break;
2934         }
2935         break;
2936
2937         /*
2938          * Flush a log file.
2939          */
2940
2941         case PMC_OP_FLUSHLOG:
2942         {
2943                 struct pmc_owner *po;
2944
2945                 sx_assert(&pmc_sx, SX_XLOCKED);
2946
2947                 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
2948                         error = EINVAL;
2949                         break;
2950                 }
2951
2952                 error = pmclog_flush(po);
2953         }
2954         break;
2955
2956         /*
2957          * Close a log file.
2958          */
2959
2960         case PMC_OP_CLOSELOG:
2961         {
2962                 struct pmc_owner *po;
2963
2964                 sx_assert(&pmc_sx, SX_XLOCKED);
2965
2966                 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
2967                         error = EINVAL;
2968                         break;
2969                 }
2970
2971                 error = pmclog_close(po);
2972         }
2973         break;
2974
2975         /*
2976          * Retrieve hardware configuration.
2977          */
2978
2979         case PMC_OP_GETCPUINFO: /* CPU information */
2980         {
2981                 struct pmc_op_getcpuinfo gci;
2982                 struct pmc_classinfo *pci;
2983                 struct pmc_classdep *pcd;
2984                 int cl;
2985
2986                 gci.pm_cputype = md->pmd_cputype;
2987                 gci.pm_ncpu    = pmc_cpu_max();
2988                 gci.pm_npmc    = md->pmd_npmc;
2989                 gci.pm_nclass  = md->pmd_nclass;
2990                 pci = gci.pm_classes;
2991                 pcd = md->pmd_classdep;
2992                 for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) {
2993                         pci->pm_caps  = pcd->pcd_caps;
2994                         pci->pm_class = pcd->pcd_class;
2995                         pci->pm_width = pcd->pcd_width;
2996                         pci->pm_num   = pcd->pcd_num;
2997                 }
2998                 error = copyout(&gci, arg, sizeof(gci));
2999         }
3000         break;
3001
3002         /*
3003          * Retrieve soft events list.
3004          */
3005         case PMC_OP_GETDYNEVENTINFO:
3006         {
3007                 enum pmc_class                  cl;
3008                 enum pmc_event                  ev;
3009                 struct pmc_op_getdyneventinfo   *gei;
3010                 struct pmc_dyn_event_descr      dev;
3011                 struct pmc_soft                 *ps;
3012                 uint32_t                        nevent;
3013
3014                 sx_assert(&pmc_sx, SX_LOCKED);
3015
3016                 gei = (struct pmc_op_getdyneventinfo *) arg;
3017
3018                 if ((error = copyin(&gei->pm_class, &cl, sizeof(cl))) != 0)
3019                         break;
3020
3021                 /* Only SOFT class is dynamic. */
3022                 if (cl != PMC_CLASS_SOFT) {
3023                         error = EINVAL;
3024                         break;
3025                 }
3026
3027                 nevent = 0;
3028                 for (ev = PMC_EV_SOFT_FIRST; ev <= PMC_EV_SOFT_LAST; ev++) {
3029                         ps = pmc_soft_ev_acquire(ev);
3030                         if (ps == NULL)
3031                                 continue;
3032                         bcopy(&ps->ps_ev, &dev, sizeof(dev));
3033                         pmc_soft_ev_release(ps);
3034
3035                         error = copyout(&dev,
3036                             &gei->pm_events[nevent],
3037                             sizeof(struct pmc_dyn_event_descr));
3038                         if (error != 0)
3039                                 break;
3040                         nevent++;
3041                 }
3042                 if (error != 0)
3043                         break;
3044
3045                 error = copyout(&nevent, &gei->pm_nevent,
3046                     sizeof(nevent));
3047         }
3048         break;
3049
3050         /*
3051          * Get module statistics
3052          */
3053
3054         case PMC_OP_GETDRIVERSTATS:
3055         {
3056                 struct pmc_op_getdriverstats gms;
3057
3058                 bcopy(&pmc_stats, &gms, sizeof(gms));
3059                 error = copyout(&gms, arg, sizeof(gms));
3060         }
3061         break;
3062
3063
3064         /*
3065          * Retrieve module version number
3066          */
3067
3068         case PMC_OP_GETMODULEVERSION:
3069         {
3070                 uint32_t cv, modv;
3071
3072                 /* retrieve the client's idea of the ABI version */
3073                 if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0)
3074                         break;
3075                 /* don't service clients newer than our driver */
3076                 modv = PMC_VERSION;
3077                 if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) {
3078                         error = EPROGMISMATCH;
3079                         break;
3080                 }
3081                 error = copyout(&modv, arg, sizeof(modv));
3082         }
3083         break;
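
        /*
         * Worked example of the version check above, assuming the usual
         * PMC_VERSION packing of major<<24 | minor<<16 | patch from
         * <sys/pmc.h>: a client built against 0x03050000 (3.5.x) talking
         * to a 0x03040000 (3.4.x) driver fails the 0xFFFF0000 comparison
         * and gets EPROGMISMATCH, while an equal or older client is
         * served.
         */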
3084
3085
3086         /*
3087          * Retrieve the state of all the PMCs on a given
3088          * CPU.
3089          */
3090
3091         case PMC_OP_GETPMCINFO:
3092         {
3093                 int ari;
3094                 struct pmc *pm;
3095                 size_t pmcinfo_size;
3096                 uint32_t cpu, n, npmc;
3097                 struct pmc_owner *po;
3098                 struct pmc_binding pb;
3099                 struct pmc_classdep *pcd;
3100                 struct pmc_info *p, *pmcinfo;
3101                 struct pmc_op_getpmcinfo *gpi;
3102
3103                 PMC_DOWNGRADE_SX();
3104
3105                 gpi = (struct pmc_op_getpmcinfo *) arg;
3106
3107                 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
3108                         break;
3109
3110                 if (cpu >= pmc_cpu_max()) {
3111                         error = EINVAL;
3112                         break;
3113                 }
3114
3115                 if (!pmc_cpu_is_active(cpu)) {
3116                         error = ENXIO;
3117                         break;
3118                 }
3119
3120                 /* switch to CPU 'cpu' */
3121                 pmc_save_cpu_binding(&pb);
3122                 pmc_select_cpu(cpu);
3123
3124                 npmc = md->pmd_npmc;
3125
3126                 pmcinfo_size = npmc * sizeof(struct pmc_info);
3127                 pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK);
3128
3129                 p = pmcinfo;
3130
3131                 for (n = 0; n < md->pmd_npmc; n++, p++) {
3132
3133                         pcd = pmc_ri_to_classdep(md, n, &ari);
3134
3135                         KASSERT(pcd != NULL,
3136                             ("[pmc,%d] null pcd ri=%d", __LINE__, n));
3137
3138                         if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0)
3139                                 break;
3140
3141                         if (PMC_ROW_DISP_IS_STANDALONE(n))
3142                                 p->pm_rowdisp = PMC_DISP_STANDALONE;
3143                         else if (PMC_ROW_DISP_IS_THREAD(n))
3144                                 p->pm_rowdisp = PMC_DISP_THREAD;
3145                         else
3146                                 p->pm_rowdisp = PMC_DISP_FREE;
3147
3148                         p->pm_ownerpid = -1;
3149
3150                         if (pm == NULL) /* no PMC associated */
3151                                 continue;
3152
3153                         po = pm->pm_owner;
3154
3155                         KASSERT(po->po_owner != NULL,
3156                             ("[pmc,%d] pmc_owner had a null proc pointer",
3157                                 __LINE__));
3158
3159                         p->pm_ownerpid = po->po_owner->p_pid;
3160                         p->pm_mode     = PMC_TO_MODE(pm);
3161                         p->pm_event    = pm->pm_event;
3162                         p->pm_flags    = pm->pm_flags;
3163
3164                         if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3165                                 p->pm_reloadcount =
3166                                     pm->pm_sc.pm_reloadcount;
3167                 }
3168
3169                 pmc_restore_cpu_binding(&pb);
3170
3171                 /* now copy out the PMC info collected */
3172                 if (error == 0)
3173                         error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
3174
3175                 free(pmcinfo, M_PMC);
3176         }
3177         break;
3178
3179
3180         /*
3181          * Set the administrative state of a PMC, i.e., whether
3182          * the PMC is to be used or not.
3183          */
3184
3185         case PMC_OP_PMCADMIN:
3186         {
3187                 int cpu, ri;
3188                 enum pmc_state request;
3189                 struct pmc_cpu *pc;
3190                 struct pmc_hw *phw;
3191                 struct pmc_op_pmcadmin pma;
3192                 struct pmc_binding pb;
3193
3194                 sx_assert(&pmc_sx, SX_XLOCKED);
3195
3196                 KASSERT(td == curthread,
3197                     ("[pmc,%d] td != curthread", __LINE__));
3198
3199                 error = priv_check(td, PRIV_PMC_MANAGE);
3200                 if (error)
3201                         break;
3202
3203                 if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
3204                         break;
3205
3206                 cpu = pma.pm_cpu;
3207
3208                 if (cpu < 0 || cpu >= (int) pmc_cpu_max()) {
3209                         error = EINVAL;
3210                         break;
3211                 }
3212
3213                 if (!pmc_cpu_is_active(cpu)) {
3214                         error = ENXIO;
3215                         break;
3216                 }
3217
3218                 request = pma.pm_state;
3219
3220                 if (request != PMC_STATE_DISABLED &&
3221                     request != PMC_STATE_FREE) {
3222                         error = EINVAL;
3223                         break;
3224                 }
3225
3226                 ri = pma.pm_pmc; /* pmc id == row index */
3227                 if (ri < 0 || ri >= (int) md->pmd_npmc) {
3228                         error = EINVAL;
3229                         break;
3230                 }
3231
3232                 /*
3233                  * We can't disable a PMC with a row-index allocated
3234                  * for process virtual PMCs.
3235                  */
3236
3237                 if (PMC_ROW_DISP_IS_THREAD(ri) &&
3238                     request == PMC_STATE_DISABLED) {
3239                         error = EBUSY;
3240                         break;
3241                 }
3242
3243                 /*
3244                  * otherwise, this PMC on this CPU is either free or
3245                  * in system-wide mode.
3246                  */
3247
3248                 pmc_save_cpu_binding(&pb);
3249                 pmc_select_cpu(cpu);
3250
3251                 pc  = pmc_pcpu[cpu];
3252                 phw = pc->pc_hwpmcs[ri];
3253
3254                 /*
3255                  * XXX do we need some kind of 'forced' disable?
3256                  */
3257
3258                 if (phw->phw_pmc == NULL) {
3259                         if (request == PMC_STATE_DISABLED &&
3260                             (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
3261                                 phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
3262                                 PMC_MARK_ROW_STANDALONE(ri);
3263                         } else if (request == PMC_STATE_FREE &&
3264                             (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
3265                                 phw->phw_state |=  PMC_PHW_FLAG_IS_ENABLED;
3266                                 PMC_UNMARK_ROW_STANDALONE(ri);
3267                         }
3268                         /* other cases are a no-op */
3269                 } else
3270                         error = EBUSY;
3271
3272                 pmc_restore_cpu_binding(&pb);
3273         }
3274         break;
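             /*
              * Usage note (editorial): this op backs pmccontrol(8);
              * e.g. something like "pmccontrol -c 0 -d 2" would request
              * PMC_STATE_DISABLED for row index 2 on CPU 0 (flag
              * syntax cited from memory; see the manual page).
              */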
3275
3276
3277         /*
3278          * Allocate a PMC.
3279          */
3280
3281         case PMC_OP_PMCALLOCATE:
3282         {
3283                 int adjri, n;
3284                 u_int cpu;
3285                 uint32_t caps;
3286                 struct pmc *pmc;
3287                 enum pmc_mode mode;
3288                 struct pmc_hw *phw;
3289                 struct pmc_binding pb;
3290                 struct pmc_classdep *pcd;
3291                 struct pmc_op_pmcallocate pa;
3292
3293                 if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
3294                         break;
3295
3296                 caps = pa.pm_caps;
3297                 mode = pa.pm_mode;
3298                 cpu  = pa.pm_cpu;
3299
3300                 if ((mode != PMC_MODE_SS  &&  mode != PMC_MODE_SC  &&
3301                      mode != PMC_MODE_TS  &&  mode != PMC_MODE_TC) ||
3302                     (cpu != (u_int) PMC_CPU_ANY && cpu >= pmc_cpu_max())) {
3303                         error = EINVAL;
3304                         break;
3305                 }
3306
3307                 /*
3308                  * Virtual PMCs should only ask for a default CPU.
3309                  * System mode PMCs need to specify a non-default CPU.
3310                  */
3311
3312                 if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
3313                     (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
3314                         error = EINVAL;
3315                         break;
3316                 }
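                     /*
                      * Illustrative examples (editorial): a thread-scope
                      * counting PMC (PMC_MODE_TC) must pass cpu ==
                      * PMC_CPU_ANY, while a system-scope sampling PMC
                      * (PMC_MODE_SS) must name a concrete CPU, e.g.
                      * cpu == 0.
                      */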
3317
3318                 /*
3319                  * Check that an inactive CPU is not being asked for.
3320                  */
3321
3322                 if (PMC_IS_SYSTEM_MODE(mode) && !pmc_cpu_is_active(cpu)) {
3323                         error = ENXIO;
3324                         break;
3325                 }
3326
3327                 /*
3328                  * Refuse an allocation for a system-wide PMC if this
3329                  * process has been jailed, or if this process lacks
3330                  * super-user credentials and the sysctl tunable
3331                  * 'security.bsd.unprivileged_syspmcs' is zero.
3332                  */
3333
3334                 if (PMC_IS_SYSTEM_MODE(mode)) {
3335                         if (jailed(curthread->td_ucred)) {
3336                                 error = EPERM;
3337                                 break;
3338                         }
3339                         if (!pmc_unprivileged_syspmcs) {
3340                                 error = priv_check(curthread,
3341                                     PRIV_PMC_SYSTEM);
3342                                 if (error)
3343                                         break;
3344                         }
3345                 }
3346
3347                 /*
3348                  * Look for valid values for 'pm_flags'
3349                  */
3350
3351                 if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW |
3352                     PMC_F_LOG_PROCEXIT | PMC_F_CALLCHAIN)) != 0) {
3353                         error = EINVAL;
3354                         break;
3355                 }
3356
3357                 /* process logging options are not allowed for system PMCs */
3358                 if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags &
3359                     (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) {
3360                         error = EINVAL;
3361                         break;
3362                 }
3363
3364                 /*
3365                  * All sampling mode PMCs need to be able to interrupt the
3366                  * CPU.
3367                  */
3368                 if (PMC_IS_SAMPLING_MODE(mode))
3369                         caps |= PMC_CAP_INTERRUPT;
3370
3371                 /* A valid class specifier should have been passed in. */
3372                 for (n = 0; n < md->pmd_nclass; n++)
3373                         if (md->pmd_classdep[n].pcd_class == pa.pm_class)
3374                                 break;
3375                 if (n == md->pmd_nclass) {
3376                         error = EINVAL;
3377                         break;
3378                 }
3379
3380                 /* The requested PMC capabilities should be feasible. */
3381                 if ((md->pmd_classdep[n].pcd_caps & caps) != caps) {
3382                         error = EOPNOTSUPP;
3383                         break;
3384                 }
3385
3386                 PMCDBG(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
3387                     pa.pm_ev, caps, mode, cpu);
3388
3389                 pmc = pmc_allocate_pmc_descriptor();
3390                 pmc->pm_id    = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class,
3391                     PMC_ID_INVALID);
3392                 pmc->pm_event = pa.pm_ev;
3393                 pmc->pm_state = PMC_STATE_FREE;
3394                 pmc->pm_caps  = caps;
3395                 pmc->pm_flags = pa.pm_flags;
3396
3397                 /* save the current CPU binding; we may rebind below */
3398                 pmc_save_cpu_binding(&pb);
3399
3400 #define PMC_IS_SHAREABLE_PMC(cpu, n)                            \
3401         (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state &           \
3402          PMC_PHW_FLAG_IS_SHAREABLE)
3403 #define PMC_IS_UNALLOCATED(cpu, n)                              \
3404         (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
3405
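                     /*
                      * Editorial note: both scans below walk row indices
                      * in order and settle on the first row whose
                      * disposition and ownership checks pass and whose
                      * class's pcd_allocate_pmc() accepts the request.
                      */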
3406                 if (PMC_IS_SYSTEM_MODE(mode)) {
3407                         pmc_select_cpu(cpu);
3408                         for (n = 0; n < (int) md->pmd_npmc; n++) {
3409                                 pcd = pmc_ri_to_classdep(md, n, &adjri);
3410                                 if (pmc_can_allocate_row(n, mode) == 0 &&
3411                                     pmc_can_allocate_rowindex(
3412                                             curthread->td_proc, n, cpu) == 0 &&
3413                                     (PMC_IS_UNALLOCATED(cpu, n) ||
3414                                      PMC_IS_SHAREABLE_PMC(cpu, n)) &&
3415                                     pcd->pcd_allocate_pmc(cpu, adjri, pmc,
3416                                         &pa) == 0)
3417                                         break;
3418                         }
3419                 } else {
3420                         /* Process virtual mode */
3421                         for (n = 0; n < (int) md->pmd_npmc; n++) {
3422                                 pcd = pmc_ri_to_classdep(md, n, &adjri);
3423                                 if (pmc_can_allocate_row(n, mode) == 0 &&
3424                                     pmc_can_allocate_rowindex(
3425                                             curthread->td_proc, n,
3426                                             PMC_CPU_ANY) == 0 &&
3427                                     pcd->pcd_allocate_pmc(curthread->td_oncpu,
3428                                         adjri, pmc, &pa) == 0)
3429                                         break;
3430                         }
3431                 }
3432
3433 #undef  PMC_IS_UNALLOCATED
3434 #undef  PMC_IS_SHAREABLE_PMC
3435
3436                 pmc_restore_cpu_binding(&pb);
3437
3438                 if (n == (int) md->pmd_npmc) {
3439                         pmc_destroy_pmc_descriptor(pmc);
3440                         free(pmc, M_PMC);
3441                         pmc = NULL;
3442                         error = EINVAL;
3443                         break;
3444                 }
3445
3446                 /* Fill in the correct value in the ID field */
3447                 pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n);
3448
3449                 PMCDBG(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x",
3450                     pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id);
3451
3452                 /* Process mode PMCs with logging enabled need log files */
3453                 if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW))
3454                         pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
3455
3456                 /* All system mode sampling PMCs require a log file */
3457                 if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode))
3458                         pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
3459
3460                 /*
3461                  * Configure global PMCs immediately.
3462                  */
3463
3464                 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) {
3465
3466                         pmc_save_cpu_binding(&pb);
3467                         pmc_select_cpu(cpu);
3468
3469                         phw = pmc_pcpu[cpu]->pc_hwpmcs[n];
3470                         pcd = pmc_ri_to_classdep(md, n, &adjri);
3471
3472                         if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 ||
3473                             (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) {
3474                                 (void) pcd->pcd_release_pmc(cpu, adjri, pmc);
3475                                 pmc_destroy_pmc_descriptor(pmc);
3476                                 free(pmc, M_PMC);
3477                                 pmc = NULL;
3478                                 pmc_restore_cpu_binding(&pb);
3479                                 error = EPERM;
3480                                 break;
3481                         }
3482
3483                         pmc_restore_cpu_binding(&pb);
3484                 }
3485
3486                 pmc->pm_state    = PMC_STATE_ALLOCATED;
3487
3488                 /*
3489                  * mark row disposition
3490                  */
3491
3492                 if (PMC_IS_SYSTEM_MODE(mode))
3493                         PMC_MARK_ROW_STANDALONE(n);
3494                 else
3495                         PMC_MARK_ROW_THREAD(n);
3496
3497                 /*
3498                  * Register this PMC with the current thread as its owner.
3499                  */
3500
3501                 if ((error =
3502                     pmc_register_owner(curthread->td_proc, pmc)) != 0) {
3503                         pmc_release_pmc_descriptor(pmc);
3504                         free(pmc, M_PMC);
3505                         pmc = NULL;
3506                         break;
3507                 }
3508
3509                 /*
3510                  * Return the allocated index.
3511                  */
3512
3513                 pa.pm_pmcid = pmc->pm_id;
3514
3515                 error = copyout(&pa, arg, sizeof(pa));
3516         }
3517         break;
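             /*
              * Usage note (editorial): the userland entry point is
              * pmc_allocate(3).  A minimal sketch (the event name is
              * illustrative; valid specifiers are class- and
              * CPU-dependent):
              *
              *   pmc_id_t id;
              *   if (pmc_allocate("instructions", PMC_MODE_SC, 0, 0,
              *       &id) < 0)
              *           err(1, "pmc_allocate");
              */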
3518
3519
3520         /*
3521          * Attach a PMC to a process.
3522          */
3523
3524         case PMC_OP_PMCATTACH:
3525         {
3526                 struct pmc *pm;
3527                 struct proc *p;
3528                 struct pmc_op_pmcattach a;
3529
3530                 sx_assert(&pmc_sx, SX_XLOCKED);
3531
3532                 if ((error = copyin(arg, &a, sizeof(a))) != 0)
3533                         break;
3534
3535                 if (a.pm_pid < 0) {
3536                         error = EINVAL;
3537                         break;
3538                 } else if (a.pm_pid == 0)
3539                         a.pm_pid = td->td_proc->p_pid;
3540
3541                 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
3542                         break;
3543
3544                 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
3545                         error = EINVAL;
3546                         break;
3547                 }
3548
3549                 /* PMCs may be (re)attached only when allocated or stopped */
3550                 if (pm->pm_state == PMC_STATE_RUNNING) {
3551                         error = EBUSY;
3552                         break;
3553                 } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
3554                     pm->pm_state != PMC_STATE_STOPPED) {
3555                         error = EINVAL;
3556                         break;
3557                 }
3558
3559                 /* lookup pid */
3560                 if ((p = pfind(a.pm_pid)) == NULL) {
3561                         error = ESRCH;
3562                         break;
3563                 }
3564
3565                 /*
3566                  * Ignore processes that are exiting.
3567                  */
3568                 if (p->p_flag & P_WEXIT) {
3569                         error = ESRCH;
3570                         PROC_UNLOCK(p); /* pfind() returns a locked process */
3571                         break;
3572                 }
3573
3574                 /*
3575                  * we are allowed to attach a PMC to a process if
3576                  * we can debug it.
3577                  */
3578                 error = p_candebug(curthread, p);
3579
3580                 PROC_UNLOCK(p);
3581
3582                 if (error == 0)
3583                         error = pmc_attach_process(p, pm);
3584         }
3585         break;
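             /*
              * Usage note (editorial): the corresponding libpmc call
              * is pmc_attach(3); as the pm_pid == 0 case above shows,
              * passing a pid of zero attaches the PMC to the calling
              * process itself.
              */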
3586
3587
3588         /*
3589          * Detach an attached PMC from a process.
3590          */
3591
3592         case PMC_OP_PMCDETACH:
3593         {
3594                 struct pmc *pm;
3595                 struct proc *p;
3596                 struct pmc_op_pmcattach a;
3597
3598                 if ((error = copyin(arg, &a, sizeof(a))) != 0)
3599                         break;
3600
3601                 if (a.pm_pid < 0) {
3602                         error = EINVAL;
3603                         break;
3604                 } else if (a.pm_pid == 0)
3605                         a.pm_pid = td->td_proc->p_pid;
3606
3607                 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
3608                         break;
3609
3610                 if ((p = pfind(a.pm_pid)) == NULL) {
3611                         error = ESRCH;
3612                         break;
3613                 }
3614
3615                 /*
3616                  * Treat processes that are in the process of exiting
3617                  * as if they were not present.
3618                  */
3619
3620                 if (p->p_flag & P_WEXIT)
3621                         error = ESRCH;
3622
3623                 PROC_UNLOCK(p); /* pfind() returns a locked process */
3624
3625                 if (error == 0)
3626                         error = pmc_detach_process(p, pm);
3627         }
3628         break;
3629
3630
3631         /*
3632          * Retrieve the MSR number associated with the counter
3633          * 'pmc_id'.  This allows processes to directly use RDPMC
3634          * instructions to read their PMCs, without the overhead of a
3635          * system call.
3636          */
3637
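             /*
              * Editorial sketch: with the counter number obtained via
              * pmc_get_msr(3), a self-attached process on x86 could
              * then read its PMC without a system call (assuming
              * <machine/cpufunc.h>):
              *
              *   uint32_t msr;
              *   if (pmc_get_msr(id, &msr) == 0)
              *           value = rdpmc(msr);
              *
              * The exact counter-index encoding is class-dependent.
              */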
3638         case PMC_OP_PMCGETMSR:
3639         {
3640                 int adjri, ri;
3641                 struct pmc *pm;
3642                 struct pmc_target *pt;
3643                 struct pmc_op_getmsr gm;
3644                 struct pmc_classdep *pcd;
3645
3646                 PMC_DOWNGRADE_SX();
3647
3648                 if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
3649                         break;
3650
3651                 if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
3652                         break;
3653
3654                 /*
3655                  * The allocated PMC has to be a process virtual PMC,
3656                  * i.e., of type MODE_T[CS].  Global PMCs can only be
3657                  * read using the PMCREAD operation since they may be
3658                  * allocated on a different CPU than the one we could
3659                  * be running on at the time of the RDPMC instruction.
3660                  *
3661                  * The GETMSR operation is not allowed for PMCs that
3662                  * are inherited across processes.
3663                  */
3664
3665                 if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) ||
3666                     (pm->pm_flags & PMC_F_DESCENDANTS)) {
3667                         error = EINVAL;
3668                         break;
3669                 }
3670
3671                 /*
3672                  * It only makes sense to use a RDPMC (or its
3673                  * equivalent instruction on non-x86 architectures) on
3674                  * a process that has allocated and attached a PMC to
3675                  * itself.  Conversely the PMC is only allowed to have
3676                  * one process attached to it -- its owner.
3677                  */
3678
3679                 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL ||
3680                     LIST_NEXT(pt, pt_next) != NULL ||
3681                     pt->pt_process->pp_proc != pm->pm_owner->po_owner) {
3682                         error = EINVAL;
3683                         break;
3684                 }
3685
3686                 ri = PMC_TO_ROWINDEX(pm);
3687                 pcd = pmc_ri_to_classdep(md, ri, &adjri);
3688
3689                 /* PMC class has no 'GETMSR' support */
3690                 if (pcd->pcd_get_msr == NULL) {
3691                         error = ENOSYS;
3692                         break;
3693                 }
3694
3695                 if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) != 0)
3696                         break;
3697
3698                 if ((error = copyout(&gm, arg, sizeof(gm))) != 0)
3699                         break;
3700
3701                 /*
3702                  * Mark our process as using MSRs.  Update machine
3703                  * state using a forced context switch.
3704                  */
3705
3706                 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS;
3707                 pmc_force_context_switch();
3708
3709         }
3710         break;
3711
3712         /*
3713          * Release an allocated PMC
3714          */
3715
3716         case PMC_OP_PMCRELEASE:
3717         {
3718                 pmc_id_t pmcid;
3719                 struct pmc *pm;
3720                 struct pmc_owner *po;
3721                 struct pmc_op_simple sp;
3722
3723                 /*
3724                  * Find PMC pointer for the named PMC.
3725                  *
3726                  * Use pmc_release_pmc_descriptor() to switch off the
3727                  * PMC, remove all its target threads, and remove the
3728                  * PMC from its owner's list.
3729                  *
3730                  * Remove the owner record if this is the last PMC
3731                  * owned.
3732                  *
3733                  * Free up space.
3734                  */
3735
3736                 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3737                         break;
3738
3739                 pmcid = sp.pm_pmcid;
3740
3741                 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3742                         break;
3743
3744                 po = pm->pm_owner;
3745                 pmc_release_pmc_descriptor(pm);
3746                 pmc_maybe_remove_owner(po);
3747
3748                 free(pm, M_PMC);
3749         }
3750         break;
3751
3752
3753         /*
3754          * Read and/or write a PMC.
3755          */
3756
3757         case PMC_OP_PMCRW:
3758         {
3759                 int adjri;
3760                 struct pmc *pm;
3761                 uint32_t cpu, ri;
3762                 pmc_value_t oldvalue;
3763                 struct pmc_binding pb;
3764                 struct pmc_op_pmcrw prw;
3765                 struct pmc_classdep *pcd;
3766                 struct pmc_op_pmcrw *pprw;
3767
3768                 PMC_DOWNGRADE_SX();
3769
3770                 if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
3771                         break;
3772
3773                 ri = 0;
3774                 PMCDBG(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
3775                     prw.pm_flags);
3776
3777                 /* must have at least one flag set */
3778                 if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
3779                         error = EINVAL;
3780                         break;
3781                 }
3782
3783                 /* locate pmc descriptor */
3784                 if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
3785                         break;
3786
3787                 /* Can't read a PMC that hasn't been started. */
3788                 if (pm->pm_state != PMC_STATE_ALLOCATED &&
3789                     pm->pm_state != PMC_STATE_STOPPED &&
3790                     pm->pm_state != PMC_STATE_RUNNING) {
3791                         error = EINVAL;
3792                         break;
3793                 }
3794
3795                 /* writing a new value is allowed only for 'STOPPED' pmcs */
3796                 if (pm->pm_state == PMC_STATE_RUNNING &&
3797                     (prw.pm_flags & PMC_F_NEWVALUE)) {
3798                         error = EBUSY;
3799                         break;
3800                 }
3801
3802                 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
3803
3804                         /*
3805                          * If this PMC is attached to its owner (i.e.,
3806                          * the process requesting this operation) and
3807                          * is running, then attempt to get an
3808                          * up-to-date reading from hardware for a READ.
3809                          * Writes are only allowed when the PMC is
3810                          * stopped, so only update the saved value
3811                          * field.
3812                          *
3813                          * If the PMC is not running, or is not
3814                          * attached to its owner, read/write to the
3815                          * savedvalue field.
3816                          */
3817
3818                         ri = PMC_TO_ROWINDEX(pm);
3819                         pcd = pmc_ri_to_classdep(md, ri, &adjri);
3820
3821                         mtx_pool_lock_spin(pmc_mtxpool, pm);
3822                         cpu = curthread->td_oncpu;
3823
3824                         if (prw.pm_flags & PMC_F_OLDVALUE) {
3825                                 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
3826                                     (pm->pm_state == PMC_STATE_RUNNING))
3827                                         error = (*pcd->pcd_read_pmc)(cpu, adjri,
3828                                             &oldvalue);
3829                                 else
3830                                         oldvalue = pm->pm_gv.pm_savedvalue;
3831                         }
3832                         if (prw.pm_flags & PMC_F_NEWVALUE)
3833                                 pm->pm_gv.pm_savedvalue = prw.pm_value;
3834
3835                         mtx_pool_unlock_spin(pmc_mtxpool, pm);
3836
3837                 } else { /* System mode PMCs */
3838                         cpu = PMC_TO_CPU(pm);
3839                         ri  = PMC_TO_ROWINDEX(pm);
3840                         pcd = pmc_ri_to_classdep(md, ri, &adjri);
3841
3842                         if (!pmc_cpu_is_active(cpu)) {
3843                                 error = ENXIO;
3844                                 break;
3845                         }
3846
3847                         /* move this thread to CPU 'cpu' */
3848                         pmc_save_cpu_binding(&pb);
3849                         pmc_select_cpu(cpu);
3850
3851                         critical_enter();
3852                         /* save old value */
3853                         if (prw.pm_flags & PMC_F_OLDVALUE)
3854                                 if ((error = (*pcd->pcd_read_pmc)(cpu, adjri,
3855                                          &oldvalue)))
3856                                         goto error;
3857                         /* write out new value */
3858                         if (prw.pm_flags & PMC_F_NEWVALUE)
3859                                 error = (*pcd->pcd_write_pmc)(cpu, adjri,
3860                                     prw.pm_value);
3861                 error:
3862                         critical_exit();
3863                         pmc_restore_cpu_binding(&pb);
3864                         if (error)
3865                                 break;
3866                 }
3867
3868                 pprw = (struct pmc_op_pmcrw *) arg;
3869
3870 #ifdef  DEBUG
3871                 if (prw.pm_flags & PMC_F_NEWVALUE)
3872                         PMCDBG(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
3873                             ri, prw.pm_value, oldvalue);
3874                 else if (prw.pm_flags & PMC_F_OLDVALUE)
3875                         PMCDBG(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue);
3876 #endif
3877
3878                 /* return old value if requested */
3879                 if (prw.pm_flags & PMC_F_OLDVALUE)
3880                         if ((error = copyout(&oldvalue, &pprw->pm_value,
3881                                  sizeof(prw.pm_value))))
3882                                 break;
3883
3884         }
3885         break;
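             /*
              * Usage note (editorial): pmc_read(3) drives the
              * PMC_F_OLDVALUE path above and pmc_write(3) the
              * PMC_F_NEWVALUE path, e.g.:
              *
              *   pmc_value_t v;
              *   if (pmc_read(id, &v) == 0)
              *           printf("count: %jx\n", (uintmax_t) v);
              */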
3886
3887
3888         /*
3889          * Set the sampling rate for a sampling mode PMC and the
3890          * initial count for a counting mode PMC.
3891          */
3892
3893         case PMC_OP_PMCSETCOUNT:
3894         {
3895                 struct pmc *pm;
3896                 struct pmc_op_pmcsetcount sc;
3897
3898                 PMC_DOWNGRADE_SX();
3899
3900                 if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
3901                         break;
3902
3903                 if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
3904                         break;
3905
3906                 if (pm->pm_state == PMC_STATE_RUNNING) {
3907                         error = EBUSY;
3908                         break;
3909                 }
3910
3911                 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3912                         pm->pm_sc.pm_reloadcount = sc.pm_count;
3913                 else
3914                         pm->pm_sc.pm_initial = sc.pm_count;
3915         }
3916         break;
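             /*
              * Usage note (editorial): for a sampling PMC the count is
              * a reload value, e.g. pm_count = 65536 yields roughly
              * one sample per 65536 events; for a counting PMC it
              * merely presets the counter, most commonly to 0.
              */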
3917
3918
3919         /*
3920          * Start a PMC.
3921          */
3922
3923         case PMC_OP_PMCSTART:
3924         {
3925                 pmc_id_t pmcid;
3926                 struct pmc *pm;
3927                 struct pmc_op_simple sp;
3928
3929                 sx_assert(&pmc_sx, SX_XLOCKED);
3930
3931                 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3932                         break;
3933
3934                 pmcid = sp.pm_pmcid;
3935
3936                 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3937                         break;
3938
3939                 KASSERT(pmcid == pm->pm_id,
3940                     ("[pmc,%d] pmcid %x != id %x", __LINE__,
3941                         pm->pm_id, pmcid));
3942
3943                 if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
3944                         break;
3945                 else if (pm->pm_state != PMC_STATE_STOPPED &&
3946                     pm->pm_state != PMC_STATE_ALLOCATED) {
3947                         error = EINVAL;
3948                         break;
3949                 }
3950
3951                 error = pmc_start(pm);
3952         }
3953         break;
3954
3955
3956         /*
3957          * Stop a PMC.
3958          */
3959
3960         case PMC_OP_PMCSTOP:
3961         {
3962                 pmc_id_t pmcid;
3963                 struct pmc *pm;
3964                 struct pmc_op_simple sp;
3965
3966                 PMC_DOWNGRADE_SX();
3967
3968                 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3969                         break;
3970
3971                 pmcid = sp.pm_pmcid;
3972
3973                 /*
3974                  * Mark the PMC as inactive and invoke the MD stop
3975                  * routines if needed.
3976                  */
3977
3978                 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3979                         break;
3980
3981                 KASSERT(pmcid == pm->pm_id,
3982                     ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
3983                         pm->pm_id, pmcid));
3984
3985                 if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
3986                         break;
3987                 else if (pm->pm_state != PMC_STATE_RUNNING) {
3988                         error = EINVAL;
3989                         break;
3990                 }
3991
3992                 error = pmc_stop(pm);
3993         }
3994         break;
3995
3996
3997         /*
3998          * Write a user supplied value to the log file.
3999          */
4000
4001         case PMC_OP_WRITELOG:
4002         {
4003                 struct pmc_op_writelog wl;
4004                 struct pmc_owner *po;
4005
4006                 PMC_DOWNGRADE_SX();
4007
4008                 if ((error = copyin(arg, &wl, sizeof(wl))) != 0)
4009                         break;
4010
4011                 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
4012                         error = EINVAL;
4013                         break;
4014                 }
4015
4016                 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
4017                         error = EINVAL;
4018                         break;
4019                 }
4020
4021                 error = pmclog_process_userlog(po, &wl);
4022         }
4023         break;
4024
4025
4026         default:
4027                 error = EINVAL;
4028                 break;
4029         }
4030
4031         if (is_sx_locked != 0) {
4032                 if (is_sx_downgraded)
4033                         sx_sunlock(&pmc_sx);
4034                 else
4035                         sx_xunlock(&pmc_sx);
4036         }
4037
4038         if (error)
4039                 atomic_add_int(&pmc_stats.pm_syscall_errors, 1);
4040
4041         PICKUP_GIANT();
4042
4043         return (error);
4044 }
4045
4046 /*
4047  * Helper functions
4048  */
4049
4050
4051 /*
4052  * Mark the thread as needing callchain capture and post an AST.  The
4053  * actual callchain capture will be done in a context where it is safe
4054  * to take page faults.
4055  */
4056
4057 static void
4058 pmc_post_callchain_callback(void)
4059 {
4060         struct thread *td;
4061
4062         td = curthread;
4063
4064         /*
4065          * If there are multiple PMCs for the same interrupt, ignore the new post.
4066          */
4067         if (td->td_pflags & TDP_CALLCHAIN)
4068                 return;
4069
4070         /*
4071          * Mark this thread as needing callchain capture.
4072          * `td->td_pflags' will be safe to touch because this thread
4073          * was in user space when it was interrupted.
4074          */
4075         td->td_pflags |= TDP_CALLCHAIN;
4076
4077         /*
4078          * Don't let this thread migrate between CPUs until callchain
4079          * capture completes.
4080          */
4081         sched_pin();
4082
4083         return;
4084 }
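     /*
      * Editorial note: the sched_pin() above is paired with the
      * sched_unpin() in pmc_capture_user_callchain(), so the thread
      * stays on the CPU whose sample buffer holds its deferred
      * entries until the user callchain has been captured.
      */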
4085
4086 /*
4087  * Interrupt processing.
4088  *
4089  * Find a free slot in the per-cpu array of samples and capture the
4090  * current callchain there.  If a sample was successfully added, a bit
4091  * is set in mask 'pmc_cpumask' denoting that the DO_SAMPLES hook
4092  * needs to be invoked from the clock handler.
4093  *
4094  * This function is meant to be called from an NMI handler.  It cannot
4095  * use any of the locking primitives supplied by the OS.
4096  */
4097
4098 int
4099 pmc_process_interrupt(int cpu, int ring, struct pmc *pm, struct trapframe *tf,
4100     int inuserspace)
4101 {
4102         int error, callchaindepth;
4103         struct thread *td;
4104         struct pmc_sample *ps;
4105         struct pmc_samplebuffer *psb;
4106
4107         error = 0;
4108
4109         /*
4110          * Locate this CPU's sample buffer for the given ring.
4111          */
4112         psb = pmc_pcpu[cpu]->pc_sb[ring];
4113
4114         ps = psb->ps_write;
4115         if (ps->ps_nsamples) {  /* in use, reader hasn't caught up */
4116                 pm->pm_stalled = 1;
4117                 atomic_add_int(&pmc_stats.pm_intr_bufferfull, 1);
4118                 PMCDBG(SAM,INT,1,"(spc) cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d",
4119                     cpu, pm, (void *) tf, inuserspace,
4120                     (int) (psb->ps_write - psb->ps_samples),
4121                     (int) (psb->ps_read - psb->ps_samples));
4122                 error = ENOMEM;
4123                 goto done;
4124         }
4125
4126
4127         /* Fill in entry. */
4128         PMCDBG(SAM,INT,1,"cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm,
4129             (void *) tf, inuserspace,
4130             (int) (psb->ps_write - psb->ps_samples),
4131             (int) (psb->ps_read - psb->ps_samples));
4132
4133         KASSERT(pm->pm_runcount >= 0,
4134             ("[pmc,%d] pm=%p runcount %d", __LINE__, (void *) pm,
4135                 pm->pm_runcount));
4136
4137         atomic_add_rel_int(&pm->pm_runcount, 1);        /* hold onto PMC */
4138
4139         ps->ps_pmc = pm;
4140         if ((td = curthread) && td->td_proc)
4141                 ps->ps_pid = td->td_proc->p_pid;
4142         else
4143                 ps->ps_pid = -1;
4144         ps->ps_cpu = cpu;
4145         ps->ps_td = td;
4146         ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0;
4147
4148         callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ?
4149             pmc_callchaindepth : 1;
4150
4151         if (callchaindepth == 1)
4152                 ps->ps_pc[0] = PMC_TRAPFRAME_TO_PC(tf);
4153         else {
4154                 /*
4155                  * Kernel stack traversals can be done immediately,
4156                  * while we defer to an AST for user space traversals.
4157                  */
4158                 if (!inuserspace) {
4159                         callchaindepth =
4160                             pmc_save_kernel_callchain(ps->ps_pc,
4161                                 callchaindepth, tf);
4162                 } else {
4163                         pmc_post_callchain_callback();
4164                         callchaindepth = PMC_SAMPLE_INUSE;
4165                 }
4166         }
4167
4168         ps->ps_nsamples = callchaindepth;       /* mark entry as in use */
4169
4170         /* increment write pointer, modulo ring buffer size */
4171         ps++;
4172         if (ps == psb->ps_fence)
4173                 psb->ps_write = psb->ps_samples;
4174         else
4175                 psb->ps_write = ps;
4176
4177  done:
4178         /* mark CPU as needing processing */
4179         CPU_SET_ATOMIC(cpu, &pmc_cpumask);
4180
4181         return (error);
4182 }
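     /*
      * Editorial note: the sample ring above is effectively a
      * single-producer (this CPU's interrupt path advances ps_write),
      * single-consumer (the hardclock sweep in pmc_process_samples()
      * advances ps_read) buffer, which is what lets this path avoid
      * taking locks in NMI context.
      */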
4183
4184 /*
4185  * Capture a user call chain.  This function will be called from ast()
4186  * before control returns to userland and before the process gets
4187  * rescheduled.
4188  */
4189
4190 static void
4191 pmc_capture_user_callchain(int cpu, int ring, struct trapframe *tf)
4192 {
4193         int i;
4194         struct pmc *pm;
4195         struct thread *td;
4196         struct pmc_sample *ps;
4197         struct pmc_samplebuffer *psb;
4198 #ifdef  INVARIANTS
4199         int ncallchains;
4200 #endif
4201
4202         psb = pmc_pcpu[cpu]->pc_sb[ring];
4203         td = curthread;
4204
4205         KASSERT(td->td_pflags & TDP_CALLCHAIN,
4206             ("[pmc,%d] Retrieving callchain for thread that doesn't want it",
4207                 __LINE__));
4208
4209 #ifdef  INVARIANTS
4210         ncallchains = 0;
4211 #endif
4212
4213         /*
4214          * Iterate through all deferred callchain requests.
4215          */
4216
4217         ps = psb->ps_samples;
4218         for (i = 0; i < pmc_nsamples; i++, ps++) {
4219
4220                 if (ps->ps_nsamples != PMC_SAMPLE_INUSE)
4221                         continue;
4222                 if (ps->ps_td != td)
4223                         continue;
4224
4225                 KASSERT(ps->ps_cpu == cpu,
4226                     ("[pmc,%d] cpu mismatch ps_cpu=%d pcpu=%d", __LINE__,
4227                         ps->ps_cpu, PCPU_GET(cpuid)));
4228
4229                 pm = ps->ps_pmc;
4230
4231                 KASSERT(pm->pm_flags & PMC_F_CALLCHAIN,
4232                     ("[pmc,%d] Retrieving callchain for PMC that doesn't "
4233                         "want it", __LINE__));
4234
4235                 KASSERT(pm->pm_runcount > 0,
4236                     ("[pmc,%d] runcount %d", __LINE__, pm->pm_runcount));
4237
4238                 /*
4239                  * Retrieve the callchain and mark the sample buffer
4240                  * as 'processable' by the timer tick sweep code.
4241                  */
4242                 ps->ps_nsamples = pmc_save_user_callchain(ps->ps_pc,
4243                     pmc_callchaindepth, tf);
4244
4245 #ifdef  INVARIANTS
4246                 ncallchains++;
4247 #endif
4248         }
4249
4250         KASSERT(ncallchains > 0,
4251             ("[pmc,%d] cpu %d didn't find a sample to collect", __LINE__,
4252                 cpu));
4253
4254         KASSERT(td->td_pinned > 0,
4255             ("[pmc,%d] invalid td_pinned value", __LINE__));
4256         sched_unpin();  /* Can migrate safely now. */
4257
4258         return;
4259 }
4260
4261 /*
4262  * Process saved PC samples.
4263  */
4264
4265 static void
4266 pmc_process_samples(int cpu, int ring)
4267 {
4268         struct pmc *pm;
4269         int adjri, n;
4270         struct thread *td;
4271         struct pmc_owner *po;
4272         struct pmc_sample *ps;
4273         struct pmc_classdep *pcd;
4274         struct pmc_samplebuffer *psb;
4275
4276         KASSERT(PCPU_GET(cpuid) == cpu,
4277             ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__,
4278                 PCPU_GET(cpuid), cpu));
4279
4280         psb = pmc_pcpu[cpu]->pc_sb[ring];
4281
4282         for (n = 0; n < pmc_nsamples; n++) { /* bound on #iterations */
4283
4284                 ps = psb->ps_read;
4285                 if (ps->ps_nsamples == PMC_SAMPLE_FREE)
4286                         break;
4287
4288                 pm = ps->ps_pmc;
4289
4290                 KASSERT(pm->pm_runcount > 0,
4291                     ("[pmc,%d] pm=%p runcount %d", __LINE__, (void *) pm,
4292                         pm->pm_runcount));
4293
4294                 po = pm->pm_owner;
4295
4296                 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
4297                     ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__,
4298                         pm, PMC_TO_MODE(pm)));
4299
4300                 /* Ignore PMCs that have been switched off */
4301                 if (pm->pm_state != PMC_STATE_RUNNING)
4302                         goto entrydone;
4303
4304                 /* If there is a pending AST wait for completion */
4305                 if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
4306                         /* Need a rescan at a later time. */
4307                         CPU_SET_ATOMIC(cpu, &pmc_cpumask);
4308                         break;
4309                 }
4310
4311                 PMCDBG(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu,
4312                     pm, ps->ps_nsamples, ps->ps_flags,
4313                     (int) (psb->ps_write - psb->ps_samples),
4314                     (int) (psb->ps_read - psb->ps_samples));
4315
4316                 /*
4317                  * If this is a process-mode PMC that is attached to
4318                  * its owner, and if the PC is in user mode, update
4319                  * profiling statistics like timer-based profiling
4320                  * would have done.
4321                  */
4322                 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
4323                         if (ps->ps_flags & PMC_CC_F_USERSPACE) {
4324                                 td = FIRST_THREAD_IN_PROC(po->po_owner);
4325                                 addupc_intr(td, ps->ps_pc[0], 1);
4326                         }
4327                         goto entrydone;
4328                 }
4329
4330                 /*
4331                  * Otherwise, this is either a sampling mode PMC that
4332                  * is attached to a different process than its owner,
4333                  * or a system-wide sampling PMC.  Dispatch a log
4334                  * entry to the PMC's owner process.
4335                  */
4336                 pmclog_process_callchain(pm, ps);
4337
4338         entrydone:
4339                 ps->ps_nsamples = 0; /* mark entry as free */
4340                 atomic_subtract_rel_int(&pm->pm_runcount, 1);
4341
4342                 /* increment read pointer, modulo ring buffer size */
4343                 if (++ps == psb->ps_fence)
4344                         psb->ps_read = psb->ps_samples;
4345                 else
4346                         psb->ps_read = ps;
4347         }
4348
4349         atomic_add_int(&pmc_stats.pm_log_sweeps, 1);
4350
4351         /* Do not re-enable stalled PMCs if we failed to process any samples */
4352         if (n == 0)
4353                 return;
4354
4355         /*
4356          * Restart any stalled sampling PMCs on this CPU.
4357          *
4358          * If the NMI handler sets the pm_stalled field of a PMC after
4359          * the check below, we'll end up processing the stalled PMC at
4360          * the next hardclock tick.
4361          */
4362         for (n = 0; n < md->pmd_npmc; n++) {
4363                 pcd = pmc_ri_to_classdep(md, n, &adjri);
4364                 KASSERT(pcd != NULL,
4365                     ("[pmc,%d] null pcd ri=%d", __LINE__, n));
4366                 (void) (*pcd->pcd_get_config)(cpu,adjri,&pm);
4367
4368                 if (pm == NULL ||                        /* !cfg'ed */
4369                     pm->pm_state != PMC_STATE_RUNNING || /* !active */
4370                     !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */
4371                     pm->pm_stalled == 0) /* !stalled */
4372                         continue;
4373
4374                 pm->pm_stalled = 0;
4375                 (*pcd->pcd_start_pmc)(cpu, adjri);
4376         }
4377 }
4378
4379 /*
4380  * Event handlers.
4381  */
4382
4383 /*
4384  * Handle a process exit.
4385  *
4386  * Remove this process from all hash tables.  If this process
4387  * owned any PMCs, turn off those PMCs and deallocate them,
4388  * removing any associations with target processes.
4389  *
4390  * This function will be called by the last 'thread' of a
4391  * process.
4392  *
4393  * XXX This eventhandler gets called early in the exit process.
4394  * Consider using a 'hook' invocation from thread_exit() or equivalent
4395  * spot.  Another negative is that kse_exit doesn't seem to call
4396  * exit1() [??].
4397  *
4398  */
4399
4400 static void
4401 pmc_process_exit(void *arg __unused, struct proc *p)
4402 {
4403         struct pmc *pm;
4404         int adjri, cpu;
4405         unsigned int ri;
4406         int is_using_hwpmcs;
4407         struct pmc_owner *po;
4408         struct pmc_process *pp;
4409         struct pmc_classdep *pcd;
4410         pmc_value_t newvalue, tmp;
4411
4412         PROC_LOCK(p);
4413         is_using_hwpmcs = p->p_flag & P_HWPMC;
4414         PROC_UNLOCK(p);
4415
4416         /*
4417          * Log a sysexit event to all SS PMC owners.
4418          */
4419         LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
4420             if (po->po_flags & PMC_PO_OWNS_LOGFILE)
4421                     pmclog_process_sysexit(po, p->p_pid);
4422
4423         if (!is_using_hwpmcs)
4424                 return;
4425
4426         PMC_GET_SX_XLOCK();
4427         PMCDBG(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
4428             p->p_comm);
4429
4430         /*
4431          * Since this code is invoked by the last thread in an exiting
4432          * process, we would have context switched IN at some prior
4433          * point.  However, with PREEMPTION, kernel mode context
4434          * switches may happen any time, so we want to disable a
4435          * context switch OUT until we get any PMCs targeting this
4436          * process off the hardware.
4437          *
4438          * We also need to atomically remove this process'
4439          * entry from our target process hash table, using
4440          * PMC_FLAG_REMOVE.
4441          */
4444
4445         critical_enter(); /* no preemption */
4446
4447         cpu = curthread->td_oncpu;
4448
4449         if ((pp = pmc_find_process_descriptor(p,
4450                  PMC_FLAG_REMOVE)) != NULL) {
4451
4452                 PMCDBG(PRC,EXT,2,
4453                     "process-exit proc=%p pmc-process=%p", p, pp);
4454
4455                 /*
4456                  * The exiting process could be the target of
4457                  * some PMCs which will be running on the
4458                  * currently executing CPU.
4459                  *
4460                  * We need to turn these PMCs off like we
4461                  * would do at context switch OUT time.
4462                  */
4463                 for (ri = 0; ri < md->pmd_npmc; ri++) {
4464
4465                         /*
4466                          * Pick up the pmc pointer from hardware
4467                          * state similar to the CSW_OUT code.
4468                          */
4469                         pm = NULL;
4470
4471                         pcd = pmc_ri_to_classdep(md, ri, &adjri);
4472
4473                         (void) (*pcd->pcd_get_config)(cpu, adjri, &pm);
4474
4475                         PMCDBG(PRC,EXT,2, "ri=%d pm=%p", ri, pm);
4476
4477                         if (pm == NULL ||
4478                             !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
4479                                 continue;
4480
4481                         PMCDBG(PRC,EXT,2, "ppmcs[%d]=%p pm=%p "
4482                             "state=%d", ri, pp->pp_pmcs[ri].pp_pmc,
4483                             pm, pm->pm_state);
4484
4485                         KASSERT(PMC_TO_ROWINDEX(pm) == ri,
4486                             ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
4487                                 __LINE__, PMC_TO_ROWINDEX(pm), ri));
4488
4489                         KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
4490                             ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
4491                                 __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc));
4492
4493                         (void) pcd->pcd_stop_pmc(cpu, adjri);
4494
4495                         KASSERT(pm->pm_runcount > 0,
4496                             ("[pmc,%d] bad runcount ri %d rc %d",
4497                                 __LINE__, ri, pm->pm_runcount));
4498
4499                         /* Stop hardware only if it is actually running */
4500                         if (pm->pm_state == PMC_STATE_RUNNING &&
4501                             pm->pm_stalled == 0) {
4502                                 pcd->pcd_read_pmc(cpu, adjri, &newvalue);
4503                                 tmp = newvalue -
4504                                     PMC_PCPU_SAVED(cpu,ri);
4505
4506                                 mtx_pool_lock_spin(pmc_mtxpool, pm);
4507                                 pm->pm_gv.pm_savedvalue += tmp;
4508                                 pp->pp_pmcs[ri].pp_pmcval += tmp;
4509                                 mtx_pool_unlock_spin(pmc_mtxpool, pm);
4510                         }
4511
4512                         atomic_subtract_rel_int(&pm->pm_runcount,1);
4513
4514                         KASSERT((int) pm->pm_runcount >= 0,
4515                             ("[pmc,%d] runcount is %d", __LINE__, pm->pm_runcount));
4516
4517                         (void) pcd->pcd_config_pmc(cpu, adjri, NULL);
4518                 }
4519
4520                 /*
4521                  * Inform the MD layer of this pseudo "context switch
4522                  * out"
4523                  */
4524                 (void) md->pmd_switch_out(pmc_pcpu[cpu], pp);
4525
4526                 critical_exit(); /* ok to be pre-empted now */
4527
4528                 /*
4529                  * Unlink this process from the PMCs that are
4530                  * targeting it.  This will send a signal to
4531                  * all PMC owners whose PMCs are orphaned.
4532                  *
4533                  * Log PMC value at exit time if requested.
4534                  */
4535                 for (ri = 0; ri < md->pmd_npmc; ri++)
4536                         if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
4537                                 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
4538                                     PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)))
4539                                         pmclog_process_procexit(pm, pp);
4540                                 pmc_unlink_target_process(pm, pp);
4541                         }
4542                 free(pp, M_PMC);
4543
4544         } else
4545                 critical_exit(); /* pp == NULL */
4546
4547
4548         /*
4549          * If the process owned PMCs, free them up and free up
4550          * memory.
4551          */
4552         if ((po = pmc_find_owner_descriptor(p)) != NULL) {
4553                 pmc_remove_owner(po);
4554                 pmc_destroy_owner_descriptor(po);
4555         }
4556
4557         sx_xunlock(&pmc_sx);
4558 }
4559
4560 /*
4561  * Handle a process fork.
4562  *
4563  * If the parent process 'p1' is under HWPMC monitoring, then copy
4564  * over any attached PMCs that have 'do_descendants' semantics.
4565  */
4566
4567 static void
4568 pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc,
4569     int flags)
4570 {
4571         int is_using_hwpmcs;
4572         unsigned int ri;
4573         uint32_t do_descendants;
4574         struct pmc *pm;
4575         struct pmc_owner *po;
4576         struct pmc_process *ppnew, *ppold;
4577
4578         (void) flags;           /* unused parameter */
4579
4580         PROC_LOCK(p1);
4581         is_using_hwpmcs = p1->p_flag & P_HWPMC;
4582         PROC_UNLOCK(p1);
4583
4584         /*
4585          * If there are system-wide sampling PMCs active, we need to
4586          * log all fork events to their owner's logs.
4587          */
4588
4589         LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
4590             if (po->po_flags & PMC_PO_OWNS_LOGFILE)
4591                     pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);
4592
4593         if (!is_using_hwpmcs)
4594                 return;
4595
4596         PMC_GET_SX_XLOCK();
4597         PMCDBG(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1,
4598             p1->p_pid, p1->p_comm, newproc);
4599
4600         /*
4601          * If the parent process (curthread->td_proc) is a
4602          * target of any PMCs, look for PMCs that are to be
4603          * inherited, and link these into the new process
4604          * descriptor.
4605          */
4606         if ((ppold = pmc_find_process_descriptor(curthread->td_proc,
4607                  PMC_FLAG_NONE)) == NULL)
4608                 goto done;              /* nothing to do */
4609
4610         do_descendants = 0;
4611         for (ri = 0; ri < md->pmd_npmc; ri++)
4612                 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL)
4613                         do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS;
4614         if (do_descendants == 0) /* nothing to do */
4615                 goto done;
4616
4617         /* allocate a descriptor for the new process  */
4618         if ((ppnew = pmc_find_process_descriptor(newproc,
4619                  PMC_FLAG_ALLOCATE)) == NULL)
4620                 goto done;
4621
4622         /*
4623          * Run through all PMCs that were targeting the old process
4624          * and which specified F_DESCENDANTS and attach them to the
4625          * new process.
4626          *
4627          * Log the fork event to all owners of PMCs attached to this
4628          * process, if not already logged.
4629          */
4630         for (ri = 0; ri < md->pmd_npmc; ri++)
4631                 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
4632                     (pm->pm_flags & PMC_F_DESCENDANTS)) {
4633                         pmc_link_target_process(pm, ppnew);
4634                         po = pm->pm_owner;
4635                         if (po->po_sscount == 0 &&
4636                             po->po_flags & PMC_PO_OWNS_LOGFILE)
4637                                 pmclog_process_procfork(po, p1->p_pid,
4638                                     newproc->p_pid);
4639                 }
4640
4641         /*
4642          * Now mark the new process as being tracked by this driver.
4643          */
4644         PROC_LOCK(newproc);
4645         newproc->p_flag |= P_HWPMC;
4646         PROC_UNLOCK(newproc);
4647
4648  done:
4649         sx_xunlock(&pmc_sx);
4650 }
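
/*
 * Illustrative sketch (not part of this file): a pmc(3) consumer asks
 * for the inheritance behaviour implemented above by passing
 * PMC_F_DESCENDANTS when allocating a process-mode PMC.  The event
 * name "instructions" and 'target_pid' below are placeholders.
 *
 *	pmc_id_t pmcid;
 *
 *	pmc_init();
 *	pmc_allocate("instructions", PMC_MODE_TC, PMC_F_DESCENDANTS,
 *	    PMC_CPU_ANY, &pmcid);
 *	pmc_attach(pmcid, target_pid);
 *	pmc_start(pmcid);		forked children now inherit the PMC
 */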
4651
4652
4653 /*
4654  * initialization
4655  */
4656
4657 static const char *pmc_name_of_pmcclass[] = {
4658 #undef  __PMC_CLASS
4659 #define __PMC_CLASS(N) #N ,
4660         __PMC_CLASSES()
4661 };
4662
4663 /*
4664  * Base class initializer: allocate structure and set default classes.
4665  */
4666 struct pmc_mdep *
4667 pmc_mdep_alloc(int nclasses)
4668 {
4669         struct pmc_mdep *md;
4670         int     n;
4671
4672         /* SOFT + md classes */
4673         n = 1 + nclasses;
4674         /* An M_WAITOK allocation cannot fail, so no NULL check is needed. */
4675         md = malloc(sizeof(struct pmc_mdep) + n *
4676             sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO);
4677         md->pmd_nclass = n;
4678
4679         /* Add the base (software) class. */
4680         pmc_soft_initialize(md);
4681
4682         return (md);
4684 }
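
/*
 * Illustrative sketch (hypothetical backend): a machine-dependent
 * initializer typically asks for its extra classes here and then fills
 * in the classdep slots following the built-in SOFT class in slot 0,
 * along the lines of:
 *
 *	struct pmc_mdep *md;
 *	struct pmc_classdep *pcd;
 *
 *	md  = pmc_mdep_alloc(1);	    SOFT plus one hardware class
 *	pcd = &md->pmd_classdep[1];	    slot 0 holds the SOFT class
 *	pcd->pcd_class = PMC_CLASS_TSC;	    example class constant
 *	pcd->pcd_num   = 1;
 *	pcd->pcd_width = 64;
 *	    ... set the pcd_* method pointers ...
 */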
4685
4686 void
4687 pmc_mdep_free(struct pmc_mdep *md)
4688 {
4689         pmc_soft_finalize(md);
4690         free(md, M_PMC);
4691 }
4692
4693 static int
4694 generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
4695 {
4696         (void) pc; (void) pp;
4697
4698         return (0);
4699 }
4700
4701 static int
4702 generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
4703 {
4704         (void) pc; (void) pp;
4705
4706         return (0);
4707 }
4708
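/*
 * Fallback initializer, used when pmc_md_initialize() does not
 * recognize the CPU; it offers only the built-in SOFT class.
 */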
4709 static struct pmc_mdep *
4710 pmc_generic_cpu_initialize(void)
4711 {
4712         struct pmc_mdep *md;
4713
4714         md = pmc_mdep_alloc(0);
4715
4716         md->pmd_cputype    = PMC_CPU_GENERIC;
4717
4718         md->pmd_pcpu_init  = NULL;
4719         md->pmd_pcpu_fini  = NULL;
4720         md->pmd_switch_in  = generic_switch_in;
4721         md->pmd_switch_out = generic_switch_out;
4722
4723         return (md);
4724 }
4725
4726 static void
4727 pmc_generic_cpu_finalize(struct pmc_mdep *md)
4728 {
4729         (void) md;
4730 }
4731
4732
4733 static int
4734 pmc_initialize(void)
4735 {
4736         int c, cpu, error, n, ri;
4737         unsigned int maxcpu;
4738         struct pmc_binding pb;
4739         struct pmc_sample *ps;
4740         struct pmc_classdep *pcd;
4741         struct pmc_samplebuffer *sb;
4742
4743         md = NULL;
4744         error = 0;
4745
4746 #ifdef  DEBUG
4747         /* parse debug flags first */
4748         if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
4749                 pmc_debugstr, sizeof(pmc_debugstr)))
4750                 pmc_debugflags_parse(pmc_debugstr,
4751                     pmc_debugstr+strlen(pmc_debugstr));
4752 #endif
4753
4754         PMCDBG(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);
4755
4756         /* check kernel version */
4757         if (pmc_kernel_version != PMC_VERSION) {
4758                 if (pmc_kernel_version == 0)
4759                         printf("hwpmc: this kernel has not been compiled with "
4760                             "'options HWPMC_HOOKS'.\n");
4761                 else
4762                         printf("hwpmc: kernel version (0x%x) does not match "
4763                             "module version (0x%x).\n", pmc_kernel_version,
4764                             PMC_VERSION);
4765                 return (EPROGMISMATCH);
4766         }
4767
4768         /*
4769          * Validate tunables; fall back to defaults if out of range.
4770          */
4771
4772         if (pmc_hashsize <= 0) {
4773                 (void) printf("hwpmc: tunable \"hashsize\"=%d must be "
4774                     "greater than zero.\n", pmc_hashsize);
4775                 pmc_hashsize = PMC_HASH_SIZE;
4776         }
4777
4778         if (pmc_nsamples <= 0 || pmc_nsamples > 65535) {
4779                 (void) printf("hwpmc: tunable \"nsamples\"=%d out of "
4780                     "range.\n", pmc_nsamples);
4781                 pmc_nsamples = PMC_NSAMPLES;
4782         }
4783
4784         if (pmc_callchaindepth <= 0 ||
4785             pmc_callchaindepth > PMC_CALLCHAIN_DEPTH_MAX) {
4786                 (void) printf("hwpmc: tunable \"callchaindepth\"=%d out of "
4787                     "range.\n", pmc_callchaindepth);
4788                 pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
4789         }
4790
4791         md = pmc_md_initialize();
4792         if (md == NULL) {
4793                 /* Default to generic CPU. */
4794                 md = pmc_generic_cpu_initialize();
4795                 if (md == NULL)
4796                         return (ENOSYS);
4797         }
4798
4799         KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1,
4800             ("[pmc,%d] no classes or pmcs", __LINE__));
4801
4802         /* Compute the map from row indices to classdep pointers. */
4803         pmc_rowindex_to_classdep = malloc(sizeof(struct pmc_classdep *) *
4804             md->pmd_npmc, M_PMC, M_WAITOK|M_ZERO);
4805
4806         /* M_ZERO has already set every slot to NULL. */
4807         for (ri = c = 0; c < md->pmd_nclass; c++) {
4808                 pcd = &md->pmd_classdep[c];
4809                 for (n = 0; n < pcd->pcd_num; n++, ri++)
4810                         pmc_rowindex_to_classdep[ri] = pcd;
4811         }
4813
4814         KASSERT(ri == md->pmd_npmc,
4815             ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__,
4816             ri, md->pmd_npmc));
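
        /*
         * Worked example (hypothetical sizes): with the SOFT class
         * exporting 16 software PMCs and one hardware class exporting
         * 4 counters, rows 0..15 map to the SOFT classdep, rows 16..19
         * map to the hardware classdep, and md->pmd_npmc is 20.
         */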
4817
4818         maxcpu = pmc_cpu_max();
4819
4820         /* allocate space for the per-cpu array */
4821         pmc_pcpu = malloc(maxcpu * sizeof(struct pmc_cpu *), M_PMC,
4822             M_WAITOK|M_ZERO);
4823
4824         /* per-cpu 'saved values' for managing process-mode PMCs */
4825         pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc,
4826             M_PMC, M_WAITOK);
4827
4828         /* Perform CPU-dependent initialization. */
4829         pmc_save_cpu_binding(&pb);
4830         error = 0;
4831         for (cpu = 0; error == 0 && cpu < maxcpu; cpu++) {
4832                 if (!pmc_cpu_is_active(cpu))
4833                         continue;
4834                 pmc_select_cpu(cpu);
4835                 pmc_pcpu[cpu] = malloc(sizeof(struct pmc_cpu) +
4836                     md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC,
4837                     M_WAITOK|M_ZERO);
4838                 if (md->pmd_pcpu_init)
4839                         error = md->pmd_pcpu_init(md, cpu);
4840                 for (n = 0; error == 0 && n < md->pmd_nclass; n++)
4841                         error = md->pmd_classdep[n].pcd_pcpu_init(md, cpu);
4842         }
4843         pmc_restore_cpu_binding(&pb);
4844
4845         if (error)
4846                 return (error);
4847
4848         /* allocate space for the sample array */
4849         for (cpu = 0; cpu < maxcpu; cpu++) {
4850                 if (!pmc_cpu_is_active(cpu))
4851                         continue;
4852
4853                 sb = malloc(sizeof(struct pmc_samplebuffer) +
4854                     pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
4855                     M_WAITOK|M_ZERO);
4856                 sb->ps_read = sb->ps_write = sb->ps_samples;
4857                 sb->ps_fence = sb->ps_samples + pmc_nsamples;
4858
4859                 KASSERT(pmc_pcpu[cpu] != NULL,
4860                     ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
4861
4862                 sb->ps_callchains = malloc(pmc_callchaindepth * pmc_nsamples *
4863                     sizeof(uintptr_t), M_PMC, M_WAITOK|M_ZERO);
4864
4865                 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
4866                         ps->ps_pc = sb->ps_callchains +
4867                             (n * pmc_callchaindepth);
4868
4869                 pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb;
4870
4871                 sb = malloc(sizeof(struct pmc_samplebuffer) +
4872                     pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
4873                     M_WAITOK|M_ZERO);
4874                 sb->ps_read = sb->ps_write = sb->ps_samples;
4875                 sb->ps_fence = sb->ps_samples + pmc_nsamples;
4876
4877                 KASSERT(pmc_pcpu[cpu] != NULL,
4878                     ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
4879
4880                 sb->ps_callchains = malloc(pmc_callchaindepth * pmc_nsamples *
4881                     sizeof(uintptr_t), M_PMC, M_WAITOK|M_ZERO);
4882
4883                 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
4884                         ps->ps_pc = sb->ps_callchains +
4885                             (n * pmc_callchaindepth);
4886
4887                 pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb;
4888         }
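
        /*
         * Layout of each buffer built above: a fixed ring of
         * pmc_nsamples entries beside a flat callchain arena; sample
         * 'n' owns the pmc_callchaindepth-entry slice starting at
         * ps_callchains + n * pmc_callchaindepth.  ps_read and
         * ps_write begin at ps_samples, and ps_fence marks one past
         * the last sample.
         */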
4889
4890         /* allocate space for the row disposition array */
4891         pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
4892             M_PMC, M_WAITOK|M_ZERO);
4893
4894         KASSERT(pmc_pmcdisp != NULL,
4895             ("[pmc,%d] pmcdisp allocation returned NULL", __LINE__));
4896
4897         /* mark all PMCs as available */
4898         for (n = 0; n < (int) md->pmd_npmc; n++)
4899                 PMC_MARK_ROW_FREE(n);
4900
4901         /* allocate the owner and process hash tables */
4902         pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
4903             &pmc_ownerhashmask);
4904
4905         pmc_processhash = hashinit(pmc_hashsize, M_PMC,
4906             &pmc_processhashmask);
4907         mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",
4908             MTX_SPIN);
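
        /*
         * Note: hashinit(9) sizes each table to the largest power of
         * two not exceeding pmc_hashsize and returns the matching
         * mask, so a bucket is found by ANDing a hash value with the
         * mask.
         */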
4909
4910         LIST_INIT(&pmc_ss_owners);
4911         pmc_ss_count = 0;
4912
4913         /* allocate a pool of spin mutexes */
4914         pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size,
4915             MTX_SPIN);
4916
4917         PMCDBG(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
4918             "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
4919             pmc_processhash, pmc_processhashmask);
4920
4921         /* register process {exit,fork} handlers */
4922         pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
4923             pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
4924         pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
4925             pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);
4926
4927         /* initialize logging */
4928         pmclog_initialize();
4929
4930         /* set hook functions */
4931         pmc_intr = md->pmd_intr;
4932         pmc_hook = pmc_hook_handler;
4933
4934         if (error == 0) {
4935                 printf(PMC_MODULE_NAME ":");
4936                 for (n = 0; n < (int) md->pmd_nclass; n++) {
4937                         pcd = &md->pmd_classdep[n];
4938                         printf(" %s/%d/%d/0x%b",
4939                             pmc_name_of_pmcclass[pcd->pcd_class],
4940                             pcd->pcd_num,
4941                             pcd->pcd_width,
4942                             pcd->pcd_caps,
4943                             "\20"
4944                             "\1INT\2USR\3SYS\4EDG\5THR"
4945                             "\6REA\7WRI\10INV\11QUA\12PRC"
4946                             "\13TAG\14CSC");
4947                 }
4948                 printf("\n");
4949         }
4950
4951         return (error);
4952 }
4953
4954 /* prepare to be unloaded */
4955 static void
4956 pmc_cleanup(void)
4957 {
4958         int c, cpu;
4959         unsigned int maxcpu;
4960         struct pmc_ownerhash *ph;
4961         struct pmc_owner *po, *tmp;
4962         struct pmc_binding pb;
4963 #ifdef  DEBUG
4964         struct pmc_processhash *prh;
4965 #endif
4966
4967         PMCDBG(MOD,INI,0, "%s", "cleanup");
4968
4969         /* switch off sampling */
4970         CPU_ZERO(&pmc_cpumask);
4971         pmc_intr = NULL;
4972
4973         sx_xlock(&pmc_sx);
4974         if (pmc_hook == NULL) { /* being unloaded already */
4975                 sx_xunlock(&pmc_sx);
4976                 return;
4977         }
4978
4979         pmc_hook = NULL; /* prevent new threads from entering module */
4980
4981         /* deregister event handlers */
4982         EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
4983         EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
4984
4985         /* send SIGBUS to all owner processes, free up allocations */
4986         if (pmc_ownerhash)
4987                 for (ph = pmc_ownerhash;
4988                      ph <= &pmc_ownerhash[pmc_ownerhashmask];
4989                      ph++) {
4990                         LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
4991                                 pmc_remove_owner(po);
4992
4993                                 /* send SIGBUS to owner processes */
4994                                 PMCDBG(MOD,INI,2, "cleanup signal proc=%p "
4995                                     "(%d, %s)", po->po_owner,
4996                                     po->po_owner->p_pid,
4997                                     po->po_owner->p_comm);
4998
4999                                 PROC_LOCK(po->po_owner);
5000                                 kern_psignal(po->po_owner, SIGBUS);
5001                                 PROC_UNLOCK(po->po_owner);
5002
5003                                 pmc_destroy_owner_descriptor(po);
5004                         }
5005                 }
5006
5007         /* reclaim allocated data structures */
5008         if (pmc_mtxpool)
5009                 mtx_pool_destroy(&pmc_mtxpool);
5010
5011         mtx_destroy(&pmc_processhash_mtx);
5012         if (pmc_processhash) {
5013 #ifdef  DEBUG
5014                 struct pmc_process *pp;
5015
5016                 PMCDBG(MOD,INI,3, "%s", "destroy process hash");
5017                 for (prh = pmc_processhash;
5018                      prh <= &pmc_processhash[pmc_processhashmask];
5019                      prh++)
5020                         LIST_FOREACH(pp, prh, pp_next)
5021                             PMCDBG(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);
5022 #endif
5023
5024                 hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
5025                 pmc_processhash = NULL;
5026         }
5027
5028         if (pmc_ownerhash) {
5029                 PMCDBG(MOD,INI,3, "%s", "destroy owner hash");
5030                 hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
5031                 pmc_ownerhash = NULL;
5032         }
5033
5034         KASSERT(LIST_EMPTY(&pmc_ss_owners),
5035             ("[pmc,%d] Global SS owner list not empty", __LINE__));
5036         KASSERT(pmc_ss_count == 0,
5037             ("[pmc,%d] Global SS count not zero", __LINE__));
5038
5039         /* do processor and pmc-class dependent cleanup */
5040         maxcpu = pmc_cpu_max();
5041
5042         PMCDBG(MOD,INI,3, "%s", "md cleanup");
5043         if (md) {
5044                 pmc_save_cpu_binding(&pb);
5045                 for (cpu = 0; cpu < maxcpu; cpu++) {
5046                         PMCDBG(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
5047                             cpu, pmc_pcpu[cpu]);
5048                         if (!pmc_cpu_is_active(cpu) || pmc_pcpu[cpu] == NULL)
5049                                 continue;
5050                         pmc_select_cpu(cpu);
5051                         for (c = 0; c < md->pmd_nclass; c++)
5052                                 md->pmd_classdep[c].pcd_pcpu_fini(md, cpu);
5053                         if (md->pmd_pcpu_fini)
5054                                 md->pmd_pcpu_fini(md, cpu);
5055                 }
5056
5057                 if (md->pmd_cputype == PMC_CPU_GENERIC)
5058                         pmc_generic_cpu_finalize(md);
5059                 else
5060                         pmc_md_finalize(md);
5061
5062                 pmc_mdep_free(md);
5063                 md = NULL;
5064                 pmc_restore_cpu_binding(&pb);
5065         }
5066
5067         /* Free per-cpu descriptors. */
5068         for (cpu = 0; cpu < maxcpu; cpu++) {
5069                 if (!pmc_cpu_is_active(cpu))
5070                         continue;
5071                 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_HR] != NULL,
5072                     ("[pmc,%d] Null hw cpu sample buffer cpu=%d", __LINE__,
5073                         cpu));
5074                 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_SR] != NULL,
5075                     ("[pmc,%d] Null sw cpu sample buffer cpu=%d", __LINE__,
5076                         cpu));
5077                 free(pmc_pcpu[cpu]->pc_sb[PMC_HR]->ps_callchains, M_PMC);
5078                 free(pmc_pcpu[cpu]->pc_sb[PMC_HR], M_PMC);
5079                 free(pmc_pcpu[cpu]->pc_sb[PMC_SR]->ps_callchains, M_PMC);
5080                 free(pmc_pcpu[cpu]->pc_sb[PMC_SR], M_PMC);
5081                 free(pmc_pcpu[cpu], M_PMC);
5082         }
5083
5084         free(pmc_pcpu, M_PMC);
5085         pmc_pcpu = NULL;
5086
5087         free(pmc_pcpu_saved, M_PMC);
5088         pmc_pcpu_saved = NULL;
5089
5090         if (pmc_pmcdisp) {
5091                 free(pmc_pmcdisp, M_PMC);
5092                 pmc_pmcdisp = NULL;
5093         }
5094
5095         if (pmc_rowindex_to_classdep) {
5096                 free(pmc_rowindex_to_classdep, M_PMC);
5097                 pmc_rowindex_to_classdep = NULL;
5098         }
5099
5100         pmclog_shutdown();
5101
5102         sx_xunlock(&pmc_sx);    /* we are done */
5103 }
5104
5105 /*
5106  * The module load/unload entry point.
5107  */
5108
5109 static int
5110 load(struct module *module __unused, int cmd, void *arg __unused)
5111 {
5112         int error;
5113
5114         error = 0;
5115
5116         switch (cmd) {
5117         case MOD_LOAD :
5118                 /* initialize the subsystem */
5119                 error = pmc_initialize();
5120                 if (error != 0)
5121                         break;
5122                 PMCDBG(MOD,INI,1, "syscall=%d maxcpu=%d",
5123                     pmc_syscall_num, pmc_cpu_max());
5124                 break;
5125
5127         case MOD_UNLOAD :
5128         case MOD_SHUTDOWN:
5129                 pmc_cleanup();
5130                 PMCDBG(MOD,INI,1, "%s", "unloaded");
5131                 break;
5132
5133         default :
5134                 error = EINVAL; /* XXX should panic(9) */
5135                 break;
5136         }
5137
5138         return (error);
5139 }
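
/*
 * Illustrative sketch: 'load' is wired into the kernel through the
 * usual moduledata_t / DECLARE_MODULE(9) glue, along the lines of:
 *
 *	static moduledata_t pmc_mod = {
 *		PMC_MODULE_NAME,
 *		load,
 *		NULL
 *	};
 *	DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
 */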
5140
5141 /* malloc(9) type covering all of the module's allocations */
5142 MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module");