sys/dev/hwpmc/hwpmc_mod.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * Copyright (c) 2018 Matthew Macy
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/gtaskqueue.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/pmclog.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/vnode.h>

#include <sys/linker.h>         /* needs to be after <sys/malloc.h> */

#include <machine/atomic.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include "hwpmc_soft.h"

#ifdef NUMA
#define NDOMAINS vm_ndomains
#else
#define NDOMAINS 1
#define malloc_domain(size, type, domain, flags) malloc((size), (type), (flags))
#define free_domain(addr, type) free(addr, type)
#endif

#define PMC_EPOCH_ENTER() struct epoch_tracker pmc_et; epoch_enter_preempt(global_epoch_preempt, &pmc_et)
#define PMC_EPOCH_EXIT() epoch_exit_preempt(global_epoch_preempt, &pmc_et)
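
/*
 * Note that PMC_EPOCH_ENTER() declares the tracker 'pmc_et' in the
 * current scope, so each use must be paired with a PMC_EPOCH_EXIT()
 * in the same block, and the pair cannot be opened twice in one scope.
 */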

/*
 * Types
 */

enum pmc_flags {
        PMC_FLAG_NONE     = 0x00, /* do nothing */
        PMC_FLAG_REMOVE   = 0x01, /* atomically remove entry from hash */
        PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
        PMC_FLAG_NOWAIT   = 0x04, /* do not wait for mallocs */
};

/*
 * The offset in sysent where the syscall is allocated.
 */

static int pmc_syscall_num = NO_SYSCALL;
struct pmc_cpu          **pmc_pcpu;      /* per-cpu state */
pmc_value_t             *pmc_pcpu_saved; /* saved PMC values: CSW handling */

#define PMC_PCPU_SAVED(C,R)     pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
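
/*
 * The saved-value array is a flat [cpu][row] matrix: entry (C,R) lives
 * at index R + md->pmd_npmc * C, i.e. one row of md->pmd_npmc values
 * per CPU.
 */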

struct mtx_pool         *pmc_mtxpool;
static int              *pmc_pmcdisp;    /* PMC row dispositions */

#define PMC_ROW_DISP_IS_FREE(R)         (pmc_pmcdisp[(R)] == 0)
#define PMC_ROW_DISP_IS_THREAD(R)       (pmc_pmcdisp[(R)] > 0)
#define PMC_ROW_DISP_IS_STANDALONE(R)   (pmc_pmcdisp[(R)] < 0)

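/*
 * A row's disposition counter records how the row is being used: zero
 * means free, a positive value counts the thread-mode (process-virtual)
 * PMCs using the row, and a negative value counts, negated, the CPUs on
 * which a system-wide PMC occupies the row.  The macros below maintain
 * the counter and assert that it stays within those bounds.
 */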
#define PMC_MARK_ROW_FREE(R) do {                                         \
        pmc_pmcdisp[(R)] = 0;                                             \
} while (0)

#define PMC_MARK_ROW_STANDALONE(R) do {                                   \
        KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
                    __LINE__));                                           \
        atomic_add_int(&pmc_pmcdisp[(R)], -1);                            \
        KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()),              \
                ("[pmc,%d] row disposition error", __LINE__));            \
} while (0)

#define PMC_UNMARK_ROW_STANDALONE(R) do {                                 \
        atomic_add_int(&pmc_pmcdisp[(R)], 1);                             \
        KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
                    __LINE__));                                           \
} while (0)

#define PMC_MARK_ROW_THREAD(R) do {                                       \
        KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
                    __LINE__));                                           \
        atomic_add_int(&pmc_pmcdisp[(R)], 1);                             \
} while (0)

#define PMC_UNMARK_ROW_THREAD(R) do {                                     \
        atomic_add_int(&pmc_pmcdisp[(R)], -1);                            \
        KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
                    __LINE__));                                           \
} while (0)


/* various event handlers */
static eventhandler_tag pmc_exit_tag, pmc_fork_tag, pmc_kld_load_tag,
    pmc_kld_unload_tag;

/* Module statistics */
struct pmc_driverstats pmc_stats;


/* Machine/processor dependent operations */
static struct pmc_mdep  *md;

/*
 * Hash tables mapping owner processes and target threads to PMCs.
 */

struct mtx pmc_processhash_mtx;         /* spin mutex */
static u_long pmc_processhashmask;
static LIST_HEAD(pmc_processhash, pmc_process)  *pmc_processhash;

/*
 * Hash table of PMC owner descriptors.  This table is protected by
 * the shared PMC "sx" lock.
 */

static u_long pmc_ownerhashmask;
static LIST_HEAD(pmc_ownerhash, pmc_owner)      *pmc_ownerhash;

/*
 * List of PMC owners with system-wide sampling PMCs.
 */

static CK_LIST_HEAD(, pmc_owner)                        pmc_ss_owners;

/*
 * List of free thread entries. This is protected by the spin
 * mutex.
 */
static struct mtx pmc_threadfreelist_mtx;       /* spin mutex */
static LIST_HEAD(, pmc_thread)                  pmc_threadfreelist;
static int pmc_threadfreelist_entries = 0;
#define THREADENTRY_SIZE                                                \
    (sizeof(struct pmc_thread) + (md->pmd_npmc * sizeof(struct pmc_threadpmcstate)))
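
/*
 * Each thread descriptor carries a variable-length trailing array with
 * one struct pmc_threadpmcstate per PMC row, which is why the
 * allocation size above depends on md->pmd_npmc.
 */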

/*
 * Task to free thread descriptors
 */
static struct grouptask free_gtask;

/*
 * A map of row indices to classdep structures.
 */
static struct pmc_classdep **pmc_rowindex_to_classdep;

/*
 * Prototypes
 */

#ifdef  HWPMC_DEBUG
static int      pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
static int      pmc_debugflags_parse(char *newstr, char *fence);
#endif

static int      load(struct module *module, int cmd, void *arg);
static int      pmc_add_sample(int ring, struct pmc *pm, struct trapframe *tf);
static void     pmc_add_thread_descriptors_from_proc(struct proc *p,
    struct pmc_process *pp);
static int      pmc_attach_process(struct proc *p, struct pmc *pm);
static struct pmc *pmc_allocate_pmc_descriptor(void);
static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p);
static int      pmc_attach_one_process(struct proc *p, struct pmc *pm);
static int      pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
    int cpu);
static int      pmc_can_attach(struct pmc *pm, struct proc *p);
static void     pmc_capture_user_callchain(int cpu, int soft, struct trapframe *tf);
static void     pmc_cleanup(void);
static int      pmc_detach_process(struct proc *p, struct pmc *pm);
static int      pmc_detach_one_process(struct proc *p, struct pmc *pm,
    int flags);
static void     pmc_destroy_owner_descriptor(struct pmc_owner *po);
static void     pmc_destroy_pmc_descriptor(struct pmc *pm);
static void     pmc_destroy_process_descriptor(struct pmc_process *pp);
static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
static int      pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
    pmc_id_t pmc);
static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
    uint32_t mode);
static struct pmc_thread *pmc_find_thread_descriptor(struct pmc_process *pp,
    struct thread *td, uint32_t mode);
static void     pmc_force_context_switch(void);
static void     pmc_link_target_process(struct pmc *pm,
    struct pmc_process *pp);
static void     pmc_log_all_process_mappings(struct pmc_owner *po);
static void     pmc_log_kernel_mappings(struct pmc *pm);
static void     pmc_log_process_mappings(struct pmc_owner *po, struct proc *p);
static void     pmc_maybe_remove_owner(struct pmc_owner *po);
static void     pmc_process_csw_in(struct thread *td);
static void     pmc_process_csw_out(struct thread *td);
static void     pmc_process_exit(void *arg, struct proc *p);
static void     pmc_process_fork(void *arg, struct proc *p1,
    struct proc *p2, int n);
static void     pmc_process_samples(int cpu, int soft);
static void     pmc_release_pmc_descriptor(struct pmc *pmc);
static void     pmc_process_thread_add(struct thread *td);
static void     pmc_process_thread_delete(struct thread *td);
static void     pmc_process_thread_userret(struct thread *td);
static void     pmc_remove_owner(struct pmc_owner *po);
static void     pmc_remove_process_descriptor(struct pmc_process *pp);
static void     pmc_restore_cpu_binding(struct pmc_binding *pb);
static void     pmc_save_cpu_binding(struct pmc_binding *pb);
static void     pmc_select_cpu(int cpu);
static int      pmc_start(struct pmc *pm);
static int      pmc_stop(struct pmc *pm);
static int      pmc_syscall_handler(struct thread *td, void *syscall_args);
static struct pmc_thread *pmc_thread_descriptor_pool_alloc(void);
static void     pmc_thread_descriptor_pool_drain(void);
static void     pmc_thread_descriptor_pool_free(struct pmc_thread *pt);
static void     pmc_unlink_target_process(struct pmc *pmc,
    struct pmc_process *pp);
static int generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp);
static int generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp);
static struct pmc_mdep *pmc_generic_cpu_initialize(void);
static void pmc_generic_cpu_finalize(struct pmc_mdep *md);
static void pmc_post_callchain_callback(void);
static void pmc_process_threadcreate(struct thread *td);
static void pmc_process_threadexit(struct thread *td);
static void pmc_process_proccreate(struct proc *p);
static void pmc_process_allproc(struct pmc *pm);

/*
 * Kernel tunables and sysctl(8) interface.
 */

SYSCTL_DECL(_kern_hwpmc);
SYSCTL_NODE(_kern_hwpmc, OID_AUTO, stats, CTLFLAG_RW, 0, "HWPMC stats");


/* Stats. */
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_ignored, CTLFLAG_RW,
    &pmc_stats.pm_intr_ignored, "# of interrupts ignored");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_processed, CTLFLAG_RW,
    &pmc_stats.pm_intr_processed, "# of interrupts processed");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_bufferfull, CTLFLAG_RW,
    &pmc_stats.pm_intr_bufferfull, "# of interrupts where buffer was full");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscalls, CTLFLAG_RW,
    &pmc_stats.pm_syscalls, "# of syscalls");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscall_errors, CTLFLAG_RW,
    &pmc_stats.pm_syscall_errors, "# of syscall errors");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests, CTLFLAG_RW,
    &pmc_stats.pm_buffer_requests, "# of buffer requests");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests_failed, CTLFLAG_RW,
    &pmc_stats.pm_buffer_requests_failed, "# of buffer requests which failed");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, log_sweeps, CTLFLAG_RW,
    &pmc_stats.pm_log_sweeps, "# of log sweeps");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, merges, CTLFLAG_RW,
    &pmc_stats.pm_merges, "# of times kernel stack was found for user trace");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, overwrites, CTLFLAG_RW,
    &pmc_stats.pm_overwrites, "# of times a sample was overwritten before being logged");

static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_RDTUN,
    &pmc_callchaindepth, 0, "depth of call chain records");

char pmc_cpuid[64];
SYSCTL_STRING(_kern_hwpmc, OID_AUTO, cpuid, CTLFLAG_RD,
    pmc_cpuid, 0, "cpu version string");
#ifdef  HWPMC_DEBUG
struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
char    pmc_debugstr[PMC_DEBUG_STRSIZE];
TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
    sizeof(pmc_debugstr));
SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
    0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
#endif


/*
 * kern.hwpmc.hashsize -- determines the number of rows in the hash
 * tables used to look up target and owner processes
 */

static int pmc_hashsize = PMC_HASH_SIZE;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_RDTUN,
    &pmc_hashsize, 0, "rows in hash tables");

/*
 * kern.hwpmc.nsamples -- number of PC samples/callchain stacks per CPU
 */

static int pmc_nsamples = PMC_NSAMPLES;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_RDTUN,
    &pmc_nsamples, 0, "number of PC samples per CPU");


/*
 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
 */

static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_RDTUN,
    &pmc_mtxpool_size, 0, "size of spin mutex pool");


/*
 * kern.hwpmc.threadfreelist_entries -- number of free entries
 */

SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_entries, CTLFLAG_RD,
    &pmc_threadfreelist_entries, 0, "number of available thread entries");


/*
 * kern.hwpmc.threadfreelist_max -- maximum number of free entries
 */

static int pmc_threadfreelist_max = PMC_THREADLIST_MAX;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_max, CTLFLAG_RW,
    &pmc_threadfreelist_max, 0,
    "maximum number of available thread entries before freeing some");


/*
 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
 * allocate system-wide PMCs.
 *
 * Allowing unprivileged processes to allocate system PMCs is convenient
 * if system-wide measurements need to be taken concurrently with other
 * per-process measurements.  This feature is turned off by default.
 */

static int pmc_unprivileged_syspmcs = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RWTUN,
    &pmc_unprivileged_syspmcs, 0,
    "allow unprivileged process to allocate system PMCs");

/*
 * Hash function.  Discard the lower 2 bits of the pointer since
 * these are always zero for our uses.  The hash multiplier is
 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
 */

#if     LONG_BIT == 64
#define _PMC_HM         11400714819323198486u
#elif   LONG_BIT == 32
#define _PMC_HM         2654435769u
#else
#error  Must know the size of 'long' to compile
#endif

#define PMC_HASH_PTR(P,M)       ((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
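
/*
 * Example (illustrative): a process descriptor lookup hashes the
 * struct proc pointer into a bucket of pmc_processhash, e.g.
 *
 *     ph = &pmc_processhash[PMC_HASH_PTR(p, pmc_processhashmask)];
 *
 * where the mask is a power-of-two bucket count minus one, as
 * produced by hashinit(9); 'ph' here is a hypothetical local.
 */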

/*
 * Syscall structures
 */

/* The `sysent' for the new syscall */
static struct sysent pmc_sysent = {
        .sy_narg =      2,
        .sy_call =      pmc_syscall_handler,
};

static struct syscall_module_data pmc_syscall_mod = {
        .chainevh =     load,
        .chainarg =     NULL,
        .offset =       &pmc_syscall_num,
        .new_sysent =   &pmc_sysent,
        .old_sysent =   { .sy_narg = 0, .sy_call = NULL },
        .flags =        SY_THR_STATIC_KLD,
};

static moduledata_t pmc_mod = {
        .name =         PMC_MODULE_NAME,
        .evhand =       syscall_module_handler,
        .priv =         &pmc_syscall_mod,
};

#ifdef EARLY_AP_STARTUP
DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SYSCALLS, SI_ORDER_ANY);
#else
DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
#endif
MODULE_VERSION(pmc, PMC_VERSION);

#ifdef  HWPMC_DEBUG
enum pmc_dbgparse_state {
        PMCDS_WS,               /* in whitespace */
        PMCDS_MAJOR,            /* seen a major keyword */
        PMCDS_MINOR
};

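/*
 * Illustrative tunable syntax, as accepted by the parser below: a
 * whitespace-separated list of "<group>=<flag>,<flag>,..." tokens,
 * where "*" selects every flag in a group.  For example (hypothetical
 * setting):
 *
 *     kern.hwpmc.debugflags="cpu=* process=attach,exec,fork"
 *
 * enables all CPU-group messages plus the attach, exec and fork
 * process events.
 */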
static int
pmc_debugflags_parse(char *newstr, char *fence)
{
        char c, *p, *q;
        struct pmc_debugflags *tmpflags;
        int error, found, *newbits, tmp;
        size_t kwlen;

        tmpflags = malloc(sizeof(*tmpflags), M_PMC, M_WAITOK|M_ZERO);

        p = newstr;
        error = 0;

        for (; p < fence && (c = *p); p++) {

                /* skip white space */
                if (c == ' ' || c == '\t')
                        continue;

                /* look for a keyword followed by "=" */
                for (q = p; p < fence && (c = *p) && c != '='; p++)
                        ;
                if (c != '=') {
                        error = EINVAL;
                        goto done;
                }

                kwlen = p - q;
                newbits = NULL;

                /* lookup flag group name */
#define DBG_SET_FLAG_MAJ(S,F)                                           \
                if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0)  \
                        newbits = &tmpflags->pdb_ ## F;

                DBG_SET_FLAG_MAJ("cpu",         CPU);
                DBG_SET_FLAG_MAJ("csw",         CSW);
                DBG_SET_FLAG_MAJ("logging",     LOG);
                DBG_SET_FLAG_MAJ("module",      MOD);
                DBG_SET_FLAG_MAJ("md",          MDP);
                DBG_SET_FLAG_MAJ("owner",       OWN);
                DBG_SET_FLAG_MAJ("pmc",         PMC);
                DBG_SET_FLAG_MAJ("process",     PRC);
                DBG_SET_FLAG_MAJ("sampling",    SAM);

                if (newbits == NULL) {
                        error = EINVAL;
                        goto done;
                }

                p++;            /* skip the '=' */

                /* Now parse the individual flags */
                tmp = 0;
        newflag:
                for (q = p; p < fence && (c = *p); p++)
                        if (c == ' ' || c == '\t' || c == ',')
                                break;

                /* p == fence or c == ws or c == "," or c == 0 */

                if ((kwlen = p - q) == 0) {
                        *newbits = tmp;
                        continue;
                }

                found = 0;
#define DBG_SET_FLAG_MIN(S,F)                                           \
                if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0)  \
                        tmp |= found = (1 << PMC_DEBUG_MIN_ ## F)

                /* a '*' denotes all possible flags in the group */
                if (kwlen == 1 && *q == '*')
                        tmp = found = ~0;
                /* look for individual flag names */
                DBG_SET_FLAG_MIN("allocaterow", ALR);
                DBG_SET_FLAG_MIN("allocate",    ALL);
                DBG_SET_FLAG_MIN("attach",      ATT);
                DBG_SET_FLAG_MIN("bind",        BND);
                DBG_SET_FLAG_MIN("config",      CFG);
                DBG_SET_FLAG_MIN("exec",        EXC);
                DBG_SET_FLAG_MIN("exit",        EXT);
                DBG_SET_FLAG_MIN("find",        FND);
                DBG_SET_FLAG_MIN("flush",       FLS);
                DBG_SET_FLAG_MIN("fork",        FRK);
                DBG_SET_FLAG_MIN("getbuf",      GTB);
                DBG_SET_FLAG_MIN("hook",        PMH);
                DBG_SET_FLAG_MIN("init",        INI);
                DBG_SET_FLAG_MIN("intr",        INT);
                DBG_SET_FLAG_MIN("linktarget",  TLK);
                DBG_SET_FLAG_MIN("mayberemove", OMR);
                DBG_SET_FLAG_MIN("ops",         OPS);
                DBG_SET_FLAG_MIN("read",        REA);
                DBG_SET_FLAG_MIN("register",    REG);
                DBG_SET_FLAG_MIN("release",     REL);
                DBG_SET_FLAG_MIN("remove",      ORM);
                DBG_SET_FLAG_MIN("sample",      SAM);
                DBG_SET_FLAG_MIN("scheduleio",  SIO);
                DBG_SET_FLAG_MIN("select",      SEL);
                DBG_SET_FLAG_MIN("signal",      SIG);
                DBG_SET_FLAG_MIN("swi",         SWI);
                DBG_SET_FLAG_MIN("swo",         SWO);
                DBG_SET_FLAG_MIN("start",       STA);
                DBG_SET_FLAG_MIN("stop",        STO);
                DBG_SET_FLAG_MIN("syscall",     PMS);
                DBG_SET_FLAG_MIN("unlinktarget", TUL);
                DBG_SET_FLAG_MIN("write",       WRI);
                if (found == 0) {
                        /* unrecognized flag name */
                        error = EINVAL;
                        goto done;
                }

                if (c == 0 || c == ' ' || c == '\t') {  /* end of flag group */
                        *newbits = tmp;
                        continue;
                }

                p++;
                goto newflag;
        }

        /* save the new flag set */
        bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags));

 done:
        free(tmpflags, M_PMC);
        return error;
}

static int
pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
        char *fence, *newstr;
        int error;
        unsigned int n;

        (void) arg1; (void) arg2; /* unused parameters */

        n = sizeof(pmc_debugstr);
        newstr = malloc(n, M_PMC, M_WAITOK|M_ZERO);
        (void) strlcpy(newstr, pmc_debugstr, n);

        error = sysctl_handle_string(oidp, newstr, n, req);

        /* if there is a new string, parse and copy it */
        if (error == 0 && req->newptr != NULL) {
                fence = newstr + (n < req->newlen ? n : req->newlen + 1);
                if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
                        (void) strlcpy(pmc_debugstr, newstr,
                            sizeof(pmc_debugstr));
        }

        free(newstr, M_PMC);

        return error;
}
#endif

/*
 * Map a row index to a classdep structure and return the adjusted row
 * index for the PMC class index.
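 *
 * For example (illustrative numbers): with a soft class at pcd_ri = 0
 * holding 16 PMCs and a hardware class at pcd_ri = 16, the global row
 * index ri = 18 maps to the hardware classdep with adjri = 2.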
 */
static struct pmc_classdep *
pmc_ri_to_classdep(struct pmc_mdep *md, int ri, int *adjri)
{
        struct pmc_classdep *pcd;

        (void) md;

        KASSERT(ri >= 0 && ri < md->pmd_npmc,
            ("[pmc,%d] illegal row-index %d", __LINE__, ri));

        pcd = pmc_rowindex_to_classdep[ri];

        KASSERT(pcd != NULL,
            ("[pmc,%d] ri %d null pcd", __LINE__, ri));

        *adjri = ri - pcd->pcd_ri;

        KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num,
            ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri));

        return (pcd);
}

/*
 * Concurrency Control
 *
 * The driver manages the following data structures:
 *
 *   - target process descriptors, one per target process
 *   - owner process descriptors (and attached lists), one per owner process
 *   - lookup hash tables for owner and target processes
 *   - PMC descriptors (and attached lists)
 *   - per-cpu hardware state
 *   - the 'hook' variable through which the kernel calls into
 *     this module
 *   - the machine hardware state (managed by the MD layer)
 *
 * These data structures are accessed from:
 *
 * - thread context-switch code
 * - interrupt handlers (possibly on multiple cpus)
 * - kernel threads on multiple cpus running on behalf of user
 *   processes doing system calls
 * - this driver's private kernel threads
 *
 * = Locks and Locking strategy =
 *
 * The driver uses four locking strategies for its operation:
 *
 * - The global SX lock "pmc_sx" is used to protect internal
 *   data structures.
 *
 *   Calls into the module by syscall() start with this lock being
 *   held in exclusive mode.  Depending on the requested operation,
 *   the lock may be downgraded to 'shared' mode to allow more
 *   concurrent readers into the module.  Calls into the module from
 *   other parts of the kernel acquire the lock in shared mode.
 *
 *   This SX lock is held in exclusive mode for any operations that
 *   modify the linkages between the driver's internal data structures.
 *
 *   The 'pmc_hook' function pointer is also protected by this lock.
 *   It is only examined with the sx lock held in exclusive mode.  The
 *   kernel module is allowed to be unloaded only with the sx lock held
 *   in exclusive mode.  In normal syscall handling, after acquiring the
 *   pmc_sx lock we first check that 'pmc_hook' is non-null before
 *   proceeding.  This prevents races between the thread unloading the
 *   module and other threads seeking to use the module.
 *
 * - Lookups of target process structures and owner process structures
 *   cannot use the global "pmc_sx" SX lock because these lookups need
 *   to happen during context switches and in other critical sections
 *   where sleeping is not allowed.  We protect these lookup tables
 *   with their own private spin-mutexes, "pmc_processhash_mtx" and
 *   "pmc_ownerhash_mtx".
 *
 * - Interrupt handlers work in a lock free manner.  At interrupt
 *   time, handlers look at the PMC pointer (phw->phw_pmc) configured
 *   when the PMC was started.  If this pointer is NULL, the interrupt
 *   is ignored after updating driver statistics.  We ensure that this
 *   pointer is set (using an atomic operation if necessary) before the
 *   PMC hardware is started.  Conversely, this pointer is unset atomically
 *   only after the PMC hardware is stopped.
 *
 *   We ensure that everything needed for the operation of an
 *   interrupt handler is available without it needing to acquire any
 *   locks.  We also ensure that a PMC's software state is destroyed only
 *   after the PMC is taken off hardware (on all CPUs).
 *
 * - Context-switch handling with process-private PMCs needs more
 *   care.
 *
 *   A given process may be the target of multiple PMCs.  For example,
 *   PMCATTACH and PMCDETACH may be requested by a process on one CPU
 *   while the target process is running on another.  A PMC could also
 *   be getting released because its owner is exiting.  We tackle
 *   these situations in the following manner:
 *
 *   - each target process structure 'pmc_process' has an array
 *     of 'struct pmc *' pointers, one for each hardware PMC.
 *
 *   - At context switch IN time, each "target" PMC in RUNNING state
 *     gets started on hardware and a pointer to each PMC is copied into
 *     the per-cpu phw array.  The 'runcount' for the PMC is
 *     incremented.
 *
 *   - At context switch OUT time, all process-virtual PMCs are stopped
 *     on hardware.  The saved value is added to the PMC's value field
 *     only if the PMC is in a non-deleted state (the PMC's state could
 *     have changed during the current time slice).
 *
 *     Note that in-between a switch IN on a processor and a switch
 *     OUT, the PMC could have been released on another CPU.  Therefore
 *     context switch OUT always looks at the hardware state to turn
 *     OFF PMCs and will update a PMC's saved value only if reachable
 *     from the target process record.
 *
 *   - OP PMCRELEASE could be called on a PMC at any time (the PMC could
 *     be attached to many processes at the time of the call and could
 *     be active on multiple CPUs).
 *
 *     We prevent further scheduling of the PMC by marking it as in
 *     state 'DELETED'.  If the runcount of the PMC is non-zero then
 *     this PMC is currently running on a CPU somewhere.  The thread
 *     doing the PMCRELEASE operation waits by repeatedly doing a
 *     pause() till the runcount comes to zero.
 *
 * The contents of a PMC descriptor (struct pmc) are protected using
 * a spin-mutex.  In order to save space, we use a mutex pool.
 *
 * In terms of lock types used by witness(4), we use:
 * - Type "pmc-sx", used by the global SX lock.
 * - Type "pmc-sleep", for sleep mutexes used by logger threads.
 * - Type "pmc-per-proc", for protecting PMC owner descriptors.
 * - Type "pmc-leaf", used for all other spin mutexes.
 */

/*
 * save the cpu binding of the current kthread
 */

static void
pmc_save_cpu_binding(struct pmc_binding *pb)
{
        PMCDBG0(CPU,BND,2, "save-cpu");
        thread_lock(curthread);
        pb->pb_bound = sched_is_bound(curthread);
        pb->pb_cpu   = curthread->td_oncpu;
        thread_unlock(curthread);
        PMCDBG1(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
}

/*
 * restore the cpu binding of the current thread
 */

static void
pmc_restore_cpu_binding(struct pmc_binding *pb)
{
        PMCDBG2(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
            curthread->td_oncpu, pb->pb_cpu);
        thread_lock(curthread);
        if (pb->pb_bound)
                sched_bind(curthread, pb->pb_cpu);
        else
                sched_unbind(curthread);
        thread_unlock(curthread);
        PMCDBG0(CPU,BND,2, "restore-cpu done");
}

/*
 * move execution to the specified cpu and bind it there.
 */

static void
pmc_select_cpu(int cpu)
{
        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[pmc,%d] bad cpu number %d", __LINE__, cpu));

        /* Never move to an inactive CPU. */
        KASSERT(pmc_cpu_is_active(cpu), ("[pmc,%d] selecting inactive "
            "CPU %d", __LINE__, cpu));

        PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d", cpu);
        thread_lock(curthread);
        sched_bind(curthread, cpu);
        thread_unlock(curthread);

        KASSERT(curthread->td_oncpu == cpu,
            ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
                cpu, curthread->td_oncpu));

        PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
}

/*
 * Force a context switch.
 *
 * We do this by pause'ing for 1 tick -- invoking mi_switch() is not
 * guaranteed to force a context switch.
 */

static void
pmc_force_context_switch(void)
{

        pause("pmcctx", 1);
}

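/*
 * Read the CPU cycle counter.  On x86, rdtscp is preferred when the
 * CPU advertises it (AMDID_RDTSCP) because, unlike plain rdtsc, it
 * waits for prior instructions to complete before reading the counter,
 * giving a more reliable ordering of timestamps.
 */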
uint64_t
pmc_rdtsc(void)
{
#if defined(__i386__) || defined(__amd64__)
        if (__predict_true(amd_feature & AMDID_RDTSCP))
                return rdtscp();
        else
                return rdtsc();
#else
        return get_cyclecount();
#endif
}

/*
 * Get the file name for an executable.  This is a simple wrapper
 * around vn_fullpath(9).
 */

static void
pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
{

        *fullpath = "unknown";
        *freepath = NULL;
        vn_fullpath(curthread, v, fullpath, freepath);
}

/*
 * remove a process owning PMCs
 */

void
pmc_remove_owner(struct pmc_owner *po)
{
        struct pmc *pm, *tmp;

        sx_assert(&pmc_sx, SX_XLOCKED);

        PMCDBG1(OWN,ORM,1, "remove-owner po=%p", po);

        /* Remove descriptor from the owner hash table */
        LIST_REMOVE(po, po_next);

        /* release all owned PMC descriptors */
        LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
                PMCDBG1(OWN,ORM,2, "pmc=%p", pm);
                KASSERT(pm->pm_owner == po,
                    ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po));

                pmc_release_pmc_descriptor(pm); /* will unlink from the list */
                pmc_destroy_pmc_descriptor(pm);
        }

        KASSERT(po->po_sscount == 0,
            ("[pmc,%d] SS count not zero", __LINE__));
        KASSERT(LIST_EMPTY(&po->po_pmcs),
            ("[pmc,%d] PMC list not empty", __LINE__));

        /* de-configure the log file if present */
        if (po->po_flags & PMC_PO_OWNS_LOGFILE)
                pmclog_deconfigure_log(po);
}

/*
 * remove an owner process record if all conditions are met.
 */

static void
pmc_maybe_remove_owner(struct pmc_owner *po)
{

        PMCDBG1(OWN,OMR,1, "maybe-remove-owner po=%p", po);

        /*
         * Remove owner record if
         * - this process does not own any PMCs
         * - this process has not allocated a system-wide sampling buffer
         */

        if (LIST_EMPTY(&po->po_pmcs) &&
            ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) {
                pmc_remove_owner(po);
                pmc_destroy_owner_descriptor(po);
        }
}

/*
 * Add an association between a target process and a PMC.
 */

static void
pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
{
        int ri;
        struct pmc_target *pt;
#ifdef INVARIANTS
        struct pmc_thread *pt_td;
#endif

        sx_assert(&pmc_sx, SX_XLOCKED);

        KASSERT(pm != NULL && pp != NULL,
            ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
        KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
            ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d",
                __LINE__, pm, pp->pp_proc->p_pid));
        KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= ((int) md->pmd_npmc - 1),
            ("[pmc,%d] Illegal reference count %d for process record %p",
                __LINE__, pp->pp_refcnt, (void *) pp));

        ri = PMC_TO_ROWINDEX(pm);

        PMCDBG3(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
            pm, ri, pp);

#ifdef  HWPMC_DEBUG
        LIST_FOREACH(pt, &pm->pm_targets, pt_next)
            if (pt->pt_process == pp)
                    KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
                                __LINE__, pp, pm));
#endif

        pt = malloc(sizeof(struct pmc_target), M_PMC, M_WAITOK|M_ZERO);
        pt->pt_process = pp;

        LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);

        atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc,
            (uintptr_t)pm);

        if (pm->pm_owner->po_owner == pp->pp_proc)
                pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;

        /*
         * Initialize the per-process values at this row index.
         */
        pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ?
            pm->pm_sc.pm_reloadcount : 0;

        pp->pp_refcnt++;

#ifdef INVARIANTS
        /* Confirm that the per-thread values at this row index are cleared. */
        if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
                mtx_lock_spin(pp->pp_tdslock);
                LIST_FOREACH(pt_td, &pp->pp_tds, pt_next) {
                        KASSERT(pt_td->pt_pmcs[ri].pt_pmcval == (pmc_value_t) 0,
                            ("[pmc,%d] pt_pmcval not cleared for pid=%d at "
                            "ri=%d", __LINE__, pp->pp_proc->p_pid, ri));
                }
                mtx_unlock_spin(pp->pp_tdslock);
        }
#endif
}

/*
 * Removes the association between a target process and a PMC.
 */

static void
pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
{
        int ri;
        struct proc *p;
        struct pmc_target *ptgt;
        struct pmc_thread *pt;

        sx_assert(&pmc_sx, SX_XLOCKED);

        KASSERT(pm != NULL && pp != NULL,
            ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));

        KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc,
            ("[pmc,%d] Illegal ref count %d on process record %p",
                __LINE__, pp->pp_refcnt, (void *) pp));

        ri = PMC_TO_ROWINDEX(pm);

        PMCDBG3(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
            pm, ri, pp);

        KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
            ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
                ri, pm, pp->pp_pmcs[ri].pp_pmc));

        pp->pp_pmcs[ri].pp_pmc = NULL;
        pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;

        /* Clear the per-thread values at this row index. */
        if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
                mtx_lock_spin(pp->pp_tdslock);
                LIST_FOREACH(pt, &pp->pp_tds, pt_next)
                        pt->pt_pmcs[ri].pt_pmcval = (pmc_value_t) 0;
                mtx_unlock_spin(pp->pp_tdslock);
        }

        /* Remove owner-specific flags */
        if (pm->pm_owner->po_owner == pp->pp_proc) {
                pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
                pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
        }

        pp->pp_refcnt--;

        /* Remove the target process from the PMC structure */
        LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
                if (ptgt->pt_process == pp)
                        break;

        KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
                    "in pmc %p", __LINE__, pp->pp_proc, pp, pm));

        LIST_REMOVE(ptgt, pt_next);
        free(ptgt, M_PMC);

        /* if the PMC now lacks targets, send the owner a SIGIO */
        if (LIST_EMPTY(&pm->pm_targets)) {
                p = pm->pm_owner->po_owner;
                PROC_LOCK(p);
                kern_psignal(p, SIGIO);
                PROC_UNLOCK(p);

                PMCDBG2(PRC,SIG,2, "signalling proc=%p signal=%d", p,
                    SIGIO);
        }
}

/*
 * Check if PMC 'pm' may be attached to target process 't'.
 */

static int
pmc_can_attach(struct pmc *pm, struct proc *t)
{
        struct proc *o;         /* pmc owner */
        struct ucred *oc, *tc;  /* owner, target credentials */
        int decline_attach, i;

        /*
         * A PMC's owner can always attach that PMC to itself.
         */

        if ((o = pm->pm_owner->po_owner) == t)
                return 0;

        PROC_LOCK(o);
        oc = o->p_ucred;
        crhold(oc);
        PROC_UNLOCK(o);

        PROC_LOCK(t);
        tc = t->p_ucred;
        crhold(tc);
        PROC_UNLOCK(t);

        /*
         * The effective uid of the PMC owner should match at least one
         * of the {effective,real,saved} uids of the target process.
         */

        decline_attach = oc->cr_uid != tc->cr_uid &&
            oc->cr_uid != tc->cr_svuid &&
            oc->cr_uid != tc->cr_ruid;

        /*
         * Every one of the target's group ids must be in the owner's
         * group list.
         */
        for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
                decline_attach = !groupmember(tc->cr_groups[i], oc);

        /* check the real and saved gids too */
        if (decline_attach == 0)
                decline_attach = !groupmember(tc->cr_rgid, oc) ||
                    !groupmember(tc->cr_svgid, oc);

        crfree(tc);
        crfree(oc);

        return !decline_attach;
}

/*
 * Attach a process to a PMC.
 */

static int
pmc_attach_one_process(struct proc *p, struct pmc *pm)
{
        int ri, error;
        char *fullpath, *freepath;
        struct pmc_process      *pp;

        sx_assert(&pmc_sx, SX_XLOCKED);

        PMCDBG5(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
            PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

        /*
         * Locate the process descriptor corresponding to process 'p',
         * allocating space as needed.
         *
         * Verify that rowindex 'pm_rowindex' is free in the process
         * descriptor.
         *
         * If not, allocate space for a descriptor and link the
         * process descriptor and PMC.
         */
        ri = PMC_TO_ROWINDEX(pm);

        /* mark process as using HWPMCs */
        PROC_LOCK(p);
        p->p_flag |= P_HWPMC;
        PROC_UNLOCK(p);

        if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL) {
                error = ENOMEM;
                goto fail;
        }

        if (pp->pp_pmcs[ri].pp_pmc == pm) { /* already present at slot [ri] */
                error = EEXIST;
                goto fail;
        }

        if (pp->pp_pmcs[ri].pp_pmc != NULL) {
                error = EBUSY;
                goto fail;
        }

        pmc_link_target_process(pm, pp);

        if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) &&
            (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0)
                pm->pm_flags |= PMC_F_NEEDS_LOGFILE;

        pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */

        /* issue an attach event to a configured log file */
        if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) {
                if (p->p_flag & P_KPROC) {
                        fullpath = kernelname;
                        freepath = NULL;
                } else {
                        pmc_getfilename(p->p_textvp, &fullpath, &freepath);
                        pmclog_process_pmcattach(pm, p->p_pid, fullpath);
                }
                free(freepath, M_TEMP);
                if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
                        pmc_log_process_mappings(pm->pm_owner, p);
        }

        return (0);
 fail:
        PROC_LOCK(p);
        p->p_flag &= ~P_HWPMC;
        PROC_UNLOCK(p);
        return (error);
}

/*
 * Attach a process and optionally its children
 */

static int
pmc_attach_process(struct proc *p, struct pmc *pm)
{
        int error;
        struct proc *top;

        sx_assert(&pmc_sx, SX_XLOCKED);

        PMCDBG5(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
            PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

        /*
         * If this PMC successfully allowed a GETMSR operation
         * in the past, disallow further ATTACHes.
         */

        if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0)
                return EPERM;

        if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
                return pmc_attach_one_process(p, pm);

        /*
         * Traverse all child processes, attaching them to
         * this PMC.
         */

        sx_slock(&proctree_lock);

        top = p;

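        /*
         * The walk below is an iterative preorder traversal of the
         * process tree rooted at 'top': descend to a first child when
         * one exists, otherwise climb via p_pptr until a sibling is
         * found, stopping once the walk returns to 'top'.
         */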
        for (;;) {
                if ((error = pmc_attach_one_process(p, pm)) != 0)
                        break;
                if (!LIST_EMPTY(&p->p_children))
                        p = LIST_FIRST(&p->p_children);
                else for (;;) {
                        if (p == top)
                                goto done;
                        if (LIST_NEXT(p, p_sibling)) {
                                p = LIST_NEXT(p, p_sibling);
                                break;
                        }
                        p = p->p_pptr;
                }
        }

        if (error)
                (void) pmc_detach_process(top, pm);

 done:
        sx_sunlock(&proctree_lock);
        return error;
}

/*
 * Detach a process from a PMC.  If there are no other PMCs tracking
 * this process, remove the process structure from its hash table.  If
 * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
 */

static int
pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
{
        int ri;
        struct pmc_process *pp;

        sx_assert(&pmc_sx, SX_XLOCKED);

        KASSERT(pm != NULL,
            ("[pmc,%d] null pm pointer", __LINE__));

        ri = PMC_TO_ROWINDEX(pm);

        PMCDBG6(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
            pm, ri, p, p->p_pid, p->p_comm, flags);

        if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
                return ESRCH;

        if (pp->pp_pmcs[ri].pp_pmc != pm)
                return EINVAL;

        pmc_unlink_target_process(pm, pp);

        /* Issue a detach entry if a log file is configured */
        if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE)
                pmclog_process_pmcdetach(pm, p->p_pid);

        /*
         * If there are no PMCs targeting this process, we remove its
         * descriptor from the target hash table and unset the P_HWPMC
         * flag in the struct proc.
         */
        KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc,
            ("[pmc,%d] Illegal refcnt %d for process struct %p",
                __LINE__, pp->pp_refcnt, pp));

        if (pp->pp_refcnt != 0) /* still a target of some PMC */
                return 0;

        pmc_remove_process_descriptor(pp);

        if (flags & PMC_FLAG_REMOVE)
                pmc_destroy_process_descriptor(pp);

        PROC_LOCK(p);
        p->p_flag &= ~P_HWPMC;
        PROC_UNLOCK(p);

        return 0;
}

/*
 * Detach a process and optionally its descendants from a PMC.
 */

static int
pmc_detach_process(struct proc *p, struct pmc *pm)
{
        struct proc *top;

        sx_assert(&pmc_sx, SX_XLOCKED);

        PMCDBG5(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
            PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

        if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
                return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);

        /*
         * Traverse all children, detaching them from this PMC.  We
         * ignore errors since we could be detaching a PMC from a
         * partially attached proc tree.
         */

        sx_slock(&proctree_lock);

        top = p;

        for (;;) {
                (void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);

                if (!LIST_EMPTY(&p->p_children))
                        p = LIST_FIRST(&p->p_children);
                else for (;;) {
                        if (p == top)
                                goto done;
                        if (LIST_NEXT(p, p_sibling)) {
                                p = LIST_NEXT(p, p_sibling);
                                break;
                        }
                        p = p->p_pptr;
                }
        }

 done:
        sx_sunlock(&proctree_lock);

        if (LIST_EMPTY(&pm->pm_targets))
                pm->pm_flags &= ~PMC_F_ATTACH_DONE;

        return 0;
}


/*
 * Thread context switch IN
 */

static void
pmc_process_csw_in(struct thread *td)
{
        int cpu;
        unsigned int adjri, ri;
        struct pmc *pm;
        struct proc *p;
        struct pmc_cpu *pc;
        struct pmc_hw *phw;
        pmc_value_t newvalue;
        struct pmc_process *pp;
        struct pmc_thread *pt;
        struct pmc_classdep *pcd;

        p = td->td_proc;
        pt = NULL;
        if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
                return;

        KASSERT(pp->pp_proc == td->td_proc,
            ("[pmc,%d] not my thread state", __LINE__));

        critical_enter(); /* no preemption from this point */

        cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */

        PMCDBG5(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
            p->p_pid, p->p_comm, pp);

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[pmc,%d] weird CPU id %d", __LINE__, cpu));

        pc = pmc_pcpu[cpu];

        for (ri = 0; ri < md->pmd_npmc; ri++) {

                if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
                        continue;

                KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
                    ("[pmc,%d] Target PMC in non-virtual mode (%d)",
                        __LINE__, PMC_TO_MODE(pm)));

                KASSERT(PMC_TO_ROWINDEX(pm) == ri,
                    ("[pmc,%d] Row index mismatch pmc %d != ri %d",
                        __LINE__, PMC_TO_ROWINDEX(pm), ri));

                /*
                 * Only PMCs that are marked as 'RUNNING' need
                 * be placed on hardware.
                 */

                if (pm->pm_state != PMC_STATE_RUNNING)
                        continue;

                /* increment PMC runcount */
                counter_u64_add(pm->pm_runcount, 1);

                /* configure the HWPMC we are going to use. */
                pcd = pmc_ri_to_classdep(md, ri, &adjri);
                pcd->pcd_config_pmc(cpu, adjri, pm);

                phw = pc->pc_hwpmcs[ri];

                KASSERT(phw != NULL,
                    ("[pmc,%d] null hw pointer", __LINE__));

                KASSERT(phw->phw_pmc == pm,
                    ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
                        phw->phw_pmc, pm));

                /*
                 * Write out saved value and start the PMC.
                 *
                 * Sampling PMCs use a per-thread value, while
                 * counting mode PMCs use a per-pmc value that is
                 * inherited across descendants.
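                 *
                 * Illustrative note: for sampling (TS) PMCs the stored
                 * per-thread value is the count remaining until the
                 * next interrupt, so the value loaded here must lie in
                 * (0, pm_reloadcount]; the KASSERT further down checks
                 * exactly that range.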
1426                  */
1427                 if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
1428                         if (pt == NULL)
1429                                 pt = pmc_find_thread_descriptor(pp, td,
1430                                     PMC_FLAG_NONE);
1431
1432                         KASSERT(pt != NULL,
1433                             ("[pmc,%d] No thread found for td=%p", __LINE__,
1434                             td));
1435
1436                         mtx_pool_lock_spin(pmc_mtxpool, pm);
1437
1438                         /*
1439                          * If we have a thread descriptor, use the per-thread
1440                          * counter in the descriptor. If not, we will use
1441                          * a per-process counter. 
1442                          *
1443                          * TODO: Remove the per-process "safety net" once
1444                          * we have thoroughly tested that we don't hit the
1445                          * above assert.
1446                          */
1447                         if (pt != NULL) {
1448                                 if (pt->pt_pmcs[ri].pt_pmcval > 0)
1449                                         newvalue = pt->pt_pmcs[ri].pt_pmcval;
1450                                 else
1451                                         newvalue = pm->pm_sc.pm_reloadcount;
1452                         } else {
1453                                 /*
1454                                  * Use the saved value calculated after the most
1455                                  * recent time a thread using the shared counter
1456                                  * switched out. Reset the saved count in case
1457                                  * another thread from this process switches in
1458                                  * before any threads switch out.
1459                                  */
1460
1461                                 newvalue = pp->pp_pmcs[ri].pp_pmcval;
1462                                 pp->pp_pmcs[ri].pp_pmcval =
1463                                     pm->pm_sc.pm_reloadcount;
1464                         }
1465                         mtx_pool_unlock_spin(pmc_mtxpool, pm);
1466                         KASSERT(newvalue > 0 && newvalue <=
1467                             pm->pm_sc.pm_reloadcount,
1468                             ("[pmc,%d] pmcval outside of expected range cpu=%d "
1469                             "ri=%d pmcval=%jx pm_reloadcount=%jx", __LINE__,
1470                             cpu, ri, newvalue, pm->pm_sc.pm_reloadcount));
1471                 } else {
1472                         KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
1473                             ("[pmc,%d] illegal mode=%d", __LINE__,
1474                             PMC_TO_MODE(pm)));
1475                         mtx_pool_lock_spin(pmc_mtxpool, pm);
1476                         newvalue = PMC_PCPU_SAVED(cpu, ri) =
1477                             pm->pm_gv.pm_savedvalue;
1478                         mtx_pool_unlock_spin(pmc_mtxpool, pm);
1479                 }
1480
1481                 PMCDBG3(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);
1482
1483                 pcd->pcd_write_pmc(cpu, adjri, newvalue);
1484
1485                 /* If a sampling mode PMC, reset stalled state. */
1486                 if (PMC_TO_MODE(pm) == PMC_MODE_TS)
1487                         pm->pm_pcpu_state[cpu].pps_stalled = 0;
1488
1489                 /* Indicate that we desire this to run. */
1490                 pm->pm_pcpu_state[cpu].pps_cpustate = 1;
1491
1492                 /* Start the PMC. */
1493                 pcd->pcd_start_pmc(cpu, adjri);
1494         }
1495
1496         /*
1497          * perform any other architecture/cpu dependent thread
1498          * switch-in actions.
1499          */
1500
1501         (void) (*md->pmd_switch_in)(pc, pp);
1502
1503         critical_exit();
1504
1505 }
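
/*
 * Editor's sketch (illustrative only, not compiled): how the switch-in
 * value is chosen for a sampling (PMC_MODE_TS) PMC in the loop above.
 * Assume pm_reloadcount = 1000.  A thread that switched out with 400
 * events still pending has pt_pmcval = 400, so the hardware is reloaded
 * with 400 and interrupts after 400 more events; a thread seen for the
 * first time has pt_pmcval = 0 and starts from the full reload value:
 *
 *	newvalue = (pt->pt_pmcs[ri].pt_pmcval > 0) ?
 *	    pt->pt_pmcs[ri].pt_pmcval : pm->pm_sc.pm_reloadcount;
 */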
1506
1507 /*
1508  * Thread context switch OUT.
1509  */
1510
1511 static void
1512 pmc_process_csw_out(struct thread *td)
1513 {
1514         int cpu;
1515         int64_t tmp;
1516         struct pmc *pm;
1517         struct proc *p;
1518         enum pmc_mode mode;
1519         struct pmc_cpu *pc;
1520         pmc_value_t newvalue;
1521         unsigned int adjri, ri;
1522         struct pmc_process *pp;
1523         struct pmc_thread *pt = NULL;
1524         struct pmc_classdep *pcd;
1525
1526
1527         /*
1528          * Locate our process descriptor; this may be NULL if
1529          * this process is exiting and we have already removed
1530          * the process from the target process table.
1531          *
1532          * Note that due to kernel preemption, multiple
1533          * context switches may happen while the process is
1534          * exiting.
1535          *
1536          * Note also that if the target process cannot be
1537          * found we still need to deconfigure any PMCs that
1538          * are currently running on hardware.
1539          */
1540
1541         p = td->td_proc;
1542         pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);
1543
1544         /*
1545          * save PMCs
1546          */
1547
1548         critical_enter();
1549
1550         cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1551
1552         PMCDBG5(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1553             p->p_pid, p->p_comm, pp);
1554
1555         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
1556             ("[pmc,%d] weird CPU id %d", __LINE__, cpu));
1557
1558         pc = pmc_pcpu[cpu];
1559
1560         /*
1561          * When a PMC gets unlinked from a target PMC, it will
1562          * be removed from the target's pp_pmc[] array.
1563          *
1564          * However, on a MP system, the target could have been
1565          * executing on another CPU at the time of the unlink.
1566          * So, at context switch OUT time, we need to look at
1567          * the hardware to determine if a PMC is scheduled on
1568          * it.
1569          */
1570
1571         for (ri = 0; ri < md->pmd_npmc; ri++) {
1572
1573                 pcd = pmc_ri_to_classdep(md, ri, &adjri);
1574                 pm  = NULL;
1575                 (void) (*pcd->pcd_get_config)(cpu, adjri, &pm);
1576
1577                 if (pm == NULL) /* nothing at this row index */
1578                         continue;
1579
1580                 mode = PMC_TO_MODE(pm);
1581                 if (!PMC_IS_VIRTUAL_MODE(mode))
1582                         continue; /* not a process virtual PMC */
1583
1584                 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1585                     ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
1586                         __LINE__, PMC_TO_ROWINDEX(pm), ri));
1587
1588                 /*
1589                  * Change desired state, and then stop if not stalled.
1590                  * This two-step dance should avoid race conditions where
1591                  * an interrupt re-enables the PMC after this code has
1592                  * already checked the pm_stalled flag.
1593                  */
1594                 pm->pm_pcpu_state[cpu].pps_cpustate = 0;
1595                 if (pm->pm_pcpu_state[cpu].pps_stalled == 0)
1596                         pcd->pcd_stop_pmc(cpu, adjri);
1597
1598                 /* reduce this PMC's runcount */
1599                 counter_u64_add(pm->pm_runcount, -1);
1600
1601                 /*
1602                  * If this PMC is associated with this process,
1603                  * save the reading.
1604                  */
1605
1606                 if (pm->pm_state != PMC_STATE_DELETED && pp != NULL &&
1607                     pp->pp_pmcs[ri].pp_pmc != NULL) {
1608                         KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
1609                             ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__,
1610                                 pm, ri, pp->pp_pmcs[ri].pp_pmc));
1611
1612                         KASSERT(pp->pp_refcnt > 0,
1613                             ("[pmc,%d] pp refcnt = %d", __LINE__,
1614                                 pp->pp_refcnt));
1615
1616                         pcd->pcd_read_pmc(cpu, adjri, &newvalue);
1617
1618                         if (mode == PMC_MODE_TS) {
1619                                 PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d val=%jd (samp)",
1620                                     cpu, ri, newvalue);
1621
1622                                 if (pt == NULL)
1623                                         pt = pmc_find_thread_descriptor(pp, td,
1624                                             PMC_FLAG_NONE);
1625
1626                                 KASSERT(pt != NULL,
1627                                     ("[pmc,%d] No thread found for td=%p",
1628                                     __LINE__, td));
1629
1630                                 mtx_pool_lock_spin(pmc_mtxpool, pm);
1631
1632                                 /*
1633                                  * If we have a thread descriptor, save the
1634                                  * per-thread counter in the descriptor. If not,
1635                                  * we will update the per-process counter.
1636                                  *
1637                                  * TODO: Remove the per-process "safety net"
1638                                  * once we have thoroughly tested that we
1639                                  * don't hit the above assert.
1640                                  */
1641                                 if (pt != NULL)
1642                                         pt->pt_pmcs[ri].pt_pmcval = newvalue;
1643                                 else {
1644                                         /*
1645                                          * For sampling process-virtual PMCs,
1646                                          * newvalue is the number of events to
1647                                          * be seen until the next sampling
1648                                          * interrupt. We can just add the events
1649                                          * left from this invocation to the
1650                                          * counter, then adjust in case we
1651                                          * overflow our range.
1652                                          *
1653                                          * (Recall that we reload the counter
1654                                          * every time we use it.)
1655                                          */
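                                        /*
                                         * Worked example (editor's note,
                                         * illustrative numbers): with
                                         * pm_reloadcount = 1000, a prior
                                         * pp_pmcval of 800 and 300 events
                                         * left at switch-out, the sum is
                                         * 1100; that exceeds the reload
                                         * count, so it wraps to 100.
                                         */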
1656                                         pp->pp_pmcs[ri].pp_pmcval += newvalue;
1657                                         if (pp->pp_pmcs[ri].pp_pmcval >
1658                                             pm->pm_sc.pm_reloadcount)
1659                                                 pp->pp_pmcs[ri].pp_pmcval -=
1660                                                     pm->pm_sc.pm_reloadcount;
1661                                 }
1662                                 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1663                         } else {
1664                                 tmp = newvalue - PMC_PCPU_SAVED(cpu,ri);
1665
1666                                 PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd (count)",
1667                                     cpu, ri, tmp);
1668
1669                                 /*
1670                                  * For counting process-virtual PMCs,
1671                                  * we expect the count to be
1672                                  * increasing monotonically, modulo a 64
1673                                  * bit wraparound.
1674                                  */
1675                                 KASSERT(tmp >= 0,
1676                                     ("[pmc,%d] negative increment cpu=%d "
1677                                      "ri=%d newvalue=%jx saved=%jx "
1678                                      "incr=%jx", __LINE__, cpu, ri,
1679                                      newvalue, PMC_PCPU_SAVED(cpu,ri), tmp));
1680
1681                                 mtx_pool_lock_spin(pmc_mtxpool, pm);
1682                                 pm->pm_gv.pm_savedvalue += tmp;
1683                                 pp->pp_pmcs[ri].pp_pmcval += tmp;
1684                                 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1685
1686                                 if (pm->pm_flags & PMC_F_LOG_PROCCSW)
1687                                         pmclog_process_proccsw(pm, pp, tmp, td);
1688                         }
1689                 }
1690
1691                 /* mark hardware as free */
1692                 pcd->pcd_config_pmc(cpu, adjri, NULL);
1693         }
1694
1695         /*
1696          * perform any other architecture/cpu dependent thread
1697          * switch out functions.
1698          */
1699
1700         (void) (*md->pmd_switch_out)(pc, pp);
1701
1702         critical_exit();
1703 }
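
/*
 * Editor's note (sketch of the protocol used above): the switch-out path
 * and the sampling interrupt coordinate through two per-CPU flags.
 * Clearing pps_cpustate first means an interrupt that fires after the
 * pps_stalled check sees that this CPU no longer wants the PMC running
 * and will not restart it:
 *
 *	pm->pm_pcpu_state[cpu].pps_cpustate = 0;	(declare intent)
 *	if (pm->pm_pcpu_state[cpu].pps_stalled == 0)
 *		pcd->pcd_stop_pmc(cpu, adjri);		(stop if running)
 */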
1704
1705 /*
1706  * Handle the addition of a new thread to a process.
1707  */
1708 static void
1709 pmc_process_thread_add(struct thread *td)
1710 {
1711         struct pmc_process *pp;
1712
1713         pp = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE);
1714         if (pp != NULL)
1715                 pmc_find_thread_descriptor(pp, td, PMC_FLAG_ALLOCATE);
1716 }
1717
1718 /*
1719  * Handle the deletion of a thread from a process.
1720  */
1721 static void
1722 pmc_process_thread_delete(struct thread *td)
1723 {
1724         struct pmc_process *pp;
1725
1726         pp = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE);
1727         if (pp != NULL)
1728                 pmc_thread_descriptor_pool_free(pmc_find_thread_descriptor(pp,
1729                     td, PMC_FLAG_REMOVE));
1730 }
1731
1732 /*
1733  * A userret() call for a thread.
1734  */
1735 static void
1736 pmc_process_thread_userret(struct thread *td)
1737 {
1738         sched_pin();
1739         pmc_capture_user_callchain(curcpu, PMC_UR, td->td_frame);
1740         sched_unpin();
1741 }
1742
1743 /*
1744  * A mapping change for a process.
1745  */
1746
1747 static void
1748 pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm)
1749 {
1750         int ri;
1751         pid_t pid;
1752         char *fullpath, *freepath;
1753         const struct pmc *pm;
1754         struct pmc_owner *po;
1755         const struct pmc_process *pp;
1756
1757         freepath = fullpath = NULL;
1758         MPASS(!in_epoch(global_epoch_preempt));
1759         pmc_getfilename((struct vnode *) pkm->pm_file, &fullpath, &freepath);
1760
1761         pid = td->td_proc->p_pid;
1762
1763         PMC_EPOCH_ENTER();
1764         /* Inform owners of all system-wide sampling PMCs. */
1765         CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1766             if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1767                         pmclog_process_map_in(po, pid, pkm->pm_address, fullpath);
1768
1769         if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1770                 goto done;
1771
1772         /*
1773          * Inform sampling PMC owners tracking this process.
1774          */
1775         for (ri = 0; ri < md->pmd_npmc; ri++)
1776                 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
1777                     PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1778                         pmclog_process_map_in(pm->pm_owner,
1779                             pid, pkm->pm_address, fullpath);
1780
1781   done:
1782         if (freepath)
1783                 free(freepath, M_TEMP);
1784         PMC_EPOCH_EXIT();
1785 }
1786
1787
1788 /*
1789  * Log an munmap request.
1790  */
1791
1792 static void
1793 pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm)
1794 {
1795         int ri;
1796         pid_t pid;
1797         struct pmc_owner *po;
1798         const struct pmc *pm;
1799         const struct pmc_process *pp;
1800
1801         pid = td->td_proc->p_pid;
1802
1803         PMC_EPOCH_ENTER();
1804         CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1805             if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1806                 pmclog_process_map_out(po, pid, pkm->pm_address,
1807                     pkm->pm_address + pkm->pm_size);
1808         PMC_EPOCH_EXIT();
1809
1810         if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1811                 return;
1812
1813         for (ri = 0; ri < md->pmd_npmc; ri++)
1814                 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
1815                     PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1816                         pmclog_process_map_out(pm->pm_owner, pid,
1817                             pkm->pm_address, pkm->pm_address + pkm->pm_size);
1818 }
1819
1820 /*
1821  * Log mapping information about the kernel.
1822  */
1823
1824 static void
1825 pmc_log_kernel_mappings(struct pmc *pm)
1826 {
1827         struct pmc_owner *po;
1828         struct pmckern_map_in *km, *kmbase;
1829
1830         MPASS(in_epoch(global_epoch_preempt) || sx_xlocked(&pmc_sx));
1831         KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
1832             ("[pmc,%d] non-sampling PMC (%p) desires mapping information",
1833                 __LINE__, (void *) pm));
1834
1835         po = pm->pm_owner;
1836
1837         if (po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE)
1838                 return;
1839         if (PMC_TO_MODE(pm) == PMC_MODE_SS)
1840                 pmc_process_allproc(pm);
1841         /*
1842          * Log the current set of kernel modules.
1843          */
1844         kmbase = linker_hwpmc_list_objects();
1845         for (km = kmbase; km->pm_file != NULL; km++) {
1846                 PMCDBG2(LOG,REG,1,"%s %p", (char *) km->pm_file,
1847                     (void *) km->pm_address);
1848                 pmclog_process_map_in(po, (pid_t) -1, km->pm_address,
1849                     km->pm_file);
1850         }
1851         free(kmbase, M_LINKER);
1852
1853         po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE;
1854 }
1855
1856 /*
1857  * Log the mappings for a single process.
1858  */
1859
1860 static void
1861 pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
1862 {
1863         vm_map_t map;
1864         struct vnode *vp;
1865         struct vmspace *vm;
1866         vm_map_entry_t entry;
1867         vm_offset_t last_end;
1868         u_int last_timestamp;
1869         struct vnode *last_vp;
1870         vm_offset_t start_addr;
1871         vm_object_t obj, lobj, tobj;
1872         char *fullpath, *freepath;
1873
1874         last_vp = NULL;
1875         last_end = (vm_offset_t) 0;
1876         fullpath = freepath = NULL;
1877
1878         if ((vm = vmspace_acquire_ref(p)) == NULL)
1879                 return;
1880
1881         map = &vm->vm_map;
1882         vm_map_lock_read(map);
1883
1884         for (entry = map->header.next; entry != &map->header; entry = entry->next) {
1885
1886                 if (entry == NULL) {
1887                         PMCDBG2(LOG,OPS,2, "hwpmc: vm_map entry unexpectedly "
1888                             "NULL! pid=%d vm_map=%p\n", p->p_pid, map);
1889                         break;
1890                 }
1891
1892                 /*
1893                  * We only care about executable map entries.
1894                  */
1895                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
1896                     !(entry->protection & VM_PROT_EXECUTE) ||
1897                     (entry->object.vm_object == NULL)) {
1898                         continue;
1899                 }
1900
1901                 obj = entry->object.vm_object;
1902                 VM_OBJECT_RLOCK(obj);
1903
1904                 /* 
1905                  * Walk the backing_object list to find the base
1906                  * (non-shadowed) vm_object.
1907                  */
1908                 for (lobj = tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
1909                         if (tobj != obj)
1910                                 VM_OBJECT_RLOCK(tobj);
1911                         if (lobj != obj)
1912                                 VM_OBJECT_RUNLOCK(lobj);
1913                         lobj = tobj;
1914                 }
1915
1916                 /*
1917                  * At this point lobj is the base vm_object and it is locked.
1918                  */
1919                 if (lobj == NULL) {
1920                         PMCDBG3(LOG,OPS,2, "hwpmc: lobj unexpectedly NULL! pid=%d "
1921                             "vm_map=%p vm_obj=%p\n", p->p_pid, map, obj);
1922                         VM_OBJECT_RUNLOCK(obj);
1923                         continue;
1924                 }
1925
1926                 vp = vm_object_vnode(lobj);
1927                 if (vp == NULL) {
1928                         if (lobj != obj)
1929                                 VM_OBJECT_RUNLOCK(lobj);
1930                         VM_OBJECT_RUNLOCK(obj);
1931                         continue;
1932                 }
1933
1934                 /*
1935                  * Skip contiguous regions that point to the same
1936                  * vnode, so we don't emit redundant MAP-IN
1937                  * directives.
1938                  */
1939                 if (entry->start == last_end && vp == last_vp) {
1940                         last_end = entry->end;
1941                         if (lobj != obj)
1942                                 VM_OBJECT_RUNLOCK(lobj);
1943                         VM_OBJECT_RUNLOCK(obj);
1944                         continue;
1945                 }
1946
1947                 /* 
1948                  * We don't want to keep the proc's vm_map or this
1949                  * vm_object locked while we walk the pathname, since
1950                  * vn_fullpath() can sleep.  However, if we drop the
1951                  * lock, it's possible for concurrent activity to
1952                  * modify the vm_map list.  To protect against this,
1953                  * we save the vm_map timestamp before we release the
1954                  * lock, and check it after we reacquire the lock
1955                  * below.
1956                  */
1957                 start_addr = entry->start;
1958                 last_end = entry->end;
1959                 last_timestamp = map->timestamp;
1960                 vm_map_unlock_read(map);
1961
1962                 vref(vp);
1963                 if (lobj != obj)
1964                         VM_OBJECT_RUNLOCK(lobj);
1965
1966                 VM_OBJECT_RUNLOCK(obj);
1967
1968                 freepath = NULL;
1969                 pmc_getfilename(vp, &fullpath, &freepath);
1970                 last_vp = vp;
1971
1972                 vrele(vp);
1973
1974                 vp = NULL;
1975                 pmclog_process_map_in(po, p->p_pid, start_addr, fullpath);
1976                 if (freepath)
1977                         free(freepath, M_TEMP);
1978
1979                 vm_map_lock_read(map);
1980
1981                 /*
1982                  * If our saved timestamp doesn't match, this means
1983                  * that the vm_map was modified out from under us and
1984                  * we can't trust our current "entry" pointer.  Do a
1985                  * new lookup for this entry.  If there is no entry
1986                  * for this address range, vm_map_lookup_entry() will
1987                  * return the previous one, so we always want to go to
1988                  * entry->next on the next loop iteration.
1989                  * 
1990                  * There is an edge condition here that can occur if
1991                  * there is no entry at or before this address.  In
1992                  * this situation, vm_map_lookup_entry returns
1993                  * &map->header, which would cause our loop to abort
1994                  * without processing the rest of the map.  However,
1995                  * in practice this will never happen for process
1996                  * vm_map.  This is because the executable's text
1997                  * segment is the first mapping in the proc's address
1998                  * space, and this mapping is never removed until the
1999                  * process exits, so there will always be a non-header
2000                  * entry at or before the requested address for
2001                  * vm_map_lookup_entry to return.
2002                  */
2003                 if (map->timestamp != last_timestamp)
2004                         vm_map_lookup_entry(map, last_end - 1, &entry);
2005         }
2006
2007         vm_map_unlock_read(map);
2008         vmspace_free(vm);
2009         return;
2010 }
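
/*
 * Editor's sketch of the drop/revalidate pattern used above (identifiers
 * mirror the function body; this is illustration, not additional driver
 * code):
 *
 *	last_timestamp = map->timestamp;
 *	vm_map_unlock_read(map);
 *	(sleepable work: vn_fullpath() via pmc_getfilename())
 *	vm_map_lock_read(map);
 *	if (map->timestamp != last_timestamp)
 *		vm_map_lookup_entry(map, last_end - 1, &entry);
 */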
2011
2012 /*
2013  * Log mappings for all processes in the system.
2014  */
2015
2016 static void
2017 pmc_log_all_process_mappings(struct pmc_owner *po)
2018 {
2019         struct proc *p, *top;
2020
2021         sx_assert(&pmc_sx, SX_XLOCKED);
2022
2023         if ((p = pfind(1)) == NULL)
2024                 panic("[pmc,%d] Cannot find init", __LINE__);
2025
2026         PROC_UNLOCK(p);
2027
2028         sx_slock(&proctree_lock);
2029
2030         top = p;
2031
2032         for (;;) {
2033                 pmc_log_process_mappings(po, p);
2034                 if (!LIST_EMPTY(&p->p_children))
2035                         p = LIST_FIRST(&p->p_children);
2036                 else for (;;) {
2037                         if (p == top)
2038                                 goto done;
2039                         if (LIST_NEXT(p, p_sibling)) {
2040                                 p = LIST_NEXT(p, p_sibling);
2041                                 break;
2042                         }
2043                         p = p->p_pptr;
2044                 }
2045         }
2046  done:
2047         sx_sunlock(&proctree_lock);
2048 }
2049
2050 /*
2051  * The 'hook' invoked from the kernel proper
2052  */
2053
2054
2055 #ifdef  HWPMC_DEBUG
2056 const char *pmc_hooknames[] = {
2057         /* these strings correspond to PMC_FN_* in <sys/pmckern.h> */
2058         "",
2059         "EXEC",
2060         "CSW-IN",
2061         "CSW-OUT",
2062         "SAMPLE",
2063         "UNUSED1",
2064         "UNUSED2",
2065         "MMAP",
2066         "MUNMAP",
2067         "CALLCHAIN-NMI",
2068         "CALLCHAIN-SOFT",
2069         "SOFTSAMPLING",
2070         "THR-CREATE",
2071         "THR-EXIT",
2072         "THR-USERRET",
2073         "THR-CREATE-LOG",
2074         "THR-EXIT-LOG",
2075         "PROC-CREATE-LOG"
2076 };
2077 #endif
2078
2079 static int
2080 pmc_hook_handler(struct thread *td, int function, void *arg)
2081 {
2082         int cpu;
2083
2084         PMCDBG4(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
2085             pmc_hooknames[function], arg);
2086
2087         switch (function)
2088         {
2089
2090         /*
2091          * Process exec()
2092          */
2093
2094         case PMC_FN_PROCESS_EXEC:
2095         {
2096                 char *fullpath, *freepath;
2097                 unsigned int ri;
2098                 int is_using_hwpmcs;
2099                 struct pmc *pm;
2100                 struct proc *p;
2101                 struct pmc_owner *po;
2102                 struct pmc_process *pp;
2103                 struct pmckern_procexec *pk;
2104
2105                 sx_assert(&pmc_sx, SX_XLOCKED);
2106
2107                 p = td->td_proc;
2108                 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
2109
2110                 pk = (struct pmckern_procexec *) arg;
2111
2112                 PMC_EPOCH_ENTER();
2113                 /* Inform owners of SS mode PMCs of the exec event. */
2114                 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
2115                     if (po->po_flags & PMC_PO_OWNS_LOGFILE)
2116                             pmclog_process_procexec(po, PMC_ID_INVALID,
2117                                 p->p_pid, pk->pm_entryaddr, fullpath);
2118                 PMC_EPOCH_EXIT();
2119
2120                 PROC_LOCK(p);
2121                 is_using_hwpmcs = p->p_flag & P_HWPMC;
2122                 PROC_UNLOCK(p);
2123
2124                 if (!is_using_hwpmcs) {
2125                         if (freepath)
2126                                 free(freepath, M_TEMP);
2127                         break;
2128                 }
2129
2130                 /*
2131                  * PMCs are not inherited across an exec():  remove any
2132                  * PMCs that this process is the owner of.
2133                  */
2134
2135                 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
2136                         pmc_remove_owner(po);
2137                         pmc_destroy_owner_descriptor(po);
2138                 }
2139
2140                 /*
2141                  * If the process being exec'ed is not the target of any
2142                  * PMC, we are done.
2143                  */
2144                 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) {
2145                         if (freepath)
2146                                 free(freepath, M_TEMP);
2147                         break;
2148                 }
2149
2150                 /*
2151                  * Log the exec event to all monitoring owners.  Skip
2152                  * owners who have already received the event because
2153                  * they had system sampling PMCs active.
2154                  */
2155                 for (ri = 0; ri < md->pmd_npmc; ri++)
2156                         if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
2157                                 po = pm->pm_owner;
2158                                 if (po->po_sscount == 0 &&
2159                                     po->po_flags & PMC_PO_OWNS_LOGFILE)
2160                                         pmclog_process_procexec(po, pm->pm_id,
2161                                             p->p_pid, pk->pm_entryaddr,
2162                                             fullpath);
2163                         }
2164
2165                 if (freepath)
2166                         free(freepath, M_TEMP);
2167
2168
2169                 PMCDBG4(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
2170                     p, p->p_pid, p->p_comm, pk->pm_credentialschanged);
2171
2172                 if (pk->pm_credentialschanged == 0) /* no change */
2173                         break;
2174
2175                 /*
2176                  * If the newly exec()'ed process has a different credential
2177                  * than before, allow it to be the target of a PMC only if
2178                  * the PMC's owner has sufficient privilege.
2179                  */
2180
2181                 for (ri = 0; ri < md->pmd_npmc; ri++)
2182                         if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL)
2183                                 if (pmc_can_attach(pm, td->td_proc) != 0)
2184                                         pmc_detach_one_process(td->td_proc,
2185                                             pm, PMC_FLAG_NONE);
2186
2187                 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc,
2188                     ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__,
2189                         pp->pp_refcnt, pp));
2190
2191                 /*
2192                  * If this process is no longer the target of any
2193                  * PMCs, we can remove the process entry and free
2194                  * up space.
2195                  */
2196
2197                 if (pp->pp_refcnt == 0) {
2198                         pmc_remove_process_descriptor(pp);
2199                         pmc_destroy_process_descriptor(pp);
2200                         break;
2201                 }
2202
2203         }
2204         break;
2205
2206         case PMC_FN_CSW_IN:
2207                 pmc_process_csw_in(td);
2208                 break;
2209
2210         case PMC_FN_CSW_OUT:
2211                 pmc_process_csw_out(td);
2212                 break;
2213
2214         /*
2215          * Process accumulated PC samples.
2216          *
2217          * This function is expected to be called by hardclock() for
2218          * each CPU that has accumulated PC samples.
2219          *
2220          * This function is to be executed on the CPU whose samples
2221          * are being processed.
2222          */
2223         case PMC_FN_DO_SAMPLES:
2224
2225                 /*
2226                  * Clear the cpu specific bit in the CPU mask before
2227                  * doing the rest of the processing.  If the NMI handler
2228                  * gets invoked after the "DPCPU_SET()" call
2229                  * below but before "pmc_process_samples()" gets
2230                  * around to processing the interrupt, then we will
2231                  * come back here at the next hardclock() tick (and
2232                  * may find nothing to do if "pmc_process_samples()"
2233                  * had already processed the interrupt).  We don't
2234                  * lose the interrupt sample.
2235                  */
2236                 DPCPU_SET(pmc_sampled, 0);
2237                 cpu = PCPU_GET(cpuid);
2238                 pmc_process_samples(cpu, PMC_HR);
2239                 pmc_process_samples(cpu, PMC_SR);
2240                 pmc_process_samples(cpu, PMC_UR);
2241                 break;
2242
2243         case PMC_FN_MMAP:
2244                 pmc_process_mmap(td, (struct pmckern_map_in *) arg);
2245                 break;
2246
2247         case PMC_FN_MUNMAP:
2248                 MPASS(in_epoch(global_epoch_preempt) || sx_xlocked(&pmc_sx));
2249                 pmc_process_munmap(td, (struct pmckern_map_out *) arg);
2250                 break;
2251
2252         case PMC_FN_PROC_CREATE_LOG:
2253                 pmc_process_proccreate((struct proc *)arg);
2254                 break;
2255
2256         case PMC_FN_USER_CALLCHAIN:
2257                 /*
2258                  * Record a call chain.
2259                  */
2260                 KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2261                     __LINE__));
2262
2263                 pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_HR,
2264                     (struct trapframe *) arg);
2265
2266                 KASSERT(td->td_pinned == 1,
2267                         ("[pmc,%d] invalid td_pinned value", __LINE__));
2268                 sched_unpin();  /* Can migrate safely now. */
2269
2270                 td->td_pflags &= ~TDP_CALLCHAIN;
2271                 break;
2272
2273         case PMC_FN_USER_CALLCHAIN_SOFT:
2274                 /*
2275                  * Record a call chain.
2276                  */
2277                 KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2278                     __LINE__));
2279
2280                 cpu = PCPU_GET(cpuid);
2281                 pmc_capture_user_callchain(cpu, PMC_SR,
2282                     (struct trapframe *) arg);
2283
2284                 KASSERT(td->td_pinned == 1,
2285                     ("[pmc,%d] invalid td_pinned value", __LINE__));
2286
2287                 sched_unpin();  /* Can migrate safely now. */
2288
2289                 td->td_pflags &= ~TDP_CALLCHAIN;
2290                 break;
2291
2292         case PMC_FN_SOFT_SAMPLING:
2293                 /*
2294                  * Call soft PMC sampling intr.
2295                  */
2296                 pmc_soft_intr((struct pmckern_soft *) arg);
2297                 break;
2298
2299         case PMC_FN_THR_CREATE:
2300                 pmc_process_thread_add(td);
2301                 pmc_process_threadcreate(td);
2302                 break;
2303
2304         case PMC_FN_THR_CREATE_LOG:
2305                 pmc_process_threadcreate(td);
2306                 break;
2307
2308         case PMC_FN_THR_EXIT:
2309                 KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2310                     __LINE__));
2311                 pmc_process_thread_delete(td);
2312                 pmc_process_threadexit(td);
2313                 break;
2314         case PMC_FN_THR_EXIT_LOG:
2315                 pmc_process_threadexit(td);
2316                 break;
2317         case PMC_FN_THR_USERRET:
2318                 KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2319                     __LINE__));
2320                 pmc_process_thread_userret(td);
2321                 break;
2322
2323         default:
2324 #ifdef  HWPMC_DEBUG
2325                 KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
2326 #endif
2327                 break;
2328
2329         }
2330
2331         return 0;
2332 }
2333
2334 /*
2335  * allocate a 'struct pmc_owner' descriptor in the owner hash table.
2336  */
2337
2338 static struct pmc_owner *
2339 pmc_allocate_owner_descriptor(struct proc *p)
2340 {
2341         uint32_t hindex;
2342         struct pmc_owner *po;
2343         struct pmc_ownerhash *poh;
2344
2345         hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2346         poh = &pmc_ownerhash[hindex];
2347
2348         /* allocate and initialize an owner descriptor */
2349         po = malloc(sizeof(struct pmc_owner), M_PMC, M_WAITOK|M_ZERO);
2350         po->po_owner = p;
2351         LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
2352
2353         TAILQ_INIT(&po->po_logbuffers);
2354         mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN);
2355
2356         PMCDBG4(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
2357             p, p->p_pid, p->p_comm, po);
2358
2359         return po;
2360 }
2361
2362 static void
2363 pmc_destroy_owner_descriptor(struct pmc_owner *po)
2364 {
2365
2366         PMCDBG4(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)",
2367             po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm);
2368
2369         mtx_destroy(&po->po_mtx);
2370         free(po, M_PMC);
2371 }
2372
2373 /*
2374  * Allocate a thread descriptor from the free pool.
2375  *
2376  * NOTE: This *can* return NULL.
2377  */
2378 static struct pmc_thread *
2379 pmc_thread_descriptor_pool_alloc(void)
2380 {
2381         struct pmc_thread *pt;
2382
2383         mtx_lock_spin(&pmc_threadfreelist_mtx);
2384         if ((pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) {
2385                 LIST_REMOVE(pt, pt_next);
2386                 pmc_threadfreelist_entries--;
2387         }
2388         mtx_unlock_spin(&pmc_threadfreelist_mtx);
2389
2390         return (pt);
2391 }
2392
2393 /*
2394  * Add a thread descriptor to the free pool. We use this instead of
2395  * free() to maintain a cache of free entries. Additionally, we can
2396  * safely call this function when we cannot call free(), such as in a
2397  * critical section.
2398  */
2399 static void
2400 pmc_thread_descriptor_pool_free(struct pmc_thread *pt)
2401 {
2402
2403         if (pt == NULL)
2404                 return;
2405
2406         memset(pt, 0, THREADENTRY_SIZE);
2407         mtx_lock_spin(&pmc_threadfreelist_mtx);
2408         LIST_INSERT_HEAD(&pmc_threadfreelist, pt, pt_next);
2409         pmc_threadfreelist_entries++;
2410         if (pmc_threadfreelist_entries > pmc_threadfreelist_max)
2411                 GROUPTASK_ENQUEUE(&free_gtask);
2412         mtx_unlock_spin(&pmc_threadfreelist_mtx);
2413 }
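
/*
 * Editor's note: descriptors cycle through the pool rather than being
 * returned to malloc(9); free(9) only runs from the task below (or from
 * the final drain) once the pool exceeds pmc_threadfreelist_max.  A
 * minimal usage sketch:
 *
 *	if ((pt = pmc_thread_descriptor_pool_alloc()) != NULL) {
 *		(use *pt)
 *		pmc_thread_descriptor_pool_free(pt);
 *	}
 */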
2414
2415 /*
2416  * An asynchronous task to manage the free list.
2417  */
2418 static void
2419 pmc_thread_descriptor_pool_free_task(void *arg __unused)
2420 {
2421         struct pmc_thread *pt;
2422         LIST_HEAD(, pmc_thread) tmplist;
2423         int delta;
2424
2425         LIST_INIT(&tmplist);
2426         /* Determine what changes, if any, we need to make. */
2427         mtx_lock_spin(&pmc_threadfreelist_mtx);
2428         delta = pmc_threadfreelist_entries - pmc_threadfreelist_max;
2429         while (delta > 0 &&
2430                    (pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) {
2431                 delta--;
2432                 LIST_REMOVE(pt, pt_next);
2433                 LIST_INSERT_HEAD(&tmplist, pt, pt_next);
2434         }
2435         mtx_unlock_spin(&pmc_threadfreelist_mtx);
2436
2437         /* If there are entries to free, free them. */
2438         while (!LIST_EMPTY(&tmplist)) {
2439                 pt = LIST_FIRST(&tmplist);
2440                 LIST_REMOVE(pt, pt_next);
2441                 free(pt, M_PMC);
2442         }
2443 }
2444
2445 /*
2446  * Drain the thread free pool, freeing all allocations.
2447  */
2448 static void
2449 pmc_thread_descriptor_pool_drain(void)
2450 {
2451         struct pmc_thread *pt, *next;
2452
2453         LIST_FOREACH_SAFE(pt, &pmc_threadfreelist, pt_next, next) {
2454                 LIST_REMOVE(pt, pt_next);
2455                 free(pt, M_PMC);
2456         }
2457 }
2458
2459 /*
2460  * find the descriptor corresponding to thread 'td', adding or removing it
2461  * as specified by 'mode'.
2462  *
2463  * Note that in addition to the mode flags accepted by
2464  * pmc_find_process_descriptor(), this function supports:
2465  * PMC_FLAG_NOWAIT: Causes the function to not wait for mallocs.
2466  *     This makes it safe to call while holding certain other locks.
2467  */
2468
2469 static struct pmc_thread *
2470 pmc_find_thread_descriptor(struct pmc_process *pp, struct thread *td,
2471     uint32_t mode)
2472 {
2473         struct pmc_thread *pt = NULL, *ptnew = NULL;
2474         int wait_flag;
2475
2476         KASSERT(td != NULL, ("[pmc,%d] called to add NULL td", __LINE__));
2477
2478         /*
2479          * Pre-allocate memory in the PMC_FLAG_ALLOCATE case prior to
2480          * acquiring the lock.
2481          */
2482         if (mode & PMC_FLAG_ALLOCATE) {
2483                 if ((ptnew = pmc_thread_descriptor_pool_alloc()) == NULL) {
2484                         wait_flag = M_WAITOK;
2485                         if ((mode & PMC_FLAG_NOWAIT) || in_epoch(global_epoch_preempt))
2486                                 wait_flag = M_NOWAIT;
2487
2488                         ptnew = malloc(THREADENTRY_SIZE, M_PMC,
2489                             wait_flag|M_ZERO);
2490                 }
2491         }
2492
2493         mtx_lock_spin(pp->pp_tdslock);
2494
2495         LIST_FOREACH(pt, &pp->pp_tds, pt_next)
2496                 if (pt->pt_td == td)
2497                         break;
2498
2499         if ((mode & PMC_FLAG_REMOVE) && pt != NULL)
2500                 LIST_REMOVE(pt, pt_next);
2501
2502         if ((mode & PMC_FLAG_ALLOCATE) && pt == NULL && ptnew != NULL) {
2503                 pt = ptnew;
2504                 ptnew = NULL;
2505                 pt->pt_td = td;
2506                 LIST_INSERT_HEAD(&pp->pp_tds, pt, pt_next);
2507         }
2508
2509         mtx_unlock_spin(pp->pp_tdslock);
2510
2511         if (ptnew != NULL) {
2512                 free(ptnew, M_PMC);
2513         }
2514
2515         return pt;
2516 }
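
/*
 * Editor's sketch of a lookup-or-create call from a context that must
 * not sleep (hypothetical caller; the flags are those described above):
 *
 *	pt = pmc_find_thread_descriptor(pp, td,
 *	    PMC_FLAG_ALLOCATE | PMC_FLAG_NOWAIT);
 *	if (pt == NULL)
 *		(allocation failed without sleeping; caller must cope)
 */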
2517
2518 /*
2519  * Try to add thread descriptors for each thread in a process.
2520  */
2521
2522 static void
2523 pmc_add_thread_descriptors_from_proc(struct proc *p, struct pmc_process *pp)
2524 {
2525         struct thread *curtd;
2526         struct pmc_thread **tdlist;
2527         int i, tdcnt, tdlistsz;
2528
2529         KASSERT(!PROC_LOCKED(p), ("[pmc,%d] proc unexpectedly locked",
2530             __LINE__));
2531         tdcnt = 32;
2532  restart:
2533         tdlistsz = roundup2(tdcnt, 32);
2534
2535         tdcnt = 0;
2536         tdlist = malloc(sizeof(struct pmc_thread *) * tdlistsz, M_TEMP, M_WAITOK);
2537
2538         PROC_LOCK(p);
2539         FOREACH_THREAD_IN_PROC(p, curtd)
2540                 tdcnt++;
2541         if (tdcnt >= tdlistsz) {
2542                 PROC_UNLOCK(p);
2543                 free(tdlist, M_TEMP);
2544                 goto restart;
2545         }
2546         /*
2547          * Try to add each thread to the list without sleeping. If unable,
2548          * add to a queue to retry after dropping the process lock.
2549          */
2550         tdcnt = 0;
2551         FOREACH_THREAD_IN_PROC(p, curtd) {
2552                 tdlist[tdcnt] = pmc_find_thread_descriptor(pp, curtd,
2553                     PMC_FLAG_ALLOCATE|PMC_FLAG_NOWAIT);
2554                 if (tdlist[tdcnt] == NULL) {
2555                         PROC_UNLOCK(p);
2556                         for (i = 0; i <= tdcnt; i++)
2557                                 pmc_thread_descriptor_pool_free(tdlist[i]);
2558                         free(tdlist, M_TEMP);
2559                         goto restart;
2560                 }
2561                 tdcnt++;
2562         }
2563         PROC_UNLOCK(p);
2564         free(tdlist, M_TEMP);
2565 }
2566
2567 /*
2568  * find the descriptor corresponding to process 'p', adding or removing it
2569  * as specified by 'mode'.
2570  */
2571
2572 static struct pmc_process *
2573 pmc_find_process_descriptor(struct proc *p, uint32_t mode)
2574 {
2575         uint32_t hindex;
2576         struct pmc_process *pp, *ppnew;
2577         struct pmc_processhash *pph;
2578
2579         hindex = PMC_HASH_PTR(p, pmc_processhashmask);
2580         pph = &pmc_processhash[hindex];
2581
2582         ppnew = NULL;
2583
2584         /*
2585          * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we
2586          * cannot call malloc(9) once we hold a spin lock.
2587          */
2588         if (mode & PMC_FLAG_ALLOCATE)
2589                 ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc *
2590                     sizeof(struct pmc_targetstate), M_PMC, M_WAITOK|M_ZERO);
2591
2592         mtx_lock_spin(&pmc_processhash_mtx);
2593         LIST_FOREACH(pp, pph, pp_next)
2594             if (pp->pp_proc == p)
2595                     break;
2596
2597         if ((mode & PMC_FLAG_REMOVE) && pp != NULL)
2598                 LIST_REMOVE(pp, pp_next);
2599
2600         if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL &&
2601             ppnew != NULL) {
2602                 ppnew->pp_proc = p;
2603                 LIST_INIT(&ppnew->pp_tds);
2604                 ppnew->pp_tdslock = mtx_pool_find(pmc_mtxpool, ppnew);
2605                 LIST_INSERT_HEAD(pph, ppnew, pp_next);
2606                 mtx_unlock_spin(&pmc_processhash_mtx);
2607                 pp = ppnew;
2608                 ppnew = NULL;
2609
2610                 /* Add thread descriptors for this process' current threads. */
2611                 pmc_add_thread_descriptors_from_proc(p, pp);
2612         }
2613         else
2614                 mtx_unlock_spin(&pmc_processhash_mtx);
2615
2616         if (ppnew != NULL)
2617                 free(ppnew, M_PMC);
2618
2619         return pp;
2620 }
2621
2622 /*
2623  * remove a process descriptor from the process hash table.
2624  */
2625
2626 static void
2627 pmc_remove_process_descriptor(struct pmc_process *pp)
2628 {
2629         KASSERT(pp->pp_refcnt == 0,
2630             ("[pmc,%d] Removing process descriptor %p with count %d",
2631                 __LINE__, pp, pp->pp_refcnt));
2632
2633         mtx_lock_spin(&pmc_processhash_mtx);
2634         LIST_REMOVE(pp, pp_next);
2635         mtx_unlock_spin(&pmc_processhash_mtx);
2636 }
2637
2638 /*
2639  * destroy a process descriptor.
2640  */
2641
2642 static void
2643 pmc_destroy_process_descriptor(struct pmc_process *pp)
2644 {
2645         struct pmc_thread *pmc_td;
2646
2647         while ((pmc_td = LIST_FIRST(&pp->pp_tds)) != NULL) {
2648                 LIST_REMOVE(pmc_td, pt_next);
2649                 pmc_thread_descriptor_pool_free(pmc_td);
2650         }
2651         free(pp, M_PMC);
2652 }
2653
2654
2655 /*
2656  * find an owner descriptor corresponding to proc 'p'
2657  */
2658
2659 static struct pmc_owner *
2660 pmc_find_owner_descriptor(struct proc *p)
2661 {
2662         uint32_t hindex;
2663         struct pmc_owner *po;
2664         struct pmc_ownerhash *poh;
2665
2666         hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2667         poh = &pmc_ownerhash[hindex];
2668
2669         po = NULL;
2670         LIST_FOREACH(po, poh, po_next)
2671             if (po->po_owner == p)
2672                     break;
2673
2674         PMCDBG5(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
2675             "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);
2676
2677         return po;
2678 }
2679
2680 /*
2681  * pmc_allocate_pmc_descriptor
2682  *
2683  * Allocate a pmc descriptor and initialize its
2684  * fields.
2685  */
2686
2687 static struct pmc *
2688 pmc_allocate_pmc_descriptor(void)
2689 {
2690         struct pmc *pmc;
2691
2692         pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK|M_ZERO);
2693         pmc->pm_runcount = counter_u64_alloc(M_WAITOK);
2694         pmc->pm_pcpu_state = malloc(sizeof(struct pmc_pcpu_state) * mp_ncpus, M_PMC, M_WAITOK|M_ZERO);
2695         PMCDBG1(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
2696
2697         return pmc;
2698 }
2699
2700 /*
2701  * Destroy a pmc descriptor.
2702  */
2703
2704 static void
2705 pmc_destroy_pmc_descriptor(struct pmc *pm)
2706 {
2707
2708         KASSERT(pm->pm_state == PMC_STATE_DELETED ||
2709             pm->pm_state == PMC_STATE_FREE,
2710             ("[pmc,%d] destroying non-deleted PMC", __LINE__));
2711         KASSERT(LIST_EMPTY(&pm->pm_targets),
2712             ("[pmc,%d] destroying pmc with targets", __LINE__));
2713         KASSERT(pm->pm_owner == NULL,
2714             ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
2715         KASSERT(counter_u64_fetch(pm->pm_runcount) == 0,
2716             ("[pmc,%d] pmc has non-zero run count %lu", __LINE__,
2717                  (unsigned long)counter_u64_fetch(pm->pm_runcount)));
2718
2719         counter_u64_free(pm->pm_runcount);
2720         free(pm->pm_pcpu_state, M_PMC);
2721         free(pm, M_PMC);
2722 }
2723
2724 static void
2725 pmc_wait_for_pmc_idle(struct pmc *pm)
2726 {
2727 #ifdef HWPMC_DEBUG
2728         volatile int maxloop;
2729
2730         maxloop = 100 * pmc_cpu_max();
2731 #endif
2732         /*
2733          * Loop (with a forced context switch) till the PMC's runcount
2734          * comes down to zero.
2735          */
2736         pmclog_flush(pm->pm_owner, 1);
2737         while (counter_u64_fetch(pm->pm_runcount) > 0) {
2738                 pmclog_flush(pm->pm_owner, 1);
2739 #ifdef HWPMC_DEBUG
2740                 maxloop--;
2741                 KASSERT(maxloop > 0,
2742                     ("[pmc,%d] (ri%d, rc%lu) waiting too long for "
2743                         "pmc to be free", __LINE__,
2744                          PMC_TO_ROWINDEX(pm), (unsigned long)counter_u64_fetch(pm->pm_runcount)));
2745 #endif
2746                 pmc_force_context_switch();
2747         }
2748 }
2749
2750 /*
2751  * This function does the following things:
2752  *
2753  *  - detaches the PMC from hardware
2754  *  - unlinks all target threads that were attached to it
2755  *  - removes the PMC from its owner's list
2756  *  - destroys the PMC private mutex
2757  *
2758  * Once this function completes, the given pmc pointer can be freed by
2759  * calling pmc_destroy_pmc_descriptor().
2760  */
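
/*
 * A minimal sketch of the implied teardown order (editor's note):
 *
 *	pmc_release_pmc_descriptor(pm);		(off hardware, unlinked)
 *	pmc_destroy_pmc_descriptor(pm);		(memory freed)
 */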
2761
2762 static void
2763 pmc_release_pmc_descriptor(struct pmc *pm)
2764 {
2765         enum pmc_mode mode;
2766         struct pmc_hw *phw;
2767         u_int adjri, ri, cpu;
2768         struct pmc_owner *po;
2769         struct pmc_binding pb;
2770         struct pmc_process *pp;
2771         struct pmc_classdep *pcd;
2772         struct pmc_target *ptgt, *tmp;
2773
2774         sx_assert(&pmc_sx, SX_XLOCKED);
2775
2776         KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));
2777
2778         ri   = PMC_TO_ROWINDEX(pm);
2779         pcd  = pmc_ri_to_classdep(md, ri, &adjri);
2780         mode = PMC_TO_MODE(pm);
2781
2782         PMCDBG3(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
2783             mode);
2784
2785         /*
2786          * First, we take the PMC off hardware.
2787          */
2788         cpu = 0;
2789         if (PMC_IS_SYSTEM_MODE(mode)) {
2790
2791                 /*
2792                  * A system mode PMC runs on a specific CPU.  Switch
2793                  * to this CPU and turn hardware off.
2794                  */
2795                 pmc_save_cpu_binding(&pb);
2796
2797                 cpu = PMC_TO_CPU(pm);
2798
2799                 pmc_select_cpu(cpu);
2800
2801                 /* switch off non-stalled CPUs */
2802                 pm->pm_pcpu_state[cpu].pps_cpustate = 0;
2803                 if (pm->pm_state == PMC_STATE_RUNNING &&
2804                         pm->pm_pcpu_state[cpu].pps_stalled == 0) {
2805
2806                         phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
2807
2808                         KASSERT(phw->phw_pmc == pm,
2809                             ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
2810                                 __LINE__, ri, phw->phw_pmc, pm));
2811                         PMCDBG2(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);
2812
2813                         critical_enter();
2814                         pcd->pcd_stop_pmc(cpu, adjri);
2815                         critical_exit();
2816                 }
2817
2818                 PMCDBG2(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);
2819
2820                 critical_enter();
2821                 pcd->pcd_config_pmc(cpu, adjri, NULL);
2822                 critical_exit();
2823
2824                 /* adjust the global and process count of SS mode PMCs */
2825                 if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) {
2826                         po = pm->pm_owner;
2827                         po->po_sscount--;
2828                         if (po->po_sscount == 0) {
2829                                 atomic_subtract_rel_int(&pmc_ss_count, 1);
2830                                 CK_LIST_REMOVE(po, po_ssnext);
2831                                 epoch_wait_preempt(global_epoch_preempt);
2832                         }
2833                 }
2834
2835                 pm->pm_state = PMC_STATE_DELETED;
2836
2837                 pmc_restore_cpu_binding(&pb);
2838
2839                 /*
2840                  * We could have references to this PMC structure in
2841                  * the per-cpu sample queues.  Wait for the queue to
2842                  * drain.
2843                  */
2844                 pmc_wait_for_pmc_idle(pm);
2845
2846         } else if (PMC_IS_VIRTUAL_MODE(mode)) {
2847
2848                 /*
2849                  * A virtual PMC could be running on multiple CPUs at
2850                  * a given instant.
2851                  *
2852                  * By marking its state as DELETED, we ensure that
2853                  * this PMC is never further scheduled on hardware.
2854                  *
2855                  * Then we wait till all CPUs are done with this PMC.
2856                  */
2857                 pm->pm_state = PMC_STATE_DELETED;
2858
2859
2860                 /* Wait for the PMCs runcount to come to zero. */
2861                 pmc_wait_for_pmc_idle(pm);
2862
2863                 /*
2864                  * At this point the PMC is off all CPUs and cannot be
2865                  * freshly scheduled onto a CPU.  It is now safe to
2866                  * unlink all targets from this PMC.  If a
2867                  * process-record's refcount falls to zero, we remove
2868                  * it from the hash table.  The module-wide SX lock
2869                  * protects us from races.
2870                  */
2871                 LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
2872                         pp = ptgt->pt_process;
2873                         pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */
2874
2875                         PMCDBG1(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);
2876
2877                         /*
2878                          * If the target process record shows that no
2879                          * PMCs are attached to it, reclaim its space.
2880                          */
2881
2882                         if (pp->pp_refcnt == 0) {
2883                                 pmc_remove_process_descriptor(pp);
2884                                 pmc_destroy_process_descriptor(pp);
2885                         }
2886                 }
2887
2888                 cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */
2889
2890         }
2891
2892         /*
2893          * Release any MD resources
2894          */
2895         (void) pcd->pcd_release_pmc(cpu, adjri, pm);
2896
2897         /*
2898          * Update row disposition
2899          */
2900
2901         if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm)))
2902                 PMC_UNMARK_ROW_STANDALONE(ri);
2903         else
2904                 PMC_UNMARK_ROW_THREAD(ri);
2905
2906         /* unlink from the owner's list */
2907         if (pm->pm_owner) {
2908                 LIST_REMOVE(pm, pm_next);
2909                 pm->pm_owner = NULL;
2910         }
2911 }
2912
2913 /*
2914  * Register an owner and a pmc.
2915  */
2916
2917 static int
2918 pmc_register_owner(struct proc *p, struct pmc *pmc)
2919 {
2920         struct pmc_owner *po;
2921
2922         sx_assert(&pmc_sx, SX_XLOCKED);
2923
2924         if ((po = pmc_find_owner_descriptor(p)) == NULL)
2925                 if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
2926                         return ENOMEM;
2927
2928         KASSERT(pmc->pm_owner == NULL,
2929             ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
2930         pmc->pm_owner  = po;
2931
2932         LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next);
2933
2934         PROC_LOCK(p);
2935         p->p_flag |= P_HWPMC;
2936         PROC_UNLOCK(p);
2937
2938         if (po->po_flags & PMC_PO_OWNS_LOGFILE)
2939                 pmclog_process_pmcallocate(pmc);
2940
2941         PMCDBG2(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p",
2942             po, pmc);
2943
2944         return 0;
2945 }
2946
2947 /*
2948  * Return the current row disposition:
2949  * == 0 => FREE
2950  *  > 0 => PROCESS MODE
2951  *  < 0 => SYSTEM MODE
2952  */
2953
2954 int
2955 pmc_getrowdisp(int ri)
2956 {
2957         return pmc_pmcdisp[ri];
2958 }
2959
2960 /*
2961  * Check if a PMC at row index 'ri' can be allocated to the current
2962  * process.
2963  *
2964  * Allocation can fail if:
2965  *   - the current process is already being profiled by a PMC at index 'ri',
2966  *     attached to it via OP_PMCATTACH.
2967  *   - the current process has already allocated a PMC at index 'ri'
2968  *     via OP_ALLOCATE.
2969  */
2970
2971 static int
2972 pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu)
2973 {
2974         enum pmc_mode mode;
2975         struct pmc *pm;
2976         struct pmc_owner *po;
2977         struct pmc_process *pp;
2978
2979         PMCDBG5(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d "
2980             "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu);
2981
2982         /*
2983          * We shouldn't have already allocated a process-mode PMC at
2984          * row index 'ri'.
2985          *
2986          * We shouldn't have allocated a system-wide PMC on the same
2987          * CPU and same RI.
2988          */
2989         if ((po = pmc_find_owner_descriptor(p)) != NULL)
2990                 LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
2991                     if (PMC_TO_ROWINDEX(pm) == ri) {
2992                             mode = PMC_TO_MODE(pm);
2993                             if (PMC_IS_VIRTUAL_MODE(mode))
2994                                     return EEXIST;
2995                             if (PMC_IS_SYSTEM_MODE(mode) &&
2996                                 (int) PMC_TO_CPU(pm) == cpu)
2997                                     return EEXIST;
2998                     }
2999                 }
3000
3001         /*
3002          * We also shouldn't be the target of any PMC at this index
3003          * since otherwise a PMC_ATTACH to ourselves will fail.
3004          */
3005         if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
3006                 if (pp->pp_pmcs[ri].pp_pmc)
3007                         return EEXIST;
3008
3009         PMCDBG4(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
3010             p, p->p_pid, p->p_comm, ri);
3011
3012         return 0;
3013 }
3014
3015 /*
3016  * Check if a given PMC at row index 'ri' can be currently used in
3017  * mode 'mode'.
3018  */
3019
3020 static int
3021 pmc_can_allocate_row(int ri, enum pmc_mode mode)
3022 {
3023         enum pmc_disp   disp;
3024
3025         sx_assert(&pmc_sx, SX_XLOCKED);
3026
3027         PMCDBG2(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
3028
3029         if (PMC_IS_SYSTEM_MODE(mode))
3030                 disp = PMC_DISP_STANDALONE;
3031         else
3032                 disp = PMC_DISP_THREAD;
3033
3034         /*
3035          * check disposition for PMC row 'ri':
3036          *
3037          * Expected disposition         Row-disposition         Result
3038          *
3039          * STANDALONE                   STANDALONE or FREE      proceed
3040          * STANDALONE                   THREAD                  fail
3041          * THREAD                       THREAD or FREE          proceed
3042          * THREAD                       STANDALONE              fail
3043          */
3044
3045         if (!PMC_ROW_DISP_IS_FREE(ri) &&
3046             !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
3047             !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
3048                 return EBUSY;
3049
3050         /*
3051          * All OK
3052          */
3053
3054         PMCDBG2(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
3055
3056         return 0;
3057
3058 }
3059
3060 /*
3061  * Find a PMC descriptor with user handle 'pmcid' among those owned by 'po'.
3062  */
3063
3064 static struct pmc *
3065 pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
3066 {
3067         struct pmc *pm;
3068
3069         KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
3070             ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
3071                 PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));
3072
3073         LIST_FOREACH(pm, &po->po_pmcs, pm_next)
3074             if (pm->pm_id == pmcid)
3075                     return pm;
3076
3077         return NULL;
3078 }
3079
3080 static int
3081 pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
3082 {
3083
3084         struct pmc *pm, *opm;
3085         struct pmc_owner *po;
3086         struct pmc_process *pp;
3087
3088         PMCDBG1(PMC,FND,1, "find-pmc id=%d", pmcid);
3089         if (PMC_ID_TO_ROWINDEX(pmcid) >= md->pmd_npmc)
3090                 return (EINVAL);
3091
3092         if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL) {
3093                 /*
3094                  * In the case of PMC_F_DESCENDANTS, child processes will not be
3095                  * found in the owners hash list.  Find the process descriptor
3096                  * first and from there look up the owner ('po').
3097                  */
3098                 if ((pp = pmc_find_process_descriptor(curthread->td_proc,
3099                     PMC_FLAG_NONE)) == NULL) {
3100                         return ESRCH;
3101                 } else {
3102                         opm = pp->pp_pmcs[PMC_ID_TO_ROWINDEX(pmcid)].pp_pmc;
3103                         if (opm == NULL)
3104                                 return ESRCH;
3105                         if ((opm->pm_flags & (PMC_F_ATTACHED_TO_OWNER|
3106                             PMC_F_DESCENDANTS)) != (PMC_F_ATTACHED_TO_OWNER|
3107                             PMC_F_DESCENDANTS))
3108                                 return ESRCH;
3109                         po = opm->pm_owner;
3110                 }
3111         }
3112
3113         if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
3114                 return EINVAL;
3115
3116         PMCDBG2(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
3117
3118         *pmc = pm;
3119         return 0;
3120 }
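
/*
 * Handle-encoding sketch for the lookups above: a pmc_id_t packs the
 * CPU, mode, class and row index of a PMC (see the PMC_ID_* macros in
 * <sys/pmc.h>), which is why the row-index bounds check needs no list
 * traversal:
 *
 *	pmc_id_t id = PMC_ID_MAKE_ID(cpu, mode, class, ri);
 *	KASSERT(PMC_ID_TO_ROWINDEX(id) == ri,
 *	    ("[pmc] id fails to round-trip"));
 */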
3121
3122 /*
3123  * Start a PMC.
3124  */
3125
3126 static int
3127 pmc_start(struct pmc *pm)
3128 {
3129         enum pmc_mode mode;
3130         struct pmc_owner *po;
3131         struct pmc_binding pb;
3132         struct pmc_classdep *pcd;
3133         int adjri, error, cpu, ri;
3134
3135         KASSERT(pm != NULL,
3136             ("[pmc,%d] null pm", __LINE__));
3137
3138         mode = PMC_TO_MODE(pm);
3139         ri   = PMC_TO_ROWINDEX(pm);
3140         pcd  = pmc_ri_to_classdep(md, ri, &adjri);
3141
3142         error = 0;
3143
3144         PMCDBG3(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);
3145
3146         po = pm->pm_owner;
3147
3148         /*
3149          * Disallow PMCSTART if a logfile is required but has not been
3150          * configured yet.
3151          */
3152         if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
3153             (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
3154                 return (EDOOFUS);       /* programming error */
3155
3156         /*
3157          * If this is a sampling mode PMC, log mapping information for
3158          * the kernel modules that are currently loaded.
3159          */
3160         if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3161                 pmc_log_kernel_mappings(pm);
3162
3163         if (PMC_IS_VIRTUAL_MODE(mode)) {
3164
3165                 /*
3166                  * If a PMCATTACH has never been done on this PMC,
3167                  * attach it to its owner process.
3168                  */
3169
3170                 if (LIST_EMPTY(&pm->pm_targets))
3171                         error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH :
3172                             pmc_attach_process(po->po_owner, pm);
3173
3174                 /*
3175                  * If the PMC is attached to its owner, then force a context
3176                  * switch to ensure that the MD state gets set correctly.
3177                  */
3178
3179                 if (error == 0) {
3180                         pm->pm_state = PMC_STATE_RUNNING;
3181                         if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER)
3182                                 pmc_force_context_switch();
3183                 }
3184
3185                 return (error);
3186         }
3187
3188
3189         /*
3190          * A system-wide PMC.
3191          *
3192          * Add the owner to the global list if this is a system-wide
3193          * sampling PMC.
3194          */
3195
3196         if (mode == PMC_MODE_SS) {
3197                 /*
3198                  * Log mapping information for all existing processes in the
3199                  * system.  Subsequent mappings are logged as they happen;
3200                  * see pmc_process_mmap().
3201                  */
3202                 if (po->po_logprocmaps == 0) {
3203                         pmc_log_all_process_mappings(po);
3204                         po->po_logprocmaps = 1;
3205                 }
3206                 po->po_sscount++;
3207                 if (po->po_sscount == 1) {
3208                         atomic_add_rel_int(&pmc_ss_count, 1);
3209                         CK_LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
3210                         PMCDBG1(PMC,OPS,1, "po=%p in global list", po);
3211                 }
3212         }
3213
3214         /*
3215          * Move to the CPU associated with this
3216          * PMC, and start the hardware.
3217          */
3218
3219         pmc_save_cpu_binding(&pb);
3220
3221         cpu = PMC_TO_CPU(pm);
3222
3223         if (!pmc_cpu_is_active(cpu))
3224                 return (ENXIO);
3225
3226         pmc_select_cpu(cpu);
3227
3228         /*
3229          * global PMCs are configured at allocation time
3230          * so write out the initial value and start the PMC.
3231          */
3232
3233         pm->pm_state = PMC_STATE_RUNNING;
3234
3235         critical_enter();
3236         if ((error = pcd->pcd_write_pmc(cpu, adjri,
3237                  PMC_IS_SAMPLING_MODE(mode) ?
3238                  pm->pm_sc.pm_reloadcount :
3239                  pm->pm_sc.pm_initial)) == 0) {
3240                 /* If a sampling mode PMC, reset stalled state. */
3241                 if (PMC_IS_SAMPLING_MODE(mode))
3242                         pm->pm_pcpu_state[cpu].pps_stalled = 0;
3243
3244                 /* Indicate that we desire this to run. Start it. */
3245                 pm->pm_pcpu_state[cpu].pps_cpustate = 1;
3246                 error = pcd->pcd_start_pmc(cpu, adjri);
3247         }
3248         critical_exit();
3249
3250         pmc_restore_cpu_binding(&pb);
3251
3252         return (error);
3253 }
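
/*
 * Note on the start sequence above (editorial): pps_cpustate is
 * raised before the MD pcd_start_pmc() call so that an interrupt
 * taken immediately after the hardware is enabled already sees that
 * this CPU wants the PMC running; pps_stalled is cleared first for
 * the same reason.
 */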
3254
3255 /*
3256  * Stop a PMC.
3257  */
3258
3259 static int
3260 pmc_stop(struct pmc *pm)
3261 {
3262         struct pmc_owner *po;
3263         struct pmc_binding pb;
3264         struct pmc_classdep *pcd;
3265         int adjri, cpu, error, ri;
3266
3267         KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
3268
3269         PMCDBG3(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm,
3270             PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm));
3271
3272         pm->pm_state = PMC_STATE_STOPPED;
3273
3274         /*
3275          * If the PMC is a virtual mode one, changing the state to
3276          * non-RUNNING is enough to ensure that the PMC never gets
3277          * scheduled.
3278          *
3279          * If this PMC is currently running on a CPU, it will be
3280          * handled correctly at the time its target process is context
3281          * switched out.
3282          */
3283
3284         if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
3285                 return 0;
3286
3287         /*
3288          * A system-mode PMC.  Move to the CPU associated with
3289          * this PMC, and stop the hardware.  We update the
3290          * 'initial count' so that a subsequent PMCSTART will
3291          * resume counting from the current hardware count.
3292          */
3293
3294         pmc_save_cpu_binding(&pb);
3295
3296         cpu = PMC_TO_CPU(pm);
3297
3298         KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
3299             ("[pmc,%d] illegal cpu=%d", __LINE__, cpu));
3300
3301         if (!pmc_cpu_is_active(cpu))
3302                 return ENXIO;
3303
3304         pmc_select_cpu(cpu);
3305
3306         ri = PMC_TO_ROWINDEX(pm);
3307         pcd = pmc_ri_to_classdep(md, ri, &adjri);
3308
3309         pm->pm_pcpu_state[cpu].pps_cpustate = 0;
3310         critical_enter();
3311         if ((error = pcd->pcd_stop_pmc(cpu, adjri)) == 0)
3312                 error = pcd->pcd_read_pmc(cpu, adjri, &pm->pm_sc.pm_initial);
3313         critical_exit();
3314
3315         pmc_restore_cpu_binding(&pb);
3316
3317         po = pm->pm_owner;
3318
3319         /* remove this owner from the global list of SS PMC owners */
3320         if (PMC_TO_MODE(pm) == PMC_MODE_SS) {
3321                 po->po_sscount--;
3322                 if (po->po_sscount == 0) {
3323                         atomic_subtract_rel_int(&pmc_ss_count, 1);
3324                         CK_LIST_REMOVE(po, po_ssnext);
3325                         epoch_wait_preempt(global_epoch_preempt);
3326                         PMCDBG1(PMC,OPS,2,"po=%p removed from global list", po);
3327                 }
3328         }
3329
3330         return (error);
3331 }
3332
3333 static struct pmc_classdep *
3334 pmc_class_to_classdep(enum pmc_class class)
3335 {
3336         int n;
3337
3338         for (n = 0; n < md->pmd_nclass; n++)
3339                 if (md->pmd_classdep[n].pcd_class == class)
3340                         return (&md->pmd_classdep[n]);
3341         return (NULL);
3342 }
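
/*
 * Typical use (see the PMC_OP_PMCALLOCATE handler below): validate a
 * user-supplied class before trusting it, for example:
 *
 *	pcd = pmc_class_to_classdep(pa.pm_class);
 *	if (pcd == NULL)
 *		error = EINVAL;
 */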
3343
3344 #if defined(HWPMC_DEBUG) && defined(KTR)
3345 static const char *pmc_op_to_name[] = {
3346 #undef  __PMC_OP
3347 #define __PMC_OP(N, D)  #N ,
3348         __PMC_OPS()
3349         NULL
3350 };
3351 #endif
3352
3353 /*
3354  * The syscall interface
3355  */
3356
3357 #define PMC_GET_SX_XLOCK(...) do {              \
3358         sx_xlock(&pmc_sx);                      \
3359         if (pmc_hook == NULL) {                 \
3360                 sx_xunlock(&pmc_sx);            \
3361                 return __VA_ARGS__;             \
3362         }                                       \
3363 } while (0)
3364
3365 #define PMC_DOWNGRADE_SX() do {                 \
3366         sx_downgrade(&pmc_sx);                  \
3367         is_sx_downgraded = 1;                   \
3368 } while (0)
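
/*
 * Locking sketch showing how the two macros above cooperate inside
 * pmc_syscall_handler(): the handler enters with 'pmc_sx' held
 * exclusively, read-mostly operations downgrade it to a shared lock,
 * and the common exit path releases whichever flavor is still held:
 *
 *	PMC_GET_SX_XLOCK(ENOSYS);
 *	is_sx_downgraded = 0;
 *	...
 *	if (is_sx_downgraded)
 *		sx_sunlock(&pmc_sx);
 *	else
 *		sx_xunlock(&pmc_sx);
 */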
3369
3370 static int
3371 pmc_syscall_handler(struct thread *td, void *syscall_args)
3372 {
3373         int error, is_sx_downgraded, op;
3374         struct pmc_syscall_args *c;
3375         void *pmclog_proc_handle;
3376         void *arg;
3377
3378         c = (struct pmc_syscall_args *)syscall_args;
3379         op = c->pmop_code;
3380         arg = c->pmop_data;
3381         /* PMC isn't set up yet */
3382         if (pmc_hook == NULL)
3383                 return (EINVAL);
3384         if (op == PMC_OP_CONFIGURELOG) {
3385                 /*
3386                  * We cannot create the logging process inside
3387                  * pmclog_configure_log() because there is a LOR
3388                  * between pmc_sx and process structure locks.
3389                  * Instead, pre-create the process and ignite the loop
3390                  * if everything is fine, otherwise direct the process
3391                  * to exit.
3392                  */
3393                 error = pmclog_proc_create(td, &pmclog_proc_handle);
3394                 if (error != 0)
3395                         goto done_syscall;
3396         }
3397
3398         PMC_GET_SX_XLOCK(ENOSYS);
3399         is_sx_downgraded = 0;
3400         PMCDBG3(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
3401             pmc_op_to_name[op], arg);
3402
3403         error = 0;
3404         counter_u64_add(pmc_stats.pm_syscalls, 1);
3405
3406         switch (op) {
3407
3408
3409         /*
3410          * Configure a log file.
3411          *
3412          * XXX This OP will be reworked.
3413          */
3414
3415         case PMC_OP_CONFIGURELOG:
3416         {
3417                 struct proc *p;
3418                 struct pmc *pm;
3419                 struct pmc_owner *po;
3420                 struct pmc_op_configurelog cl;
3421
3422                 if ((error = copyin(arg, &cl, sizeof(cl))) != 0) {
3423                         pmclog_proc_ignite(pmclog_proc_handle, NULL);
3424                         break;
3425                 }
3426
3427                 /* mark this process as owning a log file */
3428                 p = td->td_proc;
3429                 if ((po = pmc_find_owner_descriptor(p)) == NULL)
3430                         if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
3431                                 pmclog_proc_ignite(pmclog_proc_handle, NULL);
3432                                 error = ENOMEM;
3433                                 break;
3434                         }
3435
3436                 /*
3437                  * If a valid fd was passed in, try to configure it.
3438                  * Otherwise, if 'fd' was negative and a log file is
3439                  * already configured, flush its buffers and
3440                  * de-configure it.
3441                  */
3442                 if (cl.pm_logfd >= 0) {
3443                         error = pmclog_configure_log(md, po, cl.pm_logfd);
3444                         pmclog_proc_ignite(pmclog_proc_handle, error == 0 ?
3445                             po : NULL);
3446                 } else if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
3447                         pmclog_proc_ignite(pmclog_proc_handle, NULL);
3448                         error = pmclog_close(po);
3449                         if (error == 0) {
3450                                 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
3451                                     if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
3452                                         pm->pm_state == PMC_STATE_RUNNING)
3453                                             pmc_stop(pm);
3454                                 error = pmclog_deconfigure_log(po);
3455                         }
3456                 } else {
3457                         pmclog_proc_ignite(pmclog_proc_handle, NULL);
3458                         error = EINVAL;
3459                 }
3460         }
3461         break;
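
	/*
	 * Example request (sketch, not driver code): logging is torn
	 * down by passing a negative descriptor, which flushes and
	 * closes the current log and stops any PMCs that required one:
	 *
	 *	struct pmc_op_configurelog cl;
	 *
	 *	cl.pm_logfd = -1;
	 *	(issue PMC_OP_CONFIGURELOG with &cl as the argument)
	 */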
3462
3463         /*
3464          * Flush a log file.
3465          */
3466
3467         case PMC_OP_FLUSHLOG:
3468         {
3469                 struct pmc_owner *po;
3470
3471                 sx_assert(&pmc_sx, SX_XLOCKED);
3472
3473                 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3474                         error = EINVAL;
3475                         break;
3476                 }
3477
3478                 error = pmclog_flush(po, 0);
3479         }
3480         break;
3481
3482         /*
3483          * Close a log file.
3484          */
3485
3486         case PMC_OP_CLOSELOG:
3487         {
3488                 struct pmc_owner *po;
3489
3490                 sx_assert(&pmc_sx, SX_XLOCKED);
3491
3492                 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3493                         error = EINVAL;
3494                         break;
3495                 }
3496
3497                 error = pmclog_close(po);
3498         }
3499         break;
3500
3501         /*
3502          * Retrieve hardware configuration.
3503          */
3504
3505         case PMC_OP_GETCPUINFO: /* CPU information */
3506         {
3507                 struct pmc_op_getcpuinfo gci;
3508                 struct pmc_classinfo *pci;
3509                 struct pmc_classdep *pcd;
3510                 int cl;
3511
3512                 gci.pm_cputype = md->pmd_cputype;
3513                 gci.pm_ncpu    = pmc_cpu_max();
3514                 gci.pm_npmc    = md->pmd_npmc;
3515                 gci.pm_nclass  = md->pmd_nclass;
3516                 pci = gci.pm_classes;
3517                 pcd = md->pmd_classdep;
3518                 for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) {
3519                         pci->pm_caps  = pcd->pcd_caps;
3520                         pci->pm_class = pcd->pcd_class;
3521                         pci->pm_width = pcd->pcd_width;
3522                         pci->pm_num   = pcd->pcd_num;
3523                 }
3524                 error = copyout(&gci, arg, sizeof(gci));
3525         }
3526         break;
3527
3528         /*
3529          * Retrieve soft events list.
3530          */
3531         case PMC_OP_GETDYNEVENTINFO:
3532         {
3533                 enum pmc_class                  cl;
3534                 enum pmc_event                  ev;
3535                 struct pmc_op_getdyneventinfo   *gei;
3536                 struct pmc_dyn_event_descr      dev;
3537                 struct pmc_soft                 *ps;
3538                 uint32_t                        nevent;
3539
3540                 sx_assert(&pmc_sx, SX_LOCKED);
3541
3542                 gei = (struct pmc_op_getdyneventinfo *) arg;
3543
3544                 if ((error = copyin(&gei->pm_class, &cl, sizeof(cl))) != 0)
3545                         break;
3546
3547                 /* Only SOFT class is dynamic. */
3548                 if (cl != PMC_CLASS_SOFT) {
3549                         error = EINVAL;
3550                         break;
3551                 }
3552
3553                 nevent = 0;
3554                 for (ev = PMC_EV_SOFT_FIRST; (int)ev <= PMC_EV_SOFT_LAST; ev++) {
3555                         ps = pmc_soft_ev_acquire(ev);
3556                         if (ps == NULL)
3557                                 continue;
3558                         bcopy(&ps->ps_ev, &dev, sizeof(dev));
3559                         pmc_soft_ev_release(ps);
3560
3561                         error = copyout(&dev,
3562                             &gei->pm_events[nevent],
3563                             sizeof(struct pmc_dyn_event_descr));
3564                         if (error != 0)
3565                                 break;
3566                         nevent++;
3567                 }
3568                 if (error != 0)
3569                         break;
3570
3571                 error = copyout(&nevent, &gei->pm_nevent,
3572                     sizeof(nevent));
3573         }
3574         break;
3575
3576         /*
3577          * Get module statistics
3578          */
3579
3580         case PMC_OP_GETDRIVERSTATS:
3581         {
3582                 struct pmc_op_getdriverstats gms;
3583 #define CFETCH(a, b, field) a.field = counter_u64_fetch(b.field)
3584                 CFETCH(gms, pmc_stats, pm_intr_ignored);
3585                 CFETCH(gms, pmc_stats, pm_intr_processed);
3586                 CFETCH(gms, pmc_stats, pm_intr_bufferfull);
3587                 CFETCH(gms, pmc_stats, pm_syscalls);
3588                 CFETCH(gms, pmc_stats, pm_syscall_errors);
3589                 CFETCH(gms, pmc_stats, pm_buffer_requests);
3590                 CFETCH(gms, pmc_stats, pm_buffer_requests_failed);
3591                 CFETCH(gms, pmc_stats, pm_log_sweeps);
3592 #undef CFETCH
3593                 error = copyout(&gms, arg, sizeof(gms));
3594         }
3595         break;
3596
3597
3598         /*
3599          * Retrieve module version number
3600          */
3601
3602         case PMC_OP_GETMODULEVERSION:
3603         {
3604                 uint32_t cv, modv;
3605
3606                 /* retrieve the client's idea of the ABI version */
3607                 if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0)
3608                         break;
3609                 /* don't service clients newer than our driver */
3610                 modv = PMC_VERSION;
3611                 if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) {
3612                         error = EPROGMISMATCH;
3613                         break;
3614                 }
3615                 error = copyout(&modv, arg, sizeof(modv));
3616         }
3617         break;
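
	/*
	 * Worked example (assuming the conventional packing implied by
	 * the 0xFFFF0000 mask, with the major and minor fields in the
	 * upper 16 bits): a client built against version 3.1.x can be
	 * serviced by a 3.1.y driver regardless of patch level, while a
	 * 3.2.x client talking to a 3.1.y driver is refused with
	 * EPROGMISMATCH.
	 */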
3618
3619
3620         /*
3621          * Retrieve the state of all the PMCs on a given
3622          * CPU.
3623          */
3624
3625         case PMC_OP_GETPMCINFO:
3626         {
3627                 int ari;
3628                 struct pmc *pm;
3629                 size_t pmcinfo_size;
3630                 uint32_t cpu, n, npmc;
3631                 struct pmc_owner *po;
3632                 struct pmc_binding pb;
3633                 struct pmc_classdep *pcd;
3634                 struct pmc_info *p, *pmcinfo;
3635                 struct pmc_op_getpmcinfo *gpi;
3636
3637                 PMC_DOWNGRADE_SX();
3638
3639                 gpi = (struct pmc_op_getpmcinfo *) arg;
3640
3641                 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
3642                         break;
3643
3644                 if (cpu >= pmc_cpu_max()) {
3645                         error = EINVAL;
3646                         break;
3647                 }
3648
3649                 if (!pmc_cpu_is_active(cpu)) {
3650                         error = ENXIO;
3651                         break;
3652                 }
3653
3654                 /* switch to CPU 'cpu' */
3655                 pmc_save_cpu_binding(&pb);
3656                 pmc_select_cpu(cpu);
3657
3658                 npmc = md->pmd_npmc;
3659
3660                 pmcinfo_size = npmc * sizeof(struct pmc_info);
3661                 pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK);
3662
3663                 p = pmcinfo;
3664
3665                 for (n = 0; n < md->pmd_npmc; n++, p++) {
3666
3667                         pcd = pmc_ri_to_classdep(md, n, &ari);
3668
3669                         KASSERT(pcd != NULL,
3670                             ("[pmc,%d] null pcd ri=%d", __LINE__, n));
3671
3672                         if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0)
3673                                 break;
3674
3675                         if (PMC_ROW_DISP_IS_STANDALONE(n))
3676                                 p->pm_rowdisp = PMC_DISP_STANDALONE;
3677                         else if (PMC_ROW_DISP_IS_THREAD(n))
3678                                 p->pm_rowdisp = PMC_DISP_THREAD;
3679                         else
3680                                 p->pm_rowdisp = PMC_DISP_FREE;
3681
3682                         p->pm_ownerpid = -1;
3683
3684                         if (pm == NULL) /* no PMC associated */
3685                                 continue;
3686
3687                         po = pm->pm_owner;
3688
3689                         KASSERT(po->po_owner != NULL,
3690                             ("[pmc,%d] pmc_owner had a null proc pointer",
3691                                 __LINE__));
3692
3693                         p->pm_ownerpid = po->po_owner->p_pid;
3694                         p->pm_mode     = PMC_TO_MODE(pm);
3695                         p->pm_event    = pm->pm_event;
3696                         p->pm_flags    = pm->pm_flags;
3697
3698                         if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3699                                 p->pm_reloadcount =
3700                                     pm->pm_sc.pm_reloadcount;
3701                 }
3702
3703                 pmc_restore_cpu_binding(&pb);
3704
3705                 /* now copy out the PMC info collected */
3706                 if (error == 0)
3707                         error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
3708
3709                 free(pmcinfo, M_PMC);
3710         }
3711         break;
3712
3713
3714         /*
3715          * Set the administrative state of a PMC, i.e., whether
3716          * the PMC may be used or not.
3717          */
3718
3719         case PMC_OP_PMCADMIN:
3720         {
3721                 int cpu, ri;
3722                 enum pmc_state request;
3723                 struct pmc_cpu *pc;
3724                 struct pmc_hw *phw;
3725                 struct pmc_op_pmcadmin pma;
3726                 struct pmc_binding pb;
3727
3728                 sx_assert(&pmc_sx, SX_XLOCKED);
3729
3730                 KASSERT(td == curthread,
3731                     ("[pmc,%d] td != curthread", __LINE__));
3732
3733                 error = priv_check(td, PRIV_PMC_MANAGE);
3734                 if (error)
3735                         break;
3736
3737                 if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
3738                         break;
3739
3740                 cpu = pma.pm_cpu;
3741
3742                 if (cpu < 0 || cpu >= (int) pmc_cpu_max()) {
3743                         error = EINVAL;
3744                         break;
3745                 }
3746
3747                 if (!pmc_cpu_is_active(cpu)) {
3748                         error = ENXIO;
3749                         break;
3750                 }
3751
3752                 request = pma.pm_state;
3753
3754                 if (request != PMC_STATE_DISABLED &&
3755                     request != PMC_STATE_FREE) {
3756                         error = EINVAL;
3757                         break;
3758                 }
3759
3760                 ri = pma.pm_pmc; /* pmc id == row index */
3761                 if (ri < 0 || ri >= (int) md->pmd_npmc) {
3762                         error = EINVAL;
3763                         break;
3764                 }
3765
3766                 /*
3767                  * We can't disable a PMC with a row-index allocated
3768                  * for process virtual PMCs.
3769                  */
3770
3771                 if (PMC_ROW_DISP_IS_THREAD(ri) &&
3772                     request == PMC_STATE_DISABLED) {
3773                         error = EBUSY;
3774                         break;
3775                 }
3776
3777                 /*
3778                  * otherwise, this PMC on this CPU is either free or
3779                  * in system-wide mode.
3780                  */
3781
3782                 pmc_save_cpu_binding(&pb);
3783                 pmc_select_cpu(cpu);
3784
3785                 pc  = pmc_pcpu[cpu];
3786                 phw = pc->pc_hwpmcs[ri];
3787
3788                 /*
3789                  * XXX do we need some kind of 'forced' disable?
3790                  */
3791
3792                 if (phw->phw_pmc == NULL) {
3793                         if (request == PMC_STATE_DISABLED &&
3794                             (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
3795                                 phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
3796                                 PMC_MARK_ROW_STANDALONE(ri);
3797                         } else if (request == PMC_STATE_FREE &&
3798                             (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
3799                                 phw->phw_state |=  PMC_PHW_FLAG_IS_ENABLED;
3800                                 PMC_UNMARK_ROW_STANDALONE(ri);
3801                         }
3802                         /* other cases are a no-op */
3803                 } else
3804                         error = EBUSY;
3805
3806                 pmc_restore_cpu_binding(&pb);
3807         }
3808         break;
3809
3810
3811         /*
3812          * Allocate a PMC.
3813          */
3814
3815         case PMC_OP_PMCALLOCATE:
3816         {
3817                 int adjri, n;
3818                 u_int cpu;
3819                 uint32_t caps;
3820                 struct pmc *pmc;
3821                 enum pmc_mode mode;
3822                 struct pmc_hw *phw;
3823                 struct pmc_binding pb;
3824                 struct pmc_classdep *pcd;
3825                 struct pmc_op_pmcallocate pa;
3826
3827                 if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
3828                         break;
3829
3830                 caps = pa.pm_caps;
3831                 mode = pa.pm_mode;
3832                 cpu  = pa.pm_cpu;
3833
3834                 if ((mode != PMC_MODE_SS  &&  mode != PMC_MODE_SC  &&
3835                      mode != PMC_MODE_TS  &&  mode != PMC_MODE_TC) ||
3836                     (cpu != (u_int) PMC_CPU_ANY && cpu >= pmc_cpu_max())) {
3837                         error = EINVAL;
3838                         break;
3839                 }
3840
3841                 /*
3842                  * Virtual PMCs should only ask for a default CPU.
3843                  * System mode PMCs need to specify a non-default CPU.
3844                  */
3845
3846                 if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
3847                     (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
3848                         error = EINVAL;
3849                         break;
3850                 }
3851
3852                 /*
3853                  * Check that an inactive CPU is not being asked for.
3854                  */
3855
3856                 if (PMC_IS_SYSTEM_MODE(mode) && !pmc_cpu_is_active(cpu)) {
3857                         error = ENXIO;
3858                         break;
3859                 }
3860
3861                 /*
3862                  * Refuse an allocation for a system-wide PMC if this
3863                  * process has been jailed, or if this process lacks
3864                  * super-user credentials and the sysctl tunable
3865                  * 'security.bsd.unprivileged_syspmcs' is zero.
3866                  */
3867
3868                 if (PMC_IS_SYSTEM_MODE(mode)) {
3869                         if (jailed(curthread->td_ucred)) {
3870                                 error = EPERM;
3871                                 break;
3872                         }
3873                         if (!pmc_unprivileged_syspmcs) {
3874                                 error = priv_check(curthread,
3875                                     PRIV_PMC_SYSTEM);
3876                                 if (error)
3877                                         break;
3878                         }
3879                 }
3880
3881                 /*
3882                  * Look for valid values for 'pm_flags'
3883                  */
3884
3885                 if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW |
3886                     PMC_F_LOG_PROCEXIT | PMC_F_CALLCHAIN |
3887                     PMC_F_USERCALLCHAIN)) != 0) {
3888                         error = EINVAL;
3889                         break;
3890                 }
3891
3892                 /* PMC_F_USERCALLCHAIN is only valid with PMC_F_CALLCHAIN */
3893                 if ((pa.pm_flags & (PMC_F_CALLCHAIN | PMC_F_USERCALLCHAIN)) ==
3894                     PMC_F_USERCALLCHAIN) {
3895                         error = EINVAL;
3896                         break;
3897                 }
3898
3899                 /* PMC_F_USERCALLCHAIN is only valid for sampling mode */
3900                 if (pa.pm_flags & PMC_F_USERCALLCHAIN &&
3901                         mode != PMC_MODE_TS && mode != PMC_MODE_SS) {
3902                         error = EINVAL;
3903                         break;
3904                 }
3905
3906                 /* process logging options are not allowed for system PMCs */
3907                 if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags &
3908                     (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) {
3909                         error = EINVAL;
3910                         break;
3911                 }
3912
3913                 /*
3914                  * All sampling mode PMCs need to be able to interrupt the
3915                  * CPU.
3916                  */
3917                 if (PMC_IS_SAMPLING_MODE(mode))
3918                         caps |= PMC_CAP_INTERRUPT;
3919
3920                 /* A valid class specifier should have been passed in. */
3921                 pcd = pmc_class_to_classdep(pa.pm_class);
3922                 if (pcd == NULL) {
3923                         error = EINVAL;
3924                         break;
3925                 }
3926
3927                 /* The requested PMC capabilities should be feasible. */
3928                 if ((pcd->pcd_caps & caps) != caps) {
3929                         error = EOPNOTSUPP;
3930                         break;
3931                 }
3932
3933                 PMCDBG4(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
3934                     pa.pm_ev, caps, mode, cpu);
3935
3936                 pmc = pmc_allocate_pmc_descriptor();
3937                 pmc->pm_id    = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class,
3938                     PMC_ID_INVALID);
3939                 pmc->pm_event = pa.pm_ev;
3940                 pmc->pm_state = PMC_STATE_FREE;
3941                 pmc->pm_caps  = caps;
3942                 pmc->pm_flags = pa.pm_flags;
3943
3944                 /* XXX set lower bound on sampling for process counters */
3945                 if (PMC_IS_SAMPLING_MODE(mode))
3946                         pmc->pm_sc.pm_reloadcount = pa.pm_count;
3947                 else
3948                         pmc->pm_sc.pm_initial = pa.pm_count;
3949
3950                 /* switch thread to CPU 'cpu' */
3951                 pmc_save_cpu_binding(&pb);
3952
3953 #define PMC_IS_SHAREABLE_PMC(cpu, n)                            \
3954         (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state &           \
3955          PMC_PHW_FLAG_IS_SHAREABLE)
3956 #define PMC_IS_UNALLOCATED(cpu, n)                              \
3957         (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
3958
3959                 if (PMC_IS_SYSTEM_MODE(mode)) {
3960                         pmc_select_cpu(cpu);
3961                         for (n = pcd->pcd_ri; n < (int) md->pmd_npmc; n++) {
3962                                 pcd = pmc_ri_to_classdep(md, n, &adjri);
3963                                 if (pmc_can_allocate_row(n, mode) == 0 &&
3964                                     pmc_can_allocate_rowindex(
3965                                             curthread->td_proc, n, cpu) == 0 &&
3966                                     (PMC_IS_UNALLOCATED(cpu, n) ||
3967                                      PMC_IS_SHAREABLE_PMC(cpu, n)) &&
3968                                     pcd->pcd_allocate_pmc(cpu, adjri, pmc,
3969                                         &pa) == 0)
3970                                         break;
3971                         }
3972                 } else {
3973                         /* Process virtual mode */
3974                         for (n = pcd->pcd_ri; n < (int) md->pmd_npmc; n++) {
3975                                 pcd = pmc_ri_to_classdep(md, n, &adjri);
3976                                 if (pmc_can_allocate_row(n, mode) == 0 &&
3977                                     pmc_can_allocate_rowindex(
3978                                             curthread->td_proc, n,
3979                                             PMC_CPU_ANY) == 0 &&
3980                                     pcd->pcd_allocate_pmc(curthread->td_oncpu,
3981                                         adjri, pmc, &pa) == 0)
3982                                         break;
3983                         }
3984                 }
3985
3986 #undef  PMC_IS_UNALLOCATED
3987 #undef  PMC_IS_SHAREABLE_PMC
3988
3989                 pmc_restore_cpu_binding(&pb);
3990
3991                 if (n == (int) md->pmd_npmc) {
3992                         pmc_destroy_pmc_descriptor(pmc);
3993                         pmc = NULL;
3994                         error = EINVAL;
3995                         break;
3996                 }
3997
3998                 /* Fill in the correct value in the ID field */
3999                 pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n);
4000
4001                 PMCDBG5(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x",
4002                     pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id);
4003
4004                 /* Process mode PMCs with logging enabled need log files */
4005                 if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW))
4006                         pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
4007
4008                 /* All system mode sampling PMCs require a log file */
4009                 if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode))
4010                         pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
4011
4012                 /*
4013                  * Configure global PMCs immediately.
4014                  */
4015
4016                 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) {
4017
4018                         pmc_save_cpu_binding(&pb);
4019                         pmc_select_cpu(cpu);
4020
4021                         phw = pmc_pcpu[cpu]->pc_hwpmcs[n];
4022                         pcd = pmc_ri_to_classdep(md, n, &adjri);
4023
4024                         if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 ||
4025                             (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) {
4026                                 (void) pcd->pcd_release_pmc(cpu, adjri, pmc);
4027                                 pmc_destroy_pmc_descriptor(pmc);
4028                                 pmc = NULL;
4029                                 pmc_restore_cpu_binding(&pb);
4030                                 error = EPERM;
4031                                 break;
4032                         }
4033
4034                         pmc_restore_cpu_binding(&pb);
4035                 }
4036
4037                 pmc->pm_state    = PMC_STATE_ALLOCATED;
4038                 pmc->pm_class    = pa.pm_class;
4039
4040                 /*
4041                  * mark row disposition
4042                  */
4043
4044                 if (PMC_IS_SYSTEM_MODE(mode))
4045                         PMC_MARK_ROW_STANDALONE(n);
4046                 else
4047                         PMC_MARK_ROW_THREAD(n);
4048
4049                 /*
4050                  * Register this PMC with the current process as its owner.
4051                  */
4052
4053                 if ((error =
4054                     pmc_register_owner(curthread->td_proc, pmc)) != 0) {
4055                         pmc_release_pmc_descriptor(pmc);
4056                         pmc_destroy_pmc_descriptor(pmc);
4057                         pmc = NULL;
4058                         break;
4059                 }
4060
4061
4062                 /*
4063                  * Return the allocated PMC id.
4064                  */
4065
4066                 pa.pm_pmcid = pmc->pm_id;
4067
4068                 error = copyout(&pa, arg, sizeof(pa));
4069         }
4070         break;
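
	/*
	 * Minimal allocation request as seen at this boundary (sketch;
	 * the field values are illustrative and all other fields are
	 * zeroed):
	 *
	 *	struct pmc_op_pmcallocate pa;
	 *
	 *	memset(&pa, 0, sizeof(pa));
	 *	pa.pm_class = (desired PMC class);
	 *	pa.pm_ev    = (event to count);
	 *	pa.pm_mode  = PMC_MODE_SC;	(system-wide counting)
	 *	pa.pm_cpu   = 0;		(required for system mode)
	 *	pa.pm_count = (initial count; reload count if sampling);
	 *
	 * On success 'pa.pm_pmcid' carries the new handle back to the
	 * caller.
	 */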
4071
4072
4073         /*
4074          * Attach a PMC to a process.
4075          */
4076
4077         case PMC_OP_PMCATTACH:
4078         {
4079                 struct pmc *pm;
4080                 struct proc *p;
4081                 struct pmc_op_pmcattach a;
4082
4083                 sx_assert(&pmc_sx, SX_XLOCKED);
4084
4085                 if ((error = copyin(arg, &a, sizeof(a))) != 0)
4086                         break;
4087
4088                 if (a.pm_pid < 0) {
4089                         error = EINVAL;
4090                         break;
4091                 } else if (a.pm_pid == 0)
4092                         a.pm_pid = td->td_proc->p_pid;
4093
4094                 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
4095                         break;
4096
4097                 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
4098                         error = EINVAL;
4099                         break;
4100                 }
4101
4102                 /* PMCs may be (re)attached only when allocated or stopped */
4103                 if (pm->pm_state == PMC_STATE_RUNNING) {
4104                         error = EBUSY;
4105                         break;
4106                 } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
4107                     pm->pm_state != PMC_STATE_STOPPED) {
4108                         error = EINVAL;
4109                         break;
4110                 }
4111
4112                 /* lookup pid */
4113                 if ((p = pfind(a.pm_pid)) == NULL) {
4114                         error = ESRCH;
4115                         break;
4116                 }
4117
4118                 /*
4119                  * Ignore processes that are exiting.
4120                  */
4121                 if (p->p_flag & P_WEXIT) {
4122                         error = ESRCH;
4123                         PROC_UNLOCK(p); /* pfind() returns a locked process */
4124                         break;
4125                 }
4126
4127                 /*
4128                  * We are allowed to attach a PMC to a process if
4129                  * we can debug it.
4130                  */
4131                 error = p_candebug(curthread, p);
4132
4133                 PROC_UNLOCK(p);
4134
4135                 if (error == 0)
4136                         error = pmc_attach_process(p, pm);
4137         }
4138         break;
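
	/*
	 * Example request (sketch): attaching a process-mode PMC to the
	 * calling process itself; pm_pid == 0 is shorthand for the
	 * current process, as handled above:
	 *
	 *	struct pmc_op_pmcattach a;
	 *
	 *	a.pm_pmc = pmcid;
	 *	a.pm_pid = 0;	(expands to td->td_proc->p_pid)
	 */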
4139
4140
4141         /*
4142          * Detach an attached PMC from a process.
4143          */
4144
4145         case PMC_OP_PMCDETACH:
4146         {
4147                 struct pmc *pm;
4148                 struct proc *p;
4149                 struct pmc_op_pmcattach a;
4150
4151                 if ((error = copyin(arg, &a, sizeof(a))) != 0)
4152                         break;
4153
4154                 if (a.pm_pid < 0) {
4155                         error = EINVAL;
4156                         break;
4157                 } else if (a.pm_pid == 0)
4158                         a.pm_pid = td->td_proc->p_pid;
4159
4160                 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
4161                         break;
4162
4163                 if ((p = pfind(a.pm_pid)) == NULL) {
4164                         error = ESRCH;
4165                         break;
4166                 }
4167
4168                 /*
4169                  * Treat processes that are in the process of exiting
4170                  * as if they were not present.
4171                  */
4172
4173                 if (p->p_flag & P_WEXIT)
4174                         error = ESRCH;
4175
4176                 PROC_UNLOCK(p); /* pfind() returns a locked process */
4177
4178                 if (error == 0)
4179                         error = pmc_detach_process(p, pm);
4180         }
4181         break;
4182
4183
4184         /*
4185          * Retrieve the MSR number associated with the counter
4186          * 'pmc_id'.  This allows processes to directly use RDPMC
4187          * instructions to read their PMCs, without the overhead of a
4188          * system call.
4189          */
4190
4191         case PMC_OP_PMCGETMSR:
4192         {
4193                 int adjri, ri;
4194                 struct pmc *pm;
4195                 struct pmc_target *pt;
4196                 struct pmc_op_getmsr gm;
4197                 struct pmc_classdep *pcd;
4198
4199                 PMC_DOWNGRADE_SX();
4200
4201                 if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
4202                         break;
4203
4204                 if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
4205                         break;
4206
4207                 /*
4208                  * The allocated PMC has to be a process virtual PMC,
4209                  * i.e., of type MODE_T[CS].  Global PMCs can only be
4210                  * read using the PMCREAD operation since they may be
4211                  * allocated on a different CPU than the one we could
4212                  * be running on at the time of the RDPMC instruction.
4213                  *
4214                  * The GETMSR operation is not allowed for PMCs that
4215                  * are inherited across processes.
4216                  */
4217
4218                 if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) ||
4219                     (pm->pm_flags & PMC_F_DESCENDANTS)) {
4220                         error = EINVAL;
4221                         break;
4222                 }
4223
4224                 /*
4225                  * It only makes sense to use a RDPMC (or its
4226                  * equivalent instruction on non-x86 architectures) on
4227                  * a process that has allocated and attached a PMC to
4228                  * itself.  Conversely the PMC is only allowed to have
4229                  * one process attached to it -- its owner.
4230                  */
4231
4232                 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL ||
4233                     LIST_NEXT(pt, pt_next) != NULL ||
4234                     pt->pt_process->pp_proc != pm->pm_owner->po_owner) {
4235                         error = EINVAL;
4236                         break;
4237                 }
4238
4239                 ri = PMC_TO_ROWINDEX(pm);
4240                 pcd = pmc_ri_to_classdep(md, ri, &adjri);
4241
4242                 /* PMC class has no 'GETMSR' support */
4243                 if (pcd->pcd_get_msr == NULL) {
4244                         error = ENOSYS;
4245                         break;
4246                 }
4247
4248                 if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) < 0)
4249                         break;
4250
4251                 if ((error = copyout(&gm, arg, sizeof(gm))) < 0)
4252                         break;
4253
4254                 /*
4255                  * Mark our process as using MSRs.  Update machine
4256                  * state using a forced context switch.
4257                  */
4258
4259                 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS;
4260                 pmc_force_context_switch();
4261
4262         }
4263         break;
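
	/*
	 * Illustrative userland follow-up (x86 sketch, not driver
	 * code): once GETMSR has returned the counter number in
	 * gm.pm_msr, an attached self-profiling process can read its
	 * PMC without a system call:
	 *
	 *	uint32_t lo, hi;
	 *	uint64_t value;
	 *
	 *	__asm __volatile("rdpmc"
	 *	    : "=a" (lo), "=d" (hi) : "c" (gm.pm_msr));
	 *	value = ((uint64_t)hi << 32) | lo;
	 */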
4264
4265         /*
4266          * Release an allocated PMC
4267          */
4268
4269         case PMC_OP_PMCRELEASE:
4270         {
4271                 pmc_id_t pmcid;
4272                 struct pmc *pm;
4273                 struct pmc_owner *po;
4274                 struct pmc_op_simple sp;
4275
4276                 /*
4277                  * Find PMC pointer for the named PMC.
4278                  *
4279                  * Use pmc_release_pmc_descriptor() to switch off the
4280                  * PMC, remove all its target threads, and remove the
4281                  * PMC from its owner's list.
4282                  *
4283                  * Remove the owner record if this is the last PMC
4284                  * owned.
4285                  *
4286                  * Free up space.
4287                  */
4288
4289                 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4290                         break;
4291
4292                 pmcid = sp.pm_pmcid;
4293
4294                 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4295                         break;
4296
4297                 po = pm->pm_owner;
4298                 pmc_release_pmc_descriptor(pm);
4299                 pmc_maybe_remove_owner(po);
4300                 pmc_destroy_pmc_descriptor(pm);
4301         }
4302         break;
4303
4304
4305         /*
4306          * Read and/or write a PMC.
4307          */
4308
4309         case PMC_OP_PMCRW:
4310         {
4311                 int adjri;
4312                 struct pmc *pm;
4313                 uint32_t cpu, ri;
4314                 pmc_value_t oldvalue;
4315                 struct pmc_binding pb;
4316                 struct pmc_op_pmcrw prw;
4317                 struct pmc_classdep *pcd;
4318                 struct pmc_op_pmcrw *pprw;
4319
4320                 PMC_DOWNGRADE_SX();
4321
4322                 if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
4323                         break;
4324
4325                 ri = 0;
4326                 PMCDBG2(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
4327                     prw.pm_flags);
4328
4329                 /* must have at least one flag set */
4330                 if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
4331                         error = EINVAL;
4332                         break;
4333                 }
4334
4335                 /* locate pmc descriptor */
4336                 if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
4337                         break;
4338
4339                 /* Only an allocated, stopped or running PMC can be read. */
4340                 if (pm->pm_state != PMC_STATE_ALLOCATED &&
4341                     pm->pm_state != PMC_STATE_STOPPED &&
4342                     pm->pm_state != PMC_STATE_RUNNING) {
4343                         error = EINVAL;
4344                         break;
4345                 }
4346
4347                 /* writing a new value is not allowed while the PMC is running */
4348                 if (pm->pm_state == PMC_STATE_RUNNING &&
4349                     (prw.pm_flags & PMC_F_NEWVALUE)) {
4350                         error = EBUSY;
4351                         break;
4352                 }
4353
4354                 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
4355
4356                         /*
4357                          * If this PMC is attached to its owner (i.e.,
4358                          * the process requesting this operation) and
4359                          * is running, then attempt to get an
4360                          * upto-date reading from hardware for a READ.
4361                          * up-to-date reading from hardware for a READ.
4362                          * stopped, so only update the saved value
4363                          * field.
4364                          *
4365                          * If the PMC is not running, or is not
4366                          * attached to its owner, read/write to the
4367                          * savedvalue field.
4368                          */
4369
4370                         ri = PMC_TO_ROWINDEX(pm);
4371                         pcd = pmc_ri_to_classdep(md, ri, &adjri);
4372
4373                         mtx_pool_lock_spin(pmc_mtxpool, pm);
4374                         cpu = curthread->td_oncpu;
4375
4376                         if (prw.pm_flags & PMC_F_OLDVALUE) {
4377                                 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
4378                                     (pm->pm_state == PMC_STATE_RUNNING))
4379                                         error = (*pcd->pcd_read_pmc)(cpu, adjri,
4380                                             &oldvalue);
4381                                 else
4382                                         oldvalue = pm->pm_gv.pm_savedvalue;
4383                         }
4384                         if (prw.pm_flags & PMC_F_NEWVALUE)
4385                                 pm->pm_gv.pm_savedvalue = prw.pm_value;
4386
4387                         mtx_pool_unlock_spin(pmc_mtxpool, pm);
4388
4389                 } else { /* System mode PMCs */
4390                         cpu = PMC_TO_CPU(pm);
4391                         ri  = PMC_TO_ROWINDEX(pm);
4392                         pcd = pmc_ri_to_classdep(md, ri, &adjri);
4393
4394                         if (!pmc_cpu_is_active(cpu)) {
4395                                 error = ENXIO;
4396                                 break;
4397                         }
4398
4399                         /* move this thread to CPU 'cpu' */
4400                         pmc_save_cpu_binding(&pb);
4401                         pmc_select_cpu(cpu);
4402
4403                         critical_enter();
4404                         /* save old value */
4405                         if (prw.pm_flags & PMC_F_OLDVALUE)
4406                                 if ((error = (*pcd->pcd_read_pmc)(cpu, adjri,
4407                                          &oldvalue)))
4408                                         goto error;
4409                         /* write out new value */
4410                         if (prw.pm_flags & PMC_F_NEWVALUE)
4411                                 error = (*pcd->pcd_write_pmc)(cpu, adjri,
4412                                     prw.pm_value);
4413                 error:
4414                         critical_exit();
4415                         pmc_restore_cpu_binding(&pb);
4416                         if (error)
4417                                 break;
4418                 }
4419
4420                 pprw = (struct pmc_op_pmcrw *) arg;
4421
4422 #ifdef  HWPMC_DEBUG
4423                 if (prw.pm_flags & PMC_F_NEWVALUE)
4424                         PMCDBG3(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
4425                             ri, prw.pm_value, oldvalue);
4426                 else if (prw.pm_flags & PMC_F_OLDVALUE)
4427                         PMCDBG2(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue);
4428 #endif
4429
4430                 /* return old value if requested */
4431                 if (prw.pm_flags & PMC_F_OLDVALUE)
4432                         if ((error = copyout(&oldvalue, &pprw->pm_value,
4433                                  sizeof(prw.pm_value))))
4434                                 break;
4435
4436         }
4437         break;
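
	/*
	 * Example request (sketch): setting both flags on a stopped
	 * counting PMC reads out the current count and resets it in a
	 * single operation:
	 *
	 *	struct pmc_op_pmcrw prw;
	 *
	 *	prw.pm_pmcid = pmcid;
	 *	prw.pm_flags = PMC_F_OLDVALUE | PMC_F_NEWVALUE;
	 *	prw.pm_value = 0;
	 *
	 * On return, prw.pm_value holds the count that was replaced.
	 */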
4438
4439
4440         /*
4441          * Set the sampling rate for a sampling mode PMC and the
4442          * initial count for a counting mode PMC.
4443          */
4444
4445         case PMC_OP_PMCSETCOUNT:
4446         {
4447                 struct pmc *pm;
4448                 struct pmc_op_pmcsetcount sc;
4449
4450                 PMC_DOWNGRADE_SX();
4451
4452                 if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
4453                         break;
4454
4455                 if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
4456                         break;
4457
4458                 if (pm->pm_state == PMC_STATE_RUNNING) {
4459                         error = EBUSY;
4460                         break;
4461                 }
4462
4463                 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
4464                         pm->pm_sc.pm_reloadcount = sc.pm_count;
4465                 else
4466                         pm->pm_sc.pm_initial = sc.pm_count;
4467         }
4468         break;
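
	/*
	 * Example (sketch): 'pm_count' is the reload value for a
	 * sampling PMC (the number of events between samples) and the
	 * starting value for a counting PMC.  A hypothetical caller
	 * sampling every 65536 events:
	 *
	 *	struct pmc_op_pmcsetcount sc;
	 *
	 *	sc.pm_pmcid = pmcid;
	 *	sc.pm_count = 65536;
	 */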
4469
4470
4471         /*
4472          * Start a PMC.
4473          */
4474
4475         case PMC_OP_PMCSTART:
4476         {
4477                 pmc_id_t pmcid;
4478                 struct pmc *pm;
4479                 struct pmc_op_simple sp;
4480
4481                 sx_assert(&pmc_sx, SX_XLOCKED);
4482
4483                 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4484                         break;
4485
4486                 pmcid = sp.pm_pmcid;
4487
4488                 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4489                         break;
4490
4491                 KASSERT(pmcid == pm->pm_id,
4492                     ("[pmc,%d] pmcid %x != id %x", __LINE__,
4493                         pm->pm_id, pmcid));
4494
4495                 if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
4496                         break;
4497                 else if (pm->pm_state != PMC_STATE_STOPPED &&
4498                     pm->pm_state != PMC_STATE_ALLOCATED) {
4499                         error = EINVAL;
4500                         break;
4501                 }
4502
4503                 error = pmc_start(pm);
4504         }
4505         break;
4506
4507
4508         /*
4509          * Stop a PMC.
4510          */
4511
4512         case PMC_OP_PMCSTOP:
4513         {
4514                 pmc_id_t pmcid;
4515                 struct pmc *pm;
4516                 struct pmc_op_simple sp;
4517
4518                 PMC_DOWNGRADE_SX();
4519
4520                 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4521                         break;
4522
4523                 pmcid = sp.pm_pmcid;
4524
4525                 /*
4526                  * Mark the PMC as inactive and invoke the MD stop
4527                  * routines if needed.
4528                  */
4529
4530                 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4531                         break;
4532
4533                 KASSERT(pmcid == pm->pm_id,
4534                     ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
4535                         pm->pm_id, pmcid));
4536
4537                 if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
4538                         break;
4539                 else if (pm->pm_state != PMC_STATE_RUNNING) {
4540                         error = EINVAL;
4541                         break;
4542                 }
4543
4544                 error = pmc_stop(pm);
4545         }
4546         break;
4547
4548
4549         /*
4550          * Write a user supplied value to the log file.
4551          */
4552
4553         case PMC_OP_WRITELOG:
4554         {
4555                 struct pmc_op_writelog wl;
4556                 struct pmc_owner *po;
4557
4558                 PMC_DOWNGRADE_SX();
4559
4560                 if ((error = copyin(arg, &wl, sizeof(wl))) != 0)
4561                         break;
4562
4563                 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
4564                         error = EINVAL;
4565                         break;
4566                 }
4567
4568                 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
4569                         error = EINVAL;
4570                         break;
4571                 }
4572
4573                 error = pmclog_process_userlog(po, &wl);
4574         }
4575         break;
4576
4577
4578         default:
4579                 error = EINVAL;
4580                 break;
4581         }
4582
4583         if (is_sx_downgraded)
4584                 sx_sunlock(&pmc_sx);
4585         else
4586                 sx_xunlock(&pmc_sx);
4587 done_syscall:
4588         if (error)
4589                 counter_u64_add(pmc_stats.pm_syscall_errors, 1);
4590
4591         return (error);
4592 }
4593
4594 /*
4595  * Helper functions
4596  */
4597
4598
4599 /*
4600  * Mark the thread as needing callchain capture and post an AST.  The
4601  * actual callchain capture will be done in a context where it is safe
4602  * to take page faults.
4603  */
4604
4605 static void
4606 pmc_post_callchain_callback(void)
4607 {
4608         struct thread *td;
4609
4610         td = curthread;
4611
4612         /*
4613          * If there are multiple PMCs for the same interrupt, ignore the new post.
4614          */
4615         if (td->td_pflags & TDP_CALLCHAIN)
4616                 return;
4617
4618         /*
4619          * Mark this thread as needing callchain capture.
4620          * `td->td_pflags' will be safe to touch because this thread
4621          * was in user space when it was interrupted.
4622          */
4623         td->td_pflags |= TDP_CALLCHAIN;
4624
4625         /*
4626          * Don't let this thread migrate between CPUs until callchain
4627          * capture completes.
4628          */
4629         sched_pin();
4630
4631         return;
4632 }
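/*
 * Note: the matching sched_unpin() and the clearing of TDP_CALLCHAIN are
 * expected to happen in the PMC_FN_USER_CALLCHAIN hook path once the AST
 * has captured the user callchain.
 */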
4633
4634 /*
4635  * Find a free slot in the per-cpu array of samples and capture the
4636  * current callchain there.  If a sample was successfully added, a bit
4637  * is set in mask 'pmc_cpumask' denoting that the DO_SAMPLES hook
4638  * needs to be invoked from the clock handler.
4639  *
4640  * This function is meant to be called from an NMI handler.  It cannot
4641  * use any of the locking primitives supplied by the OS.
4642  */
4643
4644 static int
4645 pmc_add_sample(int ring, struct pmc *pm, struct trapframe *tf)
4646 {
4647         int error, cpu, callchaindepth, inuserspace;
4648         struct thread *td;
4649         struct pmc_sample *ps;
4650         struct pmc_samplebuffer *psb;
4651
4652         error = 0;
4653
4654         /*
4655          * Claim the current write slot in this CPU's sample ring.
4656          */
4657         cpu = curcpu;
4658         psb = pmc_pcpu[cpu]->pc_sb[ring];
4659         inuserspace = TRAPF_USERMODE(tf);
4660         ps = psb->ps_write;
4661         if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
4662                 counter_u64_add(ps->ps_pmc->pm_runcount, -1);
4663                 counter_u64_add(pmc_stats.pm_overwrites, 1);
4664                 ps->ps_nsamples = 0;
4665         } else if (ps->ps_nsamples) {   /* in use, reader hasn't caught up */
4666                 pm->pm_pcpu_state[cpu].pps_stalled = 1;
4667                 counter_u64_add(pmc_stats.pm_intr_bufferfull, 1);
4668                 PMCDBG6(SAM,INT,1,"(spc) cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d",
4669                     cpu, pm, (void *) tf, inuserspace,
4670                     (int) (psb->ps_write - psb->ps_samples),
4671                     (int) (psb->ps_read - psb->ps_samples));
4672                 callchaindepth = 1;
4673                 error = ENOMEM;
4674                 goto done;
4675         }
4676
4677         /* Fill in entry. */
4678         PMCDBG6(SAM,INT,1,"cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm,
4679             (void *) tf, inuserspace,
4680             (int) (psb->ps_write - psb->ps_samples),
4681             (int) (psb->ps_read - psb->ps_samples));
4682
4683         KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0,
4684             ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
4685                  (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4686
4687         counter_u64_add(pm->pm_runcount, 1);    /* hold onto PMC */
4688
4689         td = curthread;
4690         ps->ps_pmc = pm;
4691         ps->ps_td = td;
4692         ps->ps_pid = td->td_proc->p_pid;
4693         ps->ps_tid = td->td_tid;
4694         ps->ps_tsc = pmc_rdtsc();
4695
4696         ps->ps_cpu = cpu;
4697         ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0;
4698
4699         callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ?
4700             pmc_callchaindepth : 1;
4701
4702         if (callchaindepth == 1)
4703                 ps->ps_pc[0] = PMC_TRAPFRAME_TO_PC(tf);
4704         else {
4705                 /*
4706                  * Kernel stack traversals can be done immediately,
4707                  * while we defer to an AST for user space traversals.
4708                  */
4709                 if (!inuserspace) {
4710                         callchaindepth =
4711                             pmc_save_kernel_callchain(ps->ps_pc,
4712                                 callchaindepth, tf);
4713                 } else {
4714                         pmc_post_callchain_callback();
4715                         callchaindepth = PMC_SAMPLE_INUSE;
4716                 }
4717         }
4718
4719                 if (ring == PMC_UR) {
4720                         /* record actual depth; the AST fills in the rest */
4721                         ps->ps_nsamples_actual = callchaindepth;
4722                         ps->ps_nsamples = PMC_SAMPLE_INUSE;
4723                 } else
4724                         ps->ps_nsamples = callchaindepth;       /* mark entry as in use */
4725         /* increment write pointer, modulo ring buffer size */
4726         ps++;
4727         if (ps == psb->ps_fence)
4728                 psb->ps_write = psb->ps_samples;
4729         else
4730                 psb->ps_write = ps;
4731
4732  done:
4733         /* mark CPU as needing processing */
4734         if (callchaindepth != PMC_SAMPLE_INUSE)
4735                 DPCPU_SET(pmc_sampled, 1);
4736
4737         return (error);
4738 }
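/*
 * Sample ring invariants maintained by pmc_add_sample() and the sweep
 * code (informal sketch):
 *
 *      ps_samples <= ps_read, ps_write < ps_fence
 *      ps_nsamples == PMC_SAMPLE_FREE        : entry available
 *      ps_nsamples == PMC_SAMPLE_INUSE       : entry awaiting an AST fill-in
 *      0 < ps_nsamples <= pmc_callchaindepth : entry ready for processing
 */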
4739
4740 /*
4741  * Interrupt processing.
4742  *
4743  * This function is meant to be called from an NMI handler.  It cannot
4744  * use any of the locking primitives supplied by the OS.
4745  */
4746
4747 int
4748 pmc_process_interrupt(int ring, struct pmc *pm, struct trapframe *tf)
4749 {
4750         struct thread *td;
4751
4752         td = curthread;
4753         if ((pm->pm_flags & PMC_F_USERCALLCHAIN) &&
4754             (td->td_proc->p_flag & P_KPROC) == 0 &&
4755             !TRAPF_USERMODE(tf)) {
4756                 atomic_add_int(&td->td_pmcpend, 1);
4757                 return (pmc_add_sample(PMC_UR, pm, tf));
4758         }
4759         return (pmc_add_sample(ring, pm, tf));
4760 }
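/*
 * That is: kernel-mode interrupts for PMCs that asked for user callchains
 * (PMC_F_USERCALLCHAIN) are redirected to the userret ring (PMC_UR) so the
 * user-space portion of the stack can be appended later, outside NMI
 * context.
 */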
4761
4762 /*
4763  * Capture a user call chain.  This function will be called from ast()
4764  * before control returns to userland and before the process gets
4765  * rescheduled.
4766  */
4767
4768 static void
4769 pmc_capture_user_callchain(int cpu, int ring, struct trapframe *tf)
4770 {
4771         struct pmc *pm;
4772         struct thread *td;
4773         struct pmc_sample *ps, *ps_end;
4774         struct pmc_samplebuffer *psb;
4775         int nsamples, nrecords, pass;
4776 #ifdef  INVARIANTS
4777         int ncallchains;
4778         int nfree;
4779 #endif
4780
4781         psb = pmc_pcpu[cpu]->pc_sb[ring];
4782         td = curthread;
4783
4784         KASSERT(td->td_pflags & TDP_CALLCHAIN,
4785             ("[pmc,%d] Retrieving callchain for thread that doesn't want it",
4786                 __LINE__));
4787
4788 #ifdef  INVARIANTS
4789         ncallchains = 0;
4790         nfree = 0;
4791 #endif
4792         nrecords = INT_MAX;
4793         pass = 0;
4794  restart:
4795         if (ring == PMC_UR)
4796                 nrecords = atomic_readandclear_32(&td->td_pmcpend);
4797
4798         /*
4799          * Iterate through all deferred callchain requests.
4800          * Walk from the current read pointer to the current
4801          * write pointer.
4802          */
4803
4804         ps = psb->ps_read;
4805         ps_end = psb->ps_write;
4806         do {
4807 #ifdef  INVARIANTS
4808                 if (ps->ps_nsamples == PMC_SAMPLE_FREE) {
4809                         nfree++;
4810                         goto next;
4811                 }
4812
4813                 if ((ps->ps_pmc == NULL) ||
4814                     (ps->ps_pmc->pm_state != PMC_STATE_RUNNING))
4815                         nfree++;
4816 #endif
4817                 if (ps->ps_nsamples != PMC_SAMPLE_INUSE)
4818                         goto next;
4819                 if (ps->ps_td != td)
4820                         goto next;
4821
4822                 KASSERT(ps->ps_cpu == cpu,
4823                     ("[pmc,%d] cpu mismatch ps_cpu=%d pcpu=%d", __LINE__,
4824                         ps->ps_cpu, PCPU_GET(cpuid)));
4825
4826                 pm = ps->ps_pmc;
4827
4828                 KASSERT(pm->pm_flags & PMC_F_CALLCHAIN,
4829                     ("[pmc,%d] Retrieving callchain for PMC that doesn't "
4830                         "want it", __LINE__));
4831
4832                 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
4833                     ("[pmc,%d] runcount %ld", __LINE__, (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4834
4835                 if (ring == PMC_UR) {
4836                         nsamples = ps->ps_nsamples_actual;
4837                         counter_u64_add(pmc_stats.pm_merges, 1);
4838                 } else
4839                         nsamples = 0;
4840
4841                 /*
4842                  * Retrieve the callchain and mark the sample buffer
4843                  * as 'processable' by the timer tick sweep code.
4844                  */
4845
4846 #ifdef INVARIANTS
4847                 ncallchains++;
4848 #endif
4849
4850                 if (__predict_true(nsamples < pmc_callchaindepth - 1))
4851                         nsamples += pmc_save_user_callchain(ps->ps_pc + nsamples,
4852                             pmc_callchaindepth - nsamples - 1, tf);
4853                 wmb();
4854                 ps->ps_nsamples = nsamples;
4855                 if (nrecords-- == 1)
4856                         break;
4857 next:
4858                 /* increment the pointer, modulo sample ring size */
4859                 if (++ps == psb->ps_fence)
4860                         ps = psb->ps_samples;
4861         } while (ps != ps_end);
4862         if (__predict_false(ring == PMC_UR && td->td_pmcpend)) {
4863                 if (pass == 0) {
4864                         pass = 1;
4865                         goto restart;
4866                 }
4867                 /* only collect samples for this part once */
4868                 td->td_pmcpend = 0;
4869         }
4870
4871 #ifdef INVARIANTS
4872         if (ring == PMC_HR)
4873                 KASSERT(ncallchains > 0 || nfree > 0,
4874                     ("[pmc,%d] cpu %d didn't find a sample to collect", __LINE__,
4875                             cpu));
4876 #endif
4877
4878         /* mark CPU as needing processing */
4879         DPCPU_SET(pmc_sampled, 1);
4880 }
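/*
 * The 'pass' dance above bounds the work done per AST: if more samples
 * arrived while scanning (td_pmcpend went non-zero again), the ring is
 * walked one more time; anything still pending after that is deferred
 * rather than looped on indefinitely.
 */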
4881
4882
4883 static void
4884 pmc_flush_ring(int cpu, int ring)
4885 {
4886         struct pmc *pm;
4887         struct pmc_sample *ps;
4888         struct pmc_samplebuffer *psb;
4889         int n;
4890
4891         psb = pmc_pcpu[cpu]->pc_sb[ring];
4892
4893         for (n = 0; n < pmc_nsamples; n++) { /* bound on #iterations */
4894
4895                 ps = psb->ps_read;
4896                 if (ps->ps_nsamples == PMC_SAMPLE_FREE)
4897                         goto next;
4898                 pm = ps->ps_pmc;
4899                 counter_u64_add(pm->pm_runcount, -1);
4900                 ps->ps_nsamples = PMC_SAMPLE_FREE;
4901                 /* increment read pointer, modulo sample size */
4902         next:
4903                 if (++ps == psb->ps_fence)
4904                         psb->ps_read = psb->ps_samples;
4905                 else
4906                         psb->ps_read = ps;
4907         }
4908 }
4909
4910 void
4911 pmc_flush_samples(int cpu)
4912 {
4913         int n;
4914
4915         for (n = 0; n < PMC_NUM_SR; n++)
4916                 pmc_flush_ring(cpu, n);
4917 }
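/*
 * pmc_flush_samples() discards everything buffered on 'cpu' across all
 * rings, dropping the pm_runcount references held by those samples,
 * without emitting any log records.
 */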
4918
4919
4920 /*
4921  * Process saved PC samples.
4922  */
4923
4924 static void
4925 pmc_process_samples(int cpu, int ring)
4926 {
4927         struct pmc *pm;
4928         int adjri, n;
4929         struct thread *td;
4930         struct pmc_owner *po;
4931         struct pmc_sample *ps;
4932         struct pmc_classdep *pcd;
4933         struct pmc_samplebuffer *psb;
4934
4935         KASSERT(PCPU_GET(cpuid) == cpu,
4936             ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__,
4937                 PCPU_GET(cpuid), cpu));
4938
4939         psb = pmc_pcpu[cpu]->pc_sb[ring];
4940
4941         for (n = 0; n < pmc_nsamples; n++) { /* bound on #iterations */
4942
4943                 ps = psb->ps_read;
4944                 if (ps->ps_nsamples == PMC_SAMPLE_FREE)
4945                         break;
4946
4947                 pm = ps->ps_pmc;
4948
4949                 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
4950                     ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
4951                          (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4952
4953                 po = pm->pm_owner;
4954
4955                 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
4956                     ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__,
4957                         pm, PMC_TO_MODE(pm)));
4958
4959                 /* Ignore PMCs that have been switched off */
4960                 if (pm->pm_state != PMC_STATE_RUNNING)
4961                         goto entrydone;
4962
4963                 /* If there is a pending AST wait for completion */
4964                 if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
4965                         /* Need a rescan at a later time. */
4966                         DPCPU_SET(pmc_sampled, 1);
4967                         break;
4968                 }
4969
4970                 PMCDBG6(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu,
4971                     pm, ps->ps_nsamples, ps->ps_flags,
4972                     (int) (psb->ps_write - psb->ps_samples),
4973                     (int) (psb->ps_read - psb->ps_samples));
4974
4975                 /*
4976                  * If this is a process-mode PMC that is attached to
4977                  * its owner, and if the PC is in user mode, update
4978                  * profiling statistics like timer-based profiling
4979                  * would have done.
4980                  *
4981                  * Otherwise, this is either a sampling-mode PMC that
4982                  * is attached to a different process than its owner,
4983                  * or a system-wide sampling PMC. Dispatch a log
4984                  * entry to the PMC's owner process.
4985                  */
4986                 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
4987                         if (ps->ps_flags & PMC_CC_F_USERSPACE) {
4988                                 td = FIRST_THREAD_IN_PROC(po->po_owner);
4989                                 addupc_intr(td, ps->ps_pc[0], 1);
4990                         }
4991                 } else
4992                         pmclog_process_callchain(pm, ps);
4993
4994         entrydone:
4995                 ps->ps_nsamples = 0; /* mark entry as free */
4996                 counter_u64_add(pm->pm_runcount, -1);
4997
4998                 /* increment read pointer, modulo sample size */
4999                 if (++ps == psb->ps_fence)
5000                         psb->ps_read = psb->ps_samples;
5001                 else
5002                         psb->ps_read = ps;
5003         }
5004
5005         counter_u64_add(pmc_stats.pm_log_sweeps, 1);
5006
5007         /* Do not re-enable stalled PMCs if we failed to process any samples */
5008         if (n == 0)
5009                 return;
5010
5011         /*
5012          * Restart any stalled sampling PMCs on this CPU.
5013          *
5014          * If the NMI handler sets the pm_stalled field of a PMC after
5015          * the check below, we'll end up processing the stalled PMC at
5016          * the next hardclock tick.
5017          */
5018         for (n = 0; n < md->pmd_npmc; n++) {
5019                 pcd = pmc_ri_to_classdep(md, n, &adjri);
5020                 KASSERT(pcd != NULL,
5021                     ("[pmc,%d] null pcd ri=%d", __LINE__, n));
5022                 (void) (*pcd->pcd_get_config)(cpu,adjri,&pm);
5023
5024                 if (pm == NULL ||                        /* !cfg'ed */
5025                     pm->pm_state != PMC_STATE_RUNNING || /* !active */
5026                     !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */
5027                     !pm->pm_pcpu_state[cpu].pps_cpustate || /* !desired */
5028                     !pm->pm_pcpu_state[cpu].pps_stalled) /* !stalled */
5029                         continue;
5030
5031                 pm->pm_pcpu_state[cpu].pps_stalled = 0;
5032                 (*pcd->pcd_start_pmc)(cpu, adjri);
5033         }
5034 }
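/*
 * Stall recovery, in brief: the NMI path sets pps_stalled when a ring
 * fills; the loop above clears the flag and restarts the PMC only after
 * this sweep made progress on at least one entry (n != 0), so a stalled
 * PMC cannot be re-enabled into a still-full buffer.
 */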
5035
5036 /*
5037  * Event handlers.
5038  */
5039
5040 /*
5041  * Handle a process exit.
5042  *
5043  * Remove this process from all hash tables.  If this process
5044  * owned any PMCs, turn off those PMCs and deallocate them,
5045  * removing any associations with target processes.
5046  *
5047  * This function will be called by the last 'thread' of a
5048  * process.
5049  *
5050  * XXX This eventhandler gets called early in the exit process.
5051  * Consider using a 'hook' invocation from thread_exit() or equivalent
5052  * spot.  Another negative is that kse_exit doesn't seem to call
5053  * exit1() [??].
5054  *
5055  */
5056
5057 static void
5058 pmc_process_exit(void *arg __unused, struct proc *p)
5059 {
5060         struct pmc *pm;
5061         int adjri, cpu;
5062         unsigned int ri;
5063         int is_using_hwpmcs;
5064         struct pmc_owner *po;
5065         struct pmc_process *pp;
5066         struct pmc_classdep *pcd;
5067         pmc_value_t newvalue, tmp;
5068
5069         PROC_LOCK(p);
5070         is_using_hwpmcs = p->p_flag & P_HWPMC;
5071         PROC_UNLOCK(p);
5072
5073         /*
5074          * Log a sysexit event to all SS PMC owners.
5075          */
5076         PMC_EPOCH_ENTER();
5077         CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5078             if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5079                     pmclog_process_sysexit(po, p->p_pid);
5080         PMC_EPOCH_EXIT();
5081
5082         if (!is_using_hwpmcs)
5083                 return;
5084
5085         PMC_GET_SX_XLOCK();
5086         PMCDBG3(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
5087             p->p_comm);
5088
5089         /*
5090          * Since this code is invoked by the last thread in an exiting
5091          * process, we would have context switched IN at some prior
5092          * point.  However, with PREEMPTION, kernel mode context
5093          * switches may happen any time, so we want to disable a
5094          * context switch OUT till we get any PMCs targeting this
5095          * process off the hardware.
5096          *
5097          * We also need to atomically remove this process'
5098          * entry from our target process hash table, using
5099          * PMC_FLAG_REMOVE.
5100          */
5103
5104         critical_enter(); /* no preemption */
5105
5106         cpu = curthread->td_oncpu;
5107
5108         if ((pp = pmc_find_process_descriptor(p,
5109                  PMC_FLAG_REMOVE)) != NULL) {
5110
5111                 PMCDBG2(PRC,EXT,2,
5112                     "process-exit proc=%p pmc-process=%p", p, pp);
5113
5114                 /*
5115                  * The exiting process could be the target of
5116                  * some PMCs which will still be running on the
5117                  * currently executing CPU.
5118                  *
5119                  * We need to turn these PMCs off like we
5120                  * would do at context switch OUT time.
5121                  */
5122                 for (ri = 0; ri < md->pmd_npmc; ri++) {
5123
5124                         /*
5125                          * Pick up the pmc pointer from hardware
5126                          * state similar to the CSW_OUT code.
5127                          */
5128                         pm = NULL;
5129
5130                         pcd = pmc_ri_to_classdep(md, ri, &adjri);
5131
5132                         (void) (*pcd->pcd_get_config)(cpu, adjri, &pm);
5133
5134                         PMCDBG2(PRC,EXT,2, "ri=%d pm=%p", ri, pm);
5135
5136                         if (pm == NULL ||
5137                             !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
5138                                 continue;
5139
5140                         PMCDBG4(PRC,EXT,2, "ppmcs[%d]=%p pm=%p "
5141                             "state=%d", ri, pp->pp_pmcs[ri].pp_pmc,
5142                             pm, pm->pm_state);
5143
5144                         KASSERT(PMC_TO_ROWINDEX(pm) == ri,
5145                             ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
5146                                 __LINE__, PMC_TO_ROWINDEX(pm), ri));
5147
5148                         KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
5149                             ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
5150                                 __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc));
5151
5152                         KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
5153                             ("[pmc,%d] bad runcount ri %d rc %ld",
5154                                  __LINE__, ri, (unsigned long)counter_u64_fetch(pm->pm_runcount)));
5155
5156                         /*
5157                          * Change desired state, and then stop if not
5158                          * stalled. This two-step dance should avoid
5159                          * race conditions where an interrupt re-enables
5160                          * the PMC after this code has already checked
5161                          * the pm_stalled flag.
5162                          */
5163                         if (pm->pm_pcpu_state[cpu].pps_cpustate) {
5164                                 pm->pm_pcpu_state[cpu].pps_cpustate = 0;
5165                                 if (!pm->pm_pcpu_state[cpu].pps_stalled) {
5166                                         (void) pcd->pcd_stop_pmc(cpu, adjri);
5167
5168                                         if (PMC_TO_MODE(pm) == PMC_MODE_TC) {
5169                                                 pcd->pcd_read_pmc(cpu, adjri,
5170                                                     &newvalue);
5171                                                 tmp = newvalue -
5172                                                     PMC_PCPU_SAVED(cpu,ri);
5173
5174                                                 mtx_pool_lock_spin(pmc_mtxpool,
5175                                                     pm);
5176                                                 pm->pm_gv.pm_savedvalue += tmp;
5177                                                 pp->pp_pmcs[ri].pp_pmcval +=
5178                                                     tmp;
5179                                                 mtx_pool_unlock_spin(
5180                                                     pmc_mtxpool, pm);
5181                                         }
5182                                 }
5183                         }
5184
5185                         counter_u64_add(pm->pm_runcount, -1);
5186
5187                         KASSERT((int) counter_u64_fetch(pm->pm_runcount) >= 0,
5188                             ("[pmc,%d] ri %d: bad runcount %ld", __LINE__, ri, (unsigned long)counter_u64_fetch(pm->pm_runcount)));
5189
5190                         (void) pcd->pcd_config_pmc(cpu, adjri, NULL);
5191                 }
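                /*
                 * For PMC_MODE_TC above the accumulated delta is
                 * 'newvalue - PMC_PCPU_SAVED(cpu, ri)', i.e. the count
                 * accrued since this process was last switched in on
                 * 'cpu', mirroring the regular CSW_OUT path.
                 */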
5192
5193                 /*
5194                  * Inform the MD layer of this pseudo "context switch
5195                  * out"
5196                  */
5197                 (void) md->pmd_switch_out(pmc_pcpu[cpu], pp);
5198
5199                 critical_exit(); /* ok to be pre-empted now */
5200
5201                 /*
5202                  * Unlink this process from the PMCs that are
5203                  * targeting it.  This will send a signal to
5204                  * all PMC owners whose PMCs are orphaned.
5205                  *
5206                  * Log PMC value at exit time if requested.
5207                  */
5208                 for (ri = 0; ri < md->pmd_npmc; ri++)
5209                         if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
5210                                 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
5211                                     PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)))
5212                                         pmclog_process_procexit(pm, pp);
5213                                 pmc_unlink_target_process(pm, pp);
5214                         }
5215                 free(pp, M_PMC);
5216
5217         } else
5218                 critical_exit(); /* pp == NULL */
5219
5220
5221         /*
5222          * If the process owned PMCs, free them up and free up
5223          * memory.
5224          */
5225         if ((po = pmc_find_owner_descriptor(p)) != NULL) {
5226                 pmc_remove_owner(po);
5227                 pmc_destroy_owner_descriptor(po);
5228         }
5229
5230         sx_xunlock(&pmc_sx);
5231 }
5232
5233 /*
5234  * Handle a process fork.
5235  *
5236  * If the parent process 'p1' is under HWPMC monitoring, then copy
5237  * over any attached PMCs that have 'do_descendants' semantics.
5238  */
5239
5240 static void
5241 pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc,
5242     int flags)
5243 {
5244         int is_using_hwpmcs;
5245         unsigned int ri;
5246         uint32_t do_descendants;
5247         struct pmc *pm;
5248         struct pmc_owner *po;
5249         struct pmc_process *ppnew, *ppold;
5250
5251         (void) flags;           /* unused parameter */
5252
5253         PROC_LOCK(p1);
5254         is_using_hwpmcs = p1->p_flag & P_HWPMC;
5255         PROC_UNLOCK(p1);
5256
5257         /*
5258          * If there are system-wide sampling PMCs active, we need to
5259          * log all fork events to their owner's logs.
5260          */
5261         PMC_EPOCH_ENTER();
5262         CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5263             if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
5264                     pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);
5265                     pmclog_process_proccreate(po, newproc, 1);
5266             }
5267         PMC_EPOCH_EXIT();
5268
5269         if (!is_using_hwpmcs)
5270                 return;
5271
5272         PMC_GET_SX_XLOCK();
5273         PMCDBG4(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1,
5274             p1->p_pid, p1->p_comm, newproc);
5275
5276         /*
5277          * If the parent process (curthread->td_proc) is a
5278          * target of any PMCs, look for PMCs that are to be
5279          * inherited, and link these into the new process
5280          * descriptor.
5281          */
5282         if ((ppold = pmc_find_process_descriptor(curthread->td_proc,
5283                  PMC_FLAG_NONE)) == NULL)
5284                 goto done;              /* nothing to do */
5285
5286         do_descendants = 0;
5287         for (ri = 0; ri < md->pmd_npmc; ri++)
5288                 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL)
5289                         do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS;
5290         if (do_descendants == 0) /* nothing to do */
5291                 goto done;
5292
5293         /*
5294          * Now mark the new process as being tracked by this driver.
5295          */
5296         PROC_LOCK(newproc);
5297         newproc->p_flag |= P_HWPMC;
5298         PROC_UNLOCK(newproc);
5299
5300         /* allocate a descriptor for the new process  */
5301         if ((ppnew = pmc_find_process_descriptor(newproc,
5302                  PMC_FLAG_ALLOCATE)) == NULL)
5303                 goto done;
5304
5305         /*
5306          * Run through all PMCs that were targeting the old process
5307          * and which specified F_DESCENDANTS and attach them to the
5308          * new process.
5309          *
5310          * Log the fork event to all owners of PMCs attached to this
5311          * process, if not already logged.
5312          */
5313         for (ri = 0; ri < md->pmd_npmc; ri++)
5314                 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
5315                     (pm->pm_flags & PMC_F_DESCENDANTS)) {
5316                         pmc_link_target_process(pm, ppnew);
5317                         po = pm->pm_owner;
5318                         if (po->po_sscount == 0 &&
5319                             po->po_flags & PMC_PO_OWNS_LOGFILE)
5320                                 pmclog_process_procfork(po, p1->p_pid,
5321                                     newproc->p_pid);
5322                 }
5323
5324  done:
5325         sx_xunlock(&pmc_sx);
5326 }
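/*
 * PMC_F_DESCENDANTS is requested at allocation time; a hedged userland
 * sketch using pmc_allocate(3), with a purely illustrative event name:
 *
 *      pmc_allocate("instructions", PMC_MODE_TC, PMC_F_DESCENDANTS,
 *          PMC_CPU_ANY, &pmcid);
 *
 * With that flag set, the fork handler above links the PMC to the child
 * process as well.
 */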
5327
5328 static void
5329 pmc_process_threadcreate(struct thread *td)
5330 {
5331         struct pmc_owner *po;
5332
5333         PMC_EPOCH_ENTER();
5334         CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5335             if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5336                     pmclog_process_threadcreate(po, td, 1);
5337         PMC_EPOCH_EXIT();
5338 }
5339
5340 static void
5341 pmc_process_threadexit(struct thread *td)
5342 {
5343         struct pmc_owner *po;
5344
5345         PMC_EPOCH_ENTER();
5346         CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5347             if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5348                     pmclog_process_threadexit(po, td);
5349         PMC_EPOCH_EXIT();
5350 }
5351
5352 static void
5353 pmc_process_proccreate(struct proc *p)
5354 {
5355         struct pmc_owner *po;
5356
5357         PMC_EPOCH_ENTER();
5358         CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5359             if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5360                     pmclog_process_proccreate(po, p, 1 /* sync */);
5361         PMC_EPOCH_EXIT();
5362 }
5363
5364 static void
5365 pmc_process_allproc(struct pmc *pm)
5366 {
5367         struct pmc_owner *po;
5368         struct thread *td;
5369         struct proc *p;
5370
5371         po = pm->pm_owner;
5372         if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
5373                 return;
5374         sx_slock(&allproc_lock);
5375         FOREACH_PROC_IN_SYSTEM(p) {
5376                 pmclog_process_proccreate(po, p, 0 /* sync */);
5377                 PROC_LOCK(p);
5378                 FOREACH_THREAD_IN_PROC(p, td)
5379                         pmclog_process_threadcreate(po, td, 0 /* sync */);
5380                 PROC_UNLOCK(p);
5381         }
5382         sx_sunlock(&allproc_lock);
5383         pmclog_flush(po, 0);
5384 }
5385
5386 static void
5387 pmc_kld_load(void *arg __unused, linker_file_t lf)
5388 {
5389         struct pmc_owner *po;
5390
5391         /*
5392          * Notify owners of system sampling PMCs about KLD operations.
5393          */
5394         PMC_EPOCH_ENTER();
5395         CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5396                 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5397                         pmclog_process_map_in(po, (pid_t) -1,
5398                             (uintfptr_t) lf->address, lf->filename);
5399         PMC_EPOCH_EXIT();
5400
5401         /*
5402          * TODO: Notify owners of (all) process-sampling PMCs too.
5403          */
5404 }
5405
5406 static void
5407 pmc_kld_unload(void *arg __unused, const char *filename __unused,
5408     caddr_t address, size_t size)
5409 {
5410         struct pmc_owner *po;
5411
5412         PMC_EPOCH_ENTER();
5413         CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5414                 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5415                         pmclog_process_map_out(po, (pid_t) -1,
5416                             (uintfptr_t) address, (uintfptr_t) address + size);
5417         PMC_EPOCH_EXIT();
5418
5419         /*
5420          * TODO: Notify owners of process-sampling PMCs.
5421          */
5422 }
5423
5424 /*
5425  * initialization
5426  */
5427 static const char *
5428 pmc_name_of_pmcclass(enum pmc_class class)
5429 {
5430
5431         switch (class) {
5432 #undef  __PMC_CLASS
5433 #define __PMC_CLASS(S,V,D)                                              \
5434         case PMC_CLASS_##S:                                             \
5435                 return #S;
5436         __PMC_CLASSES();
5437         default:
5438                 return ("<unknown>");
5439         }
5440 }
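/*
 * The __PMC_CLASS() redefinition above expands each __PMC_CLASSES()
 * entry into a case label; e.g. an entry for TSC expands roughly to:
 *
 *      case PMC_CLASS_TSC:
 *              return "TSC";
 */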
5441
5442 /*
5443  * Base class initializer: allocate structure and set default classes.
5444  */
5445 struct pmc_mdep *
5446 pmc_mdep_alloc(int nclasses)
5447 {
5448         struct pmc_mdep *md;
5449         int     n;
5450
5451         /* SOFT + md classes */
5452         n = 1 + nclasses;
5453         md = malloc(sizeof(struct pmc_mdep) + n *
5454             sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO);
5455         md->pmd_nclass = n;
5456
5457         /* Add base class. */
5458         pmc_soft_initialize(md);
5459         return (md);
5460 }
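/*
 * Slot 0 of the returned structure is the software (SOFT) class set up
 * by pmc_soft_initialize(); machine-dependent callers fill in slots
 * 1..nclasses.
 */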
5461
5462 void
5463 pmc_mdep_free(struct pmc_mdep *md)
5464 {
5465         pmc_soft_finalize(md);
5466         free(md, M_PMC);
5467 }
5468
5469 static int
5470 generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
5471 {
5472         (void) pc; (void) pp;
5473
5474         return (0);
5475 }
5476
5477 static int
5478 generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
5479 {
5480         (void) pc; (void) pp;
5481
5482         return (0);
5483 }
5484
5485 static struct pmc_mdep *
5486 pmc_generic_cpu_initialize(void)
5487 {
5488         struct pmc_mdep *md;
5489
5490         md = pmc_mdep_alloc(0);
5491
5492         md->pmd_cputype    = PMC_CPU_GENERIC;
5493
5494         md->pmd_pcpu_init  = NULL;
5495         md->pmd_pcpu_fini  = NULL;
5496         md->pmd_switch_in  = generic_switch_in;
5497         md->pmd_switch_out = generic_switch_out;
5498
5499         return (md);
5500 }
5501
5502 static void
5503 pmc_generic_cpu_finalize(struct pmc_mdep *md)
5504 {
5505         (void) md;
5506 }
5507
5508
5509 static int
5510 pmc_initialize(void)
5511 {
5512         int c, cpu, error, n, ri;
5513         unsigned int maxcpu, domain;
5514         struct pcpu *pc;
5515         struct pmc_binding pb;
5516         struct pmc_sample *ps;
5517         struct pmc_classdep *pcd;
5518         struct pmc_samplebuffer *sb;
5519
5520         md = NULL;
5521         error = 0;
5522
5523         pmc_stats.pm_intr_ignored = counter_u64_alloc(M_WAITOK);
5524         pmc_stats.pm_intr_processed = counter_u64_alloc(M_WAITOK);
5525         pmc_stats.pm_intr_bufferfull = counter_u64_alloc(M_WAITOK);
5526         pmc_stats.pm_syscalls = counter_u64_alloc(M_WAITOK);
5527         pmc_stats.pm_syscall_errors = counter_u64_alloc(M_WAITOK);
5528         pmc_stats.pm_buffer_requests = counter_u64_alloc(M_WAITOK);
5529         pmc_stats.pm_buffer_requests_failed = counter_u64_alloc(M_WAITOK);
5530         pmc_stats.pm_log_sweeps = counter_u64_alloc(M_WAITOK);
5531         pmc_stats.pm_merges = counter_u64_alloc(M_WAITOK);
5532         pmc_stats.pm_overwrites = counter_u64_alloc(M_WAITOK);
5533
5534 #ifdef  HWPMC_DEBUG
5535         /* parse debug flags first */
5536         if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
5537                 pmc_debugstr, sizeof(pmc_debugstr)))
5538                 pmc_debugflags_parse(pmc_debugstr,
5539                     pmc_debugstr+strlen(pmc_debugstr));
5540 #endif
5541
5542         PMCDBG1(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);
5543
5544         /* check kernel version */
5545         if (pmc_kernel_version != PMC_VERSION) {
5546                 if (pmc_kernel_version == 0)
5547                         printf("hwpmc: this kernel has not been compiled with "
5548                             "'options HWPMC_HOOKS'.\n");
5549                 else
5550                         printf("hwpmc: kernel version (0x%x) does not match "
5551                             "module version (0x%x).\n", pmc_kernel_version,
5552                             PMC_VERSION);
5553                 return (EPROGMISMATCH);
5554         }
5555
5556         /*
5557          * check sysctl parameters
5558          */
5559
5560         if (pmc_hashsize <= 0) {
5561                 (void) printf("hwpmc: tunable \"hashsize\"=%d must be "
5562                     "greater than zero.\n", pmc_hashsize);
5563                 pmc_hashsize = PMC_HASH_SIZE;
5564         }
5565
5566         if (pmc_nsamples <= 0 || pmc_nsamples > 65535) {
5567                 (void) printf("hwpmc: tunable \"nsamples\"=%d out of "
5568                     "range.\n", pmc_nsamples);
5569                 pmc_nsamples = PMC_NSAMPLES;
5570         }
5571
5572         if (pmc_callchaindepth <= 0 ||
5573             pmc_callchaindepth > PMC_CALLCHAIN_DEPTH_MAX) {
5574                 (void) printf("hwpmc: tunable \"callchaindepth\"=%d out of "
5575                     "range - using %d.\n", pmc_callchaindepth,
5576                     PMC_CALLCHAIN_DEPTH_MAX);
5577                 pmc_callchaindepth = PMC_CALLCHAIN_DEPTH_MAX;
5578         }
5579
5580         md = pmc_md_initialize();
5581         if (md == NULL) {
5582                 /* Default to generic CPU. */
5583                 md = pmc_generic_cpu_initialize();
5584                 if (md == NULL)
5585                         return (ENOSYS);
5586         }
5587
5588         KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1,
5589             ("[pmc,%d] no classes or pmcs", __LINE__));
5590
5591         /* Compute the map from row-indices to classdep pointers. */
5592         pmc_rowindex_to_classdep = malloc(sizeof(struct pmc_classdep *) *
5593             md->pmd_npmc, M_PMC, M_WAITOK|M_ZERO);
5594
5595         for (n = 0; n < md->pmd_npmc; n++)
5596                 pmc_rowindex_to_classdep[n] = NULL;
5597         for (ri = c = 0; c < md->pmd_nclass; c++) {
5598                 pcd = &md->pmd_classdep[c];
5599                 for (n = 0; n < pcd->pcd_num; n++, ri++)
5600                         pmc_rowindex_to_classdep[ri] = pcd;
5601         }
5602
5603         KASSERT(ri == md->pmd_npmc,
5604             ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__,
5605             ri, md->pmd_npmc));
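        /*
         * Example: with two classes of sizes {4, 2}, row indices 0..3 map
         * to the first classdep and 4..5 to the second;
         * pmc_ri_to_classdep() later inverts this mapping in O(1).
         */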
5606
5607         maxcpu = pmc_cpu_max();
5608
5609         /* allocate space for the per-cpu array */
5610         pmc_pcpu = malloc(maxcpu * sizeof(struct pmc_cpu *), M_PMC,
5611             M_WAITOK|M_ZERO);
5612
5613         /* per-cpu 'saved values' for managing process-mode PMCs */
5614         pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc,
5615             M_PMC, M_WAITOK);
5616
5617         /* Perform CPU-dependent initialization. */
5618         pmc_save_cpu_binding(&pb);
5619         error = 0;
5620         for (cpu = 0; error == 0 && cpu < maxcpu; cpu++) {
5621                 if (!pmc_cpu_is_active(cpu))
5622                         continue;
5623                 pmc_select_cpu(cpu);
5624                 pmc_pcpu[cpu] = malloc(sizeof(struct pmc_cpu) +
5625                     md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC,
5626                     M_WAITOK|M_ZERO);
5627                 if (md->pmd_pcpu_init)
5628                         error = md->pmd_pcpu_init(md, cpu);
5629                 for (n = 0; error == 0 && n < md->pmd_nclass; n++)
5630                         error = md->pmd_classdep[n].pcd_pcpu_init(md, cpu);
5631         }
5632         pmc_restore_cpu_binding(&pb);
5633
5634         if (error)
5635                 return (error);
5636
5637         /* allocate space for the sample array */
5638         for (cpu = 0; cpu < maxcpu; cpu++) {
5639                 if (!pmc_cpu_is_active(cpu))
5640                         continue;
5641                 pc = pcpu_find(cpu);
5642                 domain = pc->pc_domain;
5643                 sb = malloc_domain(sizeof(struct pmc_samplebuffer) +
5644                     pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain,
5645                     M_WAITOK|M_ZERO);
5646                 sb->ps_read = sb->ps_write = sb->ps_samples;
5647                 sb->ps_fence = sb->ps_samples + pmc_nsamples;
5648
5649                 KASSERT(pmc_pcpu[cpu] != NULL,
5650                     ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
5651
5652                 sb->ps_callchains = malloc_domain(pmc_callchaindepth * pmc_nsamples *
5653                     sizeof(uintptr_t), M_PMC, domain, M_WAITOK|M_ZERO);
5654
5655                 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
5656                         ps->ps_pc = sb->ps_callchains +
5657                             (n * pmc_callchaindepth);
5658
5659                 pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb;
5660
5661                 sb = malloc_domain(sizeof(struct pmc_samplebuffer) +
5662                     pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain,
5663                     M_WAITOK|M_ZERO);
5664                 sb->ps_read = sb->ps_write = sb->ps_samples;
5665                 sb->ps_fence = sb->ps_samples + pmc_nsamples;
5666
5667                 KASSERT(pmc_pcpu[cpu] != NULL,
5668                     ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
5669
5670                 sb->ps_callchains = malloc_domain(pmc_callchaindepth * pmc_nsamples *
5671                     sizeof(uintptr_t), M_PMC, domain, M_WAITOK|M_ZERO);
5672
5673                 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
5674                         ps->ps_pc = sb->ps_callchains +
5675                             (n * pmc_callchaindepth);
5676
5677                 pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb;
5678
5679                 sb = malloc_domain(sizeof(struct pmc_samplebuffer) +
5680                     pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain,
5681                     M_WAITOK|M_ZERO);
5682                 sb->ps_read = sb->ps_write = sb->ps_samples;
5683                 sb->ps_fence = sb->ps_samples + pmc_nsamples;
5684
5685                 KASSERT(pmc_pcpu[cpu] != NULL,
5686                     ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
5687
5688                 sb->ps_callchains = malloc_domain(pmc_callchaindepth * pmc_nsamples *
5689                     sizeof(uintptr_t), M_PMC, domain, M_WAITOK|M_ZERO);
5690
5691                 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
5692                         ps->ps_pc = sb->ps_callchains +
5693                             (n * pmc_callchaindepth);
5694
5695                 pmc_pcpu[cpu]->pc_sb[PMC_UR] = sb;
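                /*
                 * Note: the three per-ring allocations above are identical;
                 * an equivalent formulation (sketch, assuming the ring
                 * constants PMC_HR/PMC_SR/PMC_UR are 0..PMC_NUM_SR-1):
                 *
                 *      for (ring = 0; ring < PMC_NUM_SR; ring++)
                 *              pmc_pcpu[cpu]->pc_sb[ring] = pmc_alloc_sb(...);
                 *
                 * where pmc_alloc_sb() is a hypothetical helper wrapping
                 * the malloc_domain() and ps_pc wiring shown above.
                 */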
5696         }
5697
5698         /* allocate space for the row disposition array */
5699         pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
5700             M_PMC, M_WAITOK|M_ZERO);
5701
5702         /* mark all PMCs as available */
5703         for (n = 0; n < (int) md->pmd_npmc; n++)
5704                 PMC_MARK_ROW_FREE(n);
5705
5706         /* allocate thread hash tables */
5707         pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
5708             &pmc_ownerhashmask);
5709
5710         pmc_processhash = hashinit(pmc_hashsize, M_PMC,
5711             &pmc_processhashmask);
5712         mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",
5713             MTX_SPIN);
5714
5715         CK_LIST_INIT(&pmc_ss_owners);
5716         pmc_ss_count = 0;
5717
5718         /* allocate a pool of spin mutexes */
5719         pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size,
5720             MTX_SPIN);
5721
5722         PMCDBG4(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
5723             "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
5724             pmc_processhash, pmc_processhashmask);
5725
5726         /* Initialize a spin mutex for the thread free list. */
5727         mtx_init(&pmc_threadfreelist_mtx, "pmc-threadfreelist", "pmc-leaf",
5728             MTX_SPIN);
5729
5730         /*
5731          * Initialize the deferred task used to free thread descriptors
5732          * back to their pool.
5733          */
5734         taskqgroup_config_gtask_init(NULL, &free_gtask, pmc_thread_descriptor_pool_free_task, "thread descriptor pool free task");
5735
5736         /* register process {exit,fork,exec} handlers */
5737         pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
5738             pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
5739         pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
5740             pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);
5741
5742         /* register kld event handlers */
5743         pmc_kld_load_tag = EVENTHANDLER_REGISTER(kld_load, pmc_kld_load,
5744             NULL, EVENTHANDLER_PRI_ANY);
5745         pmc_kld_unload_tag = EVENTHANDLER_REGISTER(kld_unload, pmc_kld_unload,
5746             NULL, EVENTHANDLER_PRI_ANY);
5747
5748         /* initialize logging */
5749         pmclog_initialize();
5750
5751         /* set hook functions */
5752         pmc_intr = md->pmd_intr;
5753         wmb();
5754         pmc_hook = pmc_hook_handler;
5755
5756         if (error == 0) {
5757                 printf(PMC_MODULE_NAME ":");
5758                 for (n = 0; n < (int) md->pmd_nclass; n++) {
5759                         pcd = &md->pmd_classdep[n];
5760                         printf(" %s/%d/%d/0x%b",
5761                             pmc_name_of_pmcclass(pcd->pcd_class),
5762                             pcd->pcd_num,
5763                             pcd->pcd_width,
5764                             pcd->pcd_caps,
5765                             "\20"
5766                             "\1INT\2USR\3SYS\4EDG\5THR"
5767                             "\6REA\7WRI\10INV\11QUA\12PRC"
5768                             "\13TAG\14CSC");
5769                 }
5770                 printf("\n");
5771         }
5772
5773         return (error);
5774 }
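/*
 * The banner printed above lists, per class, name/#pmcs/width/capabilities;
 * an illustrative (not exact) boot line:
 *
 *      hwpmc: SOFT/16/64/0x67<INT,USR,SYS,REA,WRI> IAP/4/48/0x3ff<...>
 */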
5775
5776 /* prepare to be unloaded */
5777 static void
5778 pmc_cleanup(void)
5779 {
5780         int c, cpu;
5781         unsigned int maxcpu;
5782         struct pmc_ownerhash *ph;
5783         struct pmc_owner *po, *tmp;
5784         struct pmc_binding pb;
5785 #ifdef  HWPMC_DEBUG
5786         struct pmc_processhash *prh;
5787 #endif
5788
5789         PMCDBG0(MOD,INI,0, "cleanup");
5790
5791         /* switch off sampling */
5792         CPU_FOREACH(cpu)
5793                 DPCPU_ID_SET(cpu, pmc_sampled, 0);
5794         pmc_intr = NULL;
5795
5796         sx_xlock(&pmc_sx);
5797         if (pmc_hook == NULL) { /* being unloaded already */
5798                 sx_xunlock(&pmc_sx);
5799                 return;
5800         }
5801
5802         pmc_hook = NULL; /* prevent new threads from entering module */
5803
5804         /* deregister event handlers */
5805         EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
5806         EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
5807         EVENTHANDLER_DEREGISTER(kld_load, pmc_kld_load_tag);
5808         EVENTHANDLER_DEREGISTER(kld_unload, pmc_kld_unload_tag);
5809
5810         /* send SIGBUS to all owner threads, free up allocations */
5811         if (pmc_ownerhash)
5812                 for (ph = pmc_ownerhash;
5813                      ph <= &pmc_ownerhash[pmc_ownerhashmask];
5814                      ph++) {
5815                         LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
5816                                 pmc_remove_owner(po);
5817
5818                                 /* send SIGBUS to owner processes */
5819                                 PMCDBG3(MOD,INI,2, "cleanup signal proc=%p "
5820                                     "(%d, %s)", po->po_owner,
5821                                     po->po_owner->p_pid,
5822                                     po->po_owner->p_comm);
5823
5824                                 PROC_LOCK(po->po_owner);
5825                                 kern_psignal(po->po_owner, SIGBUS);
5826                                 PROC_UNLOCK(po->po_owner);
5827
5828                                 pmc_destroy_owner_descriptor(po);
5829                         }
5830                 }
5831
5832         /* reclaim allocated data structures */
5833         mtx_destroy(&pmc_threadfreelist_mtx);
5834         pmc_thread_descriptor_pool_drain();
5835
5836         if (pmc_mtxpool)
5837                 mtx_pool_destroy(&pmc_mtxpool);
5838
5839         mtx_destroy(&pmc_processhash_mtx);
5840         taskqgroup_config_gtask_deinit(&free_gtask);
5841         if (pmc_processhash) {
5842 #ifdef  HWPMC_DEBUG
5843                 struct pmc_process *pp;
5844
5845                 PMCDBG0(MOD,INI,3, "destroy process hash");
5846                 for (prh = pmc_processhash;
5847                      prh <= &pmc_processhash[pmc_processhashmask];
5848                      prh++)
5849                         LIST_FOREACH(pp, prh, pp_next)
5850                             PMCDBG1(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);
5851 #endif
5852
5853                 hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
5854                 pmc_processhash = NULL;
5855         }
5856
5857         if (pmc_ownerhash) {
5858                 PMCDBG0(MOD,INI,3, "destroy owner hash");
5859                 hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
5860                 pmc_ownerhash = NULL;
5861         }
5862
5863         KASSERT(CK_LIST_EMPTY(&pmc_ss_owners),
5864             ("[pmc,%d] Global SS owner list not empty", __LINE__));
5865         KASSERT(pmc_ss_count == 0,
5866             ("[pmc,%d] Global SS count not empty", __LINE__));
5867
5868         /* do processor and pmc-class dependent cleanup */
5869         maxcpu = pmc_cpu_max();
5870
5871         PMCDBG0(MOD,INI,3, "md cleanup");
5872         if (md) {
5873                 pmc_save_cpu_binding(&pb);
5874                 for (cpu = 0; cpu < maxcpu; cpu++) {
5875                         PMCDBG2(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
5876                             cpu, pmc_pcpu[cpu]);
5877                         if (!pmc_cpu_is_active(cpu) || pmc_pcpu[cpu] == NULL)
5878                                 continue;
5879                         pmc_select_cpu(cpu);
5880                         for (c = 0; c < md->pmd_nclass; c++)
5881                                 md->pmd_classdep[c].pcd_pcpu_fini(md, cpu);
5882                         if (md->pmd_pcpu_fini)
5883                                 md->pmd_pcpu_fini(md, cpu);
5884                 }
5885
5886                 if (md->pmd_cputype == PMC_CPU_GENERIC)
5887                         pmc_generic_cpu_finalize(md);
5888                 else
5889                         pmc_md_finalize(md);
5890
5891                 pmc_mdep_free(md);
5892                 md = NULL;
5893                 pmc_restore_cpu_binding(&pb);
5894         }
5895
5896         /* Free per-cpu descriptors. */
5897         for (cpu = 0; cpu < maxcpu; cpu++) {
5898                 if (!pmc_cpu_is_active(cpu))
5899                         continue;
5900                 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_HR] != NULL,
5901                     ("[pmc,%d] Null hw cpu sample buffer cpu=%d", __LINE__,
5902                         cpu));
5903                 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_SR] != NULL,
5904                     ("[pmc,%d] Null sw cpu sample buffer cpu=%d", __LINE__,
5905                         cpu));
5906                 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_UR] != NULL,
5907                     ("[pmc,%d] Null userret cpu sample buffer cpu=%d", __LINE__,
5908                         cpu));
5909                 free_domain(pmc_pcpu[cpu]->pc_sb[PMC_HR]->ps_callchains, M_PMC);
5910                 free_domain(pmc_pcpu[cpu]->pc_sb[PMC_HR], M_PMC);
5911                 free_domain(pmc_pcpu[cpu]->pc_sb[PMC_SR]->ps_callchains, M_PMC);
5912                 free_domain(pmc_pcpu[cpu]->pc_sb[PMC_SR], M_PMC);
5913                 free_domain(pmc_pcpu[cpu]->pc_sb[PMC_UR]->ps_callchains, M_PMC);
5914                 free_domain(pmc_pcpu[cpu]->pc_sb[PMC_UR], M_PMC);
5915                 free_domain(pmc_pcpu[cpu], M_PMC);
5916         }
5917
5918         free(pmc_pcpu, M_PMC);
5919         pmc_pcpu = NULL;
5920
5921         free(pmc_pcpu_saved, M_PMC);
5922         pmc_pcpu_saved = NULL;
5923
5924         if (pmc_pmcdisp) {
5925                 free(pmc_pmcdisp, M_PMC);
5926                 pmc_pmcdisp = NULL;
5927         }
5928
5929         if (pmc_rowindex_to_classdep) {
5930                 free(pmc_rowindex_to_classdep, M_PMC);
5931                 pmc_rowindex_to_classdep = NULL;
5932         }
5933
5934         pmclog_shutdown();
5935         counter_u64_free(pmc_stats.pm_intr_ignored);
5936         counter_u64_free(pmc_stats.pm_intr_processed);
5937         counter_u64_free(pmc_stats.pm_intr_bufferfull);
5938         counter_u64_free(pmc_stats.pm_syscalls);
5939         counter_u64_free(pmc_stats.pm_syscall_errors);
5940         counter_u64_free(pmc_stats.pm_buffer_requests);
5941         counter_u64_free(pmc_stats.pm_buffer_requests_failed);
5942         counter_u64_free(pmc_stats.pm_log_sweeps);
5943         counter_u64_free(pmc_stats.pm_merges);
5944         counter_u64_free(pmc_stats.pm_overwrites);
5945         sx_xunlock(&pmc_sx);    /* we are done */
5946 }
5947
5948 /*
5949  * The function called at load/unload.
5950  */
5951
5952 static int
5953 load(struct module *module __unused, int cmd, void *arg __unused)
5954 {
5955         int error;
5956
5957         error = 0;
5958
5959         switch (cmd) {
5960         case MOD_LOAD:
5961                 /* initialize the subsystem */
5962                 error = pmc_initialize();
5963                 if (error != 0)
5964                         break;
5965                 PMCDBG2(MOD,INI,1, "syscall=%d maxcpu=%d",
5966                     pmc_syscall_num, pmc_cpu_max());
5967                 break;
5968
5969
5970         case MOD_UNLOAD:
5971         case MOD_SHUTDOWN:
5972                 pmc_cleanup();
5973                 PMCDBG0(MOD,INI,1, "unloaded");
5974                 break;
5975
5976         default:
5977                 error = EINVAL; /* XXX should panic(9) */
5978                 break;
5979         }
5980
5981         return (error);
5982 }