2 * Copyright (c) 2003-2008, Joseph Koshy
3 * Copyright (c) 2007 The FreeBSD Foundation
6 * Portions of this software were developed by A. Joseph Koshy under
7 * sponsorship from the FreeBSD Foundation and Google, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 #include <dev/hwpmc/pmc_events.h>
38 #include <machine/pmc_mdep.h>
39 #include <machine/profile.h>
41 #define PMC_MODULE_NAME "hwpmc" /* kernel module name; also forms sysctl names (see PMC_SYSCTL_NAME_PREFIX) */
42 #define PMC_NAME_MAX 64 /* HW counter name size */
43 #define PMC_CLASS_MAX 8 /* max #classes of PMCs per-system */
46 * Kernel<->userland API version number [MMmmpppp]
48 * Major numbers are to be incremented when an incompatible change to
49 * the ABI occurs that older clients will not be able to handle.
51 * Minor numbers are incremented when a backwards compatible change
52 * occurs that allows older correct programs to run unchanged. For
53 * example, when support for a new PMC type is added.
55 * The patch version is incremented for every bug fix.
57 #define PMC_VERSION_MAJOR 0x03 /* bumped on incompatible ABI change */
58 #define PMC_VERSION_MINOR 0x01 /* bumped on backwards-compatible change */
59 #define PMC_VERSION_PATCH 0x0000 /* bumped for every bug fix */
/* Packed [MMmmpppp]: 8-bit major, 8-bit minor, 16-bit patch. */
61 #define PMC_VERSION (PMC_VERSION_MAJOR << 24 | \
62 PMC_VERSION_MINOR << 16 | PMC_VERSION_PATCH)
65 * Kinds of CPUs known.
67 * We keep track of CPU variants that need to be distinguished in
68 * some way for PMC operations. CPU names are grouped by manufacturer
69 * and numbered sparsely in order to minimize changes to the ABI involved
70 * when new CPUs are added.
/*
 * Known CPU variants: __PMC_CPU(symbol, value, description).
 * Values are grouped by vendor and sparsely numbered; they are part of
 * the ABI, so append new entries rather than renumbering.
 */
73 #define __PMC_CPUS() \
74 __PMC_CPU(AMD_K7, 0x00, "AMD K7") \
75 __PMC_CPU(AMD_K8, 0x01, "AMD K8") \
76 __PMC_CPU(INTEL_P5, 0x80, "Intel Pentium") \
77 __PMC_CPU(INTEL_P6, 0x81, "Intel Pentium Pro") \
78 __PMC_CPU(INTEL_CL, 0x82, "Intel Celeron") \
79 __PMC_CPU(INTEL_PII, 0x83, "Intel Pentium II") \
80 __PMC_CPU(INTEL_PIII, 0x84, "Intel Pentium III") \
81 __PMC_CPU(INTEL_PM, 0x85, "Intel Pentium M") \
82 __PMC_CPU(INTEL_PIV, 0x86, "Intel Pentium IV") \
83 __PMC_CPU(INTEL_CORE, 0x87, "Intel Core Solo/Duo") \
84 __PMC_CPU(INTEL_CORE2, 0x88, "Intel Core2") \
85 __PMC_CPU(INTEL_CORE2EXTREME, 0x89, "Intel Core2 Extreme") \
86 __PMC_CPU(INTEL_ATOM, 0x8A, "Intel Atom") \
87 __PMC_CPU(INTEL_COREI7, 0x8B, "Intel Core i7") \
88 __PMC_CPU(INTEL_WESTMERE, 0x8C, "Intel Westmere") \
89 __PMC_CPU(INTEL_SANDYBRIDGE, 0x8D, "Intel Sandy Bridge") \
90 __PMC_CPU(INTEL_IVYBRIDGE, 0x8E, "Intel Ivy Bridge") \
91 __PMC_CPU(INTEL_SANDYBRIDGE_XEON, 0x8F, "Intel Sandy Bridge Xeon") \
92 __PMC_CPU(INTEL_IVYBRIDGE_XEON, 0x90, "Intel Ivy Bridge Xeon") \
93 __PMC_CPU(INTEL_HASWELL, 0x91, "Intel Haswell") \
94 __PMC_CPU(INTEL_ATOM_SILVERMONT, 0x92, "Intel Atom Silvermont") \
95 __PMC_CPU(INTEL_NEHALEM_EX, 0x93, "Intel Nehalem Xeon 7500") \
96 __PMC_CPU(INTEL_WESTMERE_EX, 0x94, "Intel Westmere Xeon E7") \
97 __PMC_CPU(INTEL_HASWELL_XEON, 0x95, "Intel Haswell Xeon E5 v3") \
98 __PMC_CPU(INTEL_BROADWELL, 0x96, "Intel Broadwell") \
99 __PMC_CPU(INTEL_BROADWELL_XEON, 0x97, "Intel Broadwell Xeon") \
100 __PMC_CPU(INTEL_SKYLAKE, 0x98, "Intel Skylake") \
101 __PMC_CPU(INTEL_SKYLAKE_XEON, 0x99, "Intel Skylake Xeon") \
102 __PMC_CPU(INTEL_XSCALE, 0x100, "Intel XScale") \
103 __PMC_CPU(MIPS_24K, 0x200, "MIPS 24K") \
104 __PMC_CPU(MIPS_OCTEON, 0x201, "Cavium Octeon") \
105 __PMC_CPU(PPC_7450, 0x300, "PowerPC MPC7450") \
106 __PMC_CPU(PPC_970, 0x380, "IBM PowerPC 970") \
107 __PMC_CPU(GENERIC, 0x400, "Generic")
/* Expand the CPU table into PMC_CPU_* enumeration constants. */
111 #define __PMC_CPU(S,V,D) PMC_CPU_##S = V,
115 #define PMC_CPU_FIRST PMC_CPU_AMD_K7
116 #define PMC_CPU_LAST PMC_CPU_GENERIC
/* Classes of PMCs known to the driver: __PMC_CLASS(name). */
122 #define __PMC_CLASSES() \
123 __PMC_CLASS(TSC) /* CPU Timestamp counter */ \
124 __PMC_CLASS(K7) /* AMD K7 performance counters */ \
125 __PMC_CLASS(K8) /* AMD K8 performance counters */ \
126 __PMC_CLASS(P5) /* Intel Pentium counters */ \
127 __PMC_CLASS(P6) /* Intel Pentium Pro counters */ \
128 __PMC_CLASS(P4) /* Intel Pentium-IV counters */ \
129 __PMC_CLASS(IAF) /* Intel Core2/Atom, fixed function */ \
130 __PMC_CLASS(IAP) /* Intel Core...Atom, programmable */ \
131 __PMC_CLASS(UCF) /* Intel Uncore fixed function */ \
132 __PMC_CLASS(UCP) /* Intel Uncore programmable */ \
133 __PMC_CLASS(XSCALE) /* Intel XScale counters */ \
134 __PMC_CLASS(MIPS24K) /* MIPS 24K */ \
135 __PMC_CLASS(OCTEON) /* Cavium Octeon */ \
136 __PMC_CLASS(PPC7450) /* Motorola MPC7450 class */ \
137 __PMC_CLASS(PPC970) /* IBM PowerPC 970 class */ \
138 __PMC_CLASS(SOFT) /* Software events */
/* Expand the class table into PMC_CLASS_* enumeration constants. */
142 #define __PMC_CLASS(N) PMC_CLASS_##N ,
146 #define PMC_CLASS_FIRST PMC_CLASS_TSC
147 #define PMC_CLASS_LAST PMC_CLASS_SOFT
150 * A PMC can be in the following states:
153 * DISABLED -- administratively prohibited from being used.
154 * FREE -- HW available for use
156 * ALLOCATED -- allocated
157 * STOPPED -- allocated, but not counting events
158 * RUNNING -- allocated, and in operation; 'pm_runcount'
159 * holds the number of CPUs using this PMC at
161 * DELETED -- being destroyed
164 #define __PMC_HWSTATES() \
165 __PMC_STATE(DISABLED) \
168 #define __PMC_SWSTATES() \
169 __PMC_STATE(ALLOCATED) \
170 __PMC_STATE(STOPPED) \
171 __PMC_STATE(RUNNING) \
174 #define __PMC_STATES() \
180 #define __PMC_STATE(S) PMC_STATE_##S,
185 #define PMC_STATE_FIRST PMC_STATE_DISABLED
186 #define PMC_STATE_LAST PMC_STATE_DELETED
189 * An allocated PMC may be used as a 'global' counter or as a
190 * 'thread-private' one. Each such mode of use can be in either
191 * statistical sampling mode or in counting mode. Thus a PMC in use
193 * SS i.e., SYSTEM STATISTICAL -- system-wide statistical profiling
194 * SC i.e., SYSTEM COUNTER -- system-wide counting mode
195 * TS i.e., THREAD STATISTICAL -- thread virtual, statistical profiling
196 * TC i.e., THREAD COUNTER -- thread virtual, counting mode
198 * Statistical profiling modes rely on the PMC periodically delivering
199 * an interrupt to the CPU (when the configured number of events have
200 * been measured), so the PMC must have the ability to generate
203 * In counting modes, the PMC counts its configured events, with the
204 * value of the PMC being read whenever needed by its owner process.
206 * The thread specific modes "virtualize" the PMCs -- the PMCs appear
207 * to be thread private and count events only when the profiled thread
208 * actually executes on the CPU.
210 * The system-wide "global" modes keep the PMCs running all the time
211 * and are used to measure the behaviour of the whole system.
214 #define __PMC_MODES() \
222 #define __PMC_MODE(M,N) PMC_MODE_##M = N,
226 #define PMC_MODE_FIRST PMC_MODE_SS
227 #define PMC_MODE_LAST PMC_MODE_TC
/*
 * Predicates on an allocated PMC's operating mode: counting vs.
 * sampling, and system-wide vs. thread-virtual (see mode list above).
 */
229 #define PMC_IS_COUNTING_MODE(mode) \
230 ((mode) == PMC_MODE_SC || (mode) == PMC_MODE_TC)
231 #define PMC_IS_SYSTEM_MODE(mode) \
232 ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_SC)
233 #define PMC_IS_SAMPLING_MODE(mode) \
234 ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_TS)
235 #define PMC_IS_VIRTUAL_MODE(mode) \
236 ((mode) == PMC_MODE_TS || (mode) == PMC_MODE_TC)
239 * PMC row disposition
/*
 * Row dispositions: __PMC_DISP(name).  NB: the macro parameter 'N' is
 * unused; kept for historical/ABI reasons -- TODO confirm before removing.
 */
242 #define __PMC_DISPOSITIONS(N) \
243 __PMC_DISP(STANDALONE) /* global/disabled counters */ \
244 __PMC_DISP(FREE) /* free/available */ \
245 __PMC_DISP(THREAD) /* thread-virtual PMCs */ \
246 __PMC_DISP(UNKNOWN) /* sentinel */
/* Expand dispositions into PMC_DISP_* enumeration constants. */
250 #define __PMC_DISP(D) PMC_DISP_##D ,
254 #define PMC_DISP_FIRST PMC_DISP_STANDALONE
/* UNKNOWN is a sentinel, deliberately outside the FIRST..LAST range. */
255 #define PMC_DISP_LAST PMC_DISP_THREAD
258 * Counter capabilities
260 * __PMC_CAPS(NAME, VALUE, DESCRIPTION)
/* Capability table; each VALUE is a bit position (see __PMC_CAP below). */
263 #define __PMC_CAPS() \
264 __PMC_CAP(INTERRUPT, 0, "generate interrupts") \
265 __PMC_CAP(USER, 1, "count user-mode events") \
266 __PMC_CAP(SYSTEM, 2, "count system-mode events") \
267 __PMC_CAP(EDGE, 3, "do edge detection of events") \
268 __PMC_CAP(THRESHOLD, 4, "ignore events below a threshold") \
269 __PMC_CAP(READ, 5, "read PMC counter") \
270 __PMC_CAP(WRITE, 6, "reprogram PMC counter") \
271 __PMC_CAP(INVERT, 7, "invert comparison sense") \
272 __PMC_CAP(QUALIFIER, 8, "further qualify monitored events") \
273 __PMC_CAP(PRECISE, 9, "perform precise sampling") \
274 __PMC_CAP(TAGGING, 10, "tag upstream events") \
275 __PMC_CAP(CASCADE, 11, "cascade counters")
/* Expand capabilities into single-bit PMC_CAP_* enumeration constants. */
280 #define __PMC_CAP(NAME, VALUE, DESCR) PMC_CAP_##NAME = (1 << VALUE) ,
284 #define PMC_CAP_FIRST PMC_CAP_INTERRUPT
285 #define PMC_CAP_LAST PMC_CAP_CASCADE
290 * These are generated from the definitions in "dev/hwpmc/pmc_events.h".
295 #undef __PMC_EV_BLOCK
296 #define __PMC_EV_BLOCK(C,V) PMC_EV_ ## C ## __BLOCK_START = (V) - 1 ,
297 #define __PMC_EV(C,N) PMC_EV_ ## C ## _ ## N ,
302 * PMC SYSCALL INTERFACE
306 * "PMC_OPS" -- these are the commands recognized by the kernel
307 * module, and are used when performing a system call from userland.
/* Syscall commands recognized by the kernel module: __PMC_OP(name, descr). */
309 #define __PMC_OPS() \
310 __PMC_OP(CONFIGURELOG, "Set log file") \
311 __PMC_OP(FLUSHLOG, "Flush log file") \
312 __PMC_OP(GETCPUINFO, "Get system CPU information") \
313 __PMC_OP(GETDRIVERSTATS, "Get driver statistics") \
314 __PMC_OP(GETMODULEVERSION, "Get module version") \
315 __PMC_OP(GETPMCINFO, "Get per-cpu PMC information") \
316 __PMC_OP(PMCADMIN, "Set PMC state") \
317 __PMC_OP(PMCALLOCATE, "Allocate and configure a PMC") \
318 __PMC_OP(PMCATTACH, "Attach a PMC to a process") \
319 __PMC_OP(PMCDETACH, "Detach a PMC from a process") \
320 __PMC_OP(PMCGETMSR, "Get a PMC's hardware address") \
321 __PMC_OP(PMCRELEASE, "Release a PMC") \
322 __PMC_OP(PMCRW, "Read/Set a PMC") \
323 __PMC_OP(PMCSETCOUNT, "Set initial count/sampling rate") \
324 __PMC_OP(PMCSTART, "Start a PMC") \
325 __PMC_OP(PMCSTOP, "Stop a PMC") \
326 __PMC_OP(WRITELOG, "Write a cookie to the log file") \
327 __PMC_OP(CLOSELOG, "Close log file") \
328 __PMC_OP(GETDYNEVENTINFO, "Get dynamic events list")
333 #define __PMC_OP(N, D) PMC_OP_##N, /* expand ops into PMC_OP_* constants */
339 * Flags used in operations on PMCs.
342 #define PMC_F_FORCE 0x00000001 /*OP ADMIN force operation */
343 #define PMC_F_DESCENDANTS 0x00000002 /*OP ALLOCATE track descendants */
344 #define PMC_F_LOG_PROCCSW 0x00000004 /*OP ALLOCATE track ctx switches */
345 #define PMC_F_LOG_PROCEXIT 0x00000008 /*OP ALLOCATE log proc exits */
346 #define PMC_F_NEWVALUE 0x00000010 /*OP RW write new value */
347 #define PMC_F_OLDVALUE 0x00000020 /*OP RW get old value */
348 #define PMC_F_KGMON 0x00000040 /*OP ALLOCATE kgmon(8) profiling */
350 #define PMC_F_CALLCHAIN 0x00000080 /*OP ALLOCATE capture callchains */
353 #define PMC_F_ATTACHED_TO_OWNER 0x00010000 /*attached to owner*/
354 #define PMC_F_NEEDS_LOGFILE 0x00020000 /*needs log file */
355 #define PMC_F_ATTACH_DONE 0x00040000 /*attached at least once */
357 #define PMC_CALLCHAIN_DEPTH_MAX 128 /* upper bound on captured callchain depth */
359 #define PMC_CC_F_USERSPACE 0x01 /*userspace callchain*/
362 * Cookies used to denote allocated PMCs, and the values of PMCs.
365 typedef uint32_t pmc_id_t; /* handle for an allocated PMC; packed format described below */
366 typedef uint64_t pmc_value_t; /* value read from / written to a PMC */
368 #define PMC_ID_INVALID (~ (pmc_id_t) 0) /* all-ones: never a valid id */
371 * PMC IDs have the following format:
373 * +--------+----------+-----------+-----------+
374 * | CPU | PMC MODE | PMC CLASS | ROW INDEX |
375 * +--------+----------+-----------+-----------+
377 * where each field is 8 bits wide. Field 'CPU' is set to the
378 * requested CPU for system-wide PMCs or PMC_CPU_ANY for process-mode
379 * PMCs. Field 'PMC MODE' is the allocated PMC mode. Field 'PMC
380 * CLASS' is the class of the PMC. Field 'ROW INDEX' is the row index
383 * The 'ROW INDEX' ranges over 0..NHWPMCS where NHWPMCS is the total
384 * number of hardware PMCs on this cpu.
/*
 * Extract / build the fields of a packed PMC id.  Layout (8 bits each,
 * high to low): CPU, MODE, CLASS, ROW INDEX -- see the comment above.
 *
 * The masks in PMC_ID_MAKE_ID use unsigned constants (0xFFU) so that the
 * shifts are performed in unsigned arithmetic: left-shifting a signed
 * int into the sign bit (e.g. a CPU value >= 0x80 shifted by 24) is
 * undefined behavior in C.  Results are unchanged for all valid inputs.
 */
#define PMC_ID_TO_ROWINDEX(ID) ((ID) & 0xFF)
#define PMC_ID_TO_CLASS(ID) (((ID) & 0xFF00) >> 8)
#define PMC_ID_TO_MODE(ID) (((ID) & 0xFF0000) >> 16)
#define PMC_ID_TO_CPU(ID) (((ID) & 0xFF000000) >> 24)
#define PMC_ID_MAKE_ID(CPU,MODE,CLASS,ROWINDEX) \
	((((CPU) & 0xFFU) << 24) | (((MODE) & 0xFFU) << 16) | \
	(((CLASS) & 0xFFU) << 8) | ((ROWINDEX) & 0xFFU))
397 * Data structures for system calls supported by the pmc driver.
403 * Allocate a PMC on the named CPU.
/*
 * Wildcard CPU designation for process-mode PMC allocations.
 * Parenthesized so the expansion cannot interact with surrounding
 * operators (macro-hygiene; value is unchanged: all bits set).
 */
#define PMC_CPU_ANY (~0)
408 struct pmc_op_pmcallocate {
409 uint32_t pm_caps; /* PMC_CAP_* */
410 uint32_t pm_cpu; /* CPU number or PMC_CPU_ANY */
411 enum pmc_class pm_class; /* class of PMC desired */
412 enum pmc_event pm_ev; /* [enum pmc_event] desired */
413 uint32_t pm_flags; /* additional modifiers PMC_F_* */
414 enum pmc_mode pm_mode; /* desired mode */
415 pmc_id_t pm_pmcid; /* [return] process pmc id */
417 union pmc_md_op_pmcallocate pm_md; /* MD layer extensions */
423 * Set the administrative state (i.e., whether enabled or disabled) of
424 * a PMC 'pm_pmc' on CPU 'pm_cpu'. Note that 'pm_pmc' specifies an
425 * absolute PMC number and need not have been first allocated by the
429 struct pmc_op_pmcadmin {
430 int pm_cpu; /* CPU# */
431 uint32_t pm_flags; /* flags */
432 int pm_pmc; /* PMC# */
433 enum pmc_state pm_state; /* desired state */
437 * OP PMCATTACH / OP PMCDETACH
439 * Attach/detach a PMC and a process.
442 struct pmc_op_pmcattach {
443 pmc_id_t pm_pmc; /* PMC to attach to */
444 pid_t pm_pid; /* target process */
450 * Set the sampling rate (i.e., the reload count) for statistical counters.
451 * 'pm_pmcid' needs to have been previously allocated using PMCALLOCATE.
454 struct pmc_op_pmcsetcount {
455 pmc_value_t pm_count; /* initial/sample count */
456 pmc_id_t pm_pmcid; /* PMC id to set */
463 * Read the value of a PMC named by 'pm_pmcid'. 'pm_pmcid' needs
464 * to have been previously allocated using PMCALLOCATE.
468 struct pmc_op_pmcrw {
469 uint32_t pm_flags; /* PMC_F_{OLD,NEW}VALUE*/
470 pmc_id_t pm_pmcid; /* pmc id */
471 pmc_value_t pm_value; /* new&returned value */
478 * retrieve PMC state for a named CPU. The caller is expected to
479 * allocate 'npmc' * 'struct pmc_info' bytes of space for the return
484 char pm_name[PMC_NAME_MAX]; /* pmc name */
485 enum pmc_class pm_class; /* enum pmc_class */
486 int pm_enabled; /* whether enabled */
487 enum pmc_disp pm_rowdisp; /* FREE, THREAD or STANDALONE */
488 pid_t pm_ownerpid; /* owner, or -1 */
489 enum pmc_mode pm_mode; /* current mode [enum pmc_mode] */
490 enum pmc_event pm_event; /* current event */
491 uint32_t pm_flags; /* current flags */
492 pmc_value_t pm_reloadcount; /* sampling counters only */
495 struct pmc_op_getpmcinfo {
496 int32_t pm_cpu; /* 0 <= cpu < mp_maxid */
497 struct pmc_info pm_pmcs[]; /* space for 'npmc' structures */
504 * Retrieve system CPU information.
508 struct pmc_classinfo {
509 enum pmc_class pm_class; /* class id */
510 uint32_t pm_caps; /* counter capabilities */
511 uint32_t pm_width; /* width of the PMC */
512 uint32_t pm_num; /* number of PMCs in class */
515 struct pmc_op_getcpuinfo {
516 enum pmc_cputype pm_cputype; /* what kind of CPU */
517 uint32_t pm_ncpu; /* max CPU number */
518 uint32_t pm_npmc; /* #PMCs per CPU */
519 uint32_t pm_nclass; /* #classes of PMCs */
520 struct pmc_classinfo pm_classes[PMC_CLASS_MAX];
526 * Configure a log file for writing system-wide statistics to.
529 struct pmc_op_configurelog {
531 int pm_logfd; /* logfile fd (or -1) */
537 * Retrieve pmc(4) driver-wide statistics.
540 struct pmc_op_getdriverstats {
541 unsigned int pm_intr_ignored; /* #interrupts ignored */
542 unsigned int pm_intr_processed; /* #interrupts processed */
543 unsigned int pm_intr_bufferfull; /* #interrupts with ENOSPC */
544 unsigned int pm_syscalls; /* #syscalls */
545 unsigned int pm_syscall_errors; /* #syscalls with errors */
546 unsigned int pm_buffer_requests; /* #buffer requests */
547 unsigned int pm_buffer_requests_failed; /* #failed buffer requests */
548 unsigned int pm_log_sweeps; /* #sample buffer processing
553 * OP RELEASE / OP START / OP STOP
555 * Simple operations on a PMC id.
558 struct pmc_op_simple {
565 * Flush the current log buffer and write 4 bytes of user data to it.
568 struct pmc_op_writelog {
569 uint32_t pm_userdata;
575 * Retrieve the machine specific address associated with the allocated
576 * PMC. This number can be used subsequently with a read-performance-counter
580 struct pmc_op_getmsr {
581 uint32_t pm_msr; /* machine specific address */
582 pmc_id_t pm_pmcid; /* allocated pmc id */
588 * Retrieve a PMC dynamic class events list.
591 struct pmc_dyn_event_descr {
592 char pm_ev_name[PMC_NAME_MAX];
593 enum pmc_event pm_ev_code;
596 struct pmc_op_getdyneventinfo {
597 enum pmc_class pm_class;
598 unsigned int pm_nevent;
599 struct pmc_dyn_event_descr pm_events[PMC_EV_DYN_COUNT];
604 #include <sys/malloc.h>
605 #include <sys/sysctl.h>
606 #include <sys/_cpuset.h>
608 #include <machine/frame.h>
/* Compile-time sizing constants for driver data structures. */
610 #define PMC_HASH_SIZE 1024
611 #define PMC_MTXPOOL_SIZE 2048
612 #define PMC_LOG_BUFFER_SIZE 4
613 #define PMC_NLOGBUFFERS 1024
614 #define PMC_NSAMPLES 1024
615 #define PMC_CALLCHAIN_DEPTH 32 /* default depth; bounded by PMC_CALLCHAIN_DEPTH_MAX */
617 #define PMC_SYSCTL_NAME_PREFIX "kern." PMC_MODULE_NAME "." /* prefix for this module's sysctls */
622 * (b) - pmc_bufferlist_mtx (spin lock)
623 * (k) - pmc_kthread_mtx (sleep lock)
624 * (o) - po->po_mtx (spin lock)
631 struct pmc_syscall_args {
632 register_t pmop_code; /* one of PMC_OP_* */
633 void *pmop_data; /* syscall parameter */
637 * Interface to processor specific stuff
643 * Machine independent (i.e., the common parts) of a human readable
648 char pd_name[PMC_NAME_MAX]; /* name */
649 uint32_t pd_caps; /* capabilities */
650 enum pmc_class pd_class; /* class of the PMC */
651 uint32_t pd_width; /* width in bits */
657 * This structure records all the target processes associated with a
662 LIST_ENTRY(pmc_target) pt_next;
663 struct pmc_process *pt_process; /* target descriptor */
669 * Describes each allocated PMC.
671 * Each PMC has precisely one owner, namely the process that allocated
674 * A PMC may be attached to multiple target processes. The
675 * 'pm_targets' field links all the target processes being monitored
678 * The 'pm_savedvalue' field is protected by a mutex.
680 * On a multi-cpu machine, multiple target threads associated with a
681 * process-virtual PMC could be concurrently executing on different
682 * CPUs. The 'pm_runcount' field is atomically incremented every time
683 * the PMC gets scheduled on a CPU and atomically decremented when it
684 * gets descheduled. Deletion of a PMC is only permitted when this
690 LIST_HEAD(,pmc_target) pm_targets; /* list of target processes */
691 LIST_ENTRY(pmc) pm_next; /* owner's list */
694 * System-wide PMCs are allocated on a CPU and are not moved
695 * around. For system-wide PMCs we record the CPU the PMC was
696 * allocated on in the 'CPU' field of the pmc ID.
698 * Virtual PMCs run on whichever CPU is currently executing
699 * their targets' threads. For these PMCs we need to save
700 * their current PMC counter values when they are taken off
705 pmc_value_t pm_savedvalue; /* Virtual PMCS */
709 * For sampling mode PMCs, we keep track of the PMC's "reload
710 * count", which is the counter value to be loaded in when
711 * arming the PMC for the next counting session. For counting
712 * modes on PMCs that are read-only (e.g., the x86 TSC), we
713 * keep track of the initial value at the start of
714 * counting-mode operation.
718 pmc_value_t pm_reloadcount; /* sampling PMC modes */
719 pmc_value_t pm_initial; /* counting PMC modes */
722 volatile cpuset_t pm_stalled; /* marks stalled sampling PMCs */
723 volatile cpuset_t pm_cpustate; /* CPUs where PMC should be active */
724 uint32_t pm_caps; /* PMC capabilities */
725 enum pmc_event pm_event; /* event being measured */
726 uint32_t pm_flags; /* additional flags PMC_F_... */
727 struct pmc_owner *pm_owner; /* owner thread state */
728 int pm_runcount; /* #cpus currently on */
729 enum pmc_state pm_state; /* current PMC state */
732 * The PMC ID field encodes the row-index for the PMC, its
733 * mode, class and the CPU# associated with the PMC.
736 pmc_id_t pm_id; /* allocated PMC id */
739 union pmc_md_pmc pm_md;
743 * Accessor macros for 'struct pmc'
746 #define PMC_TO_MODE(P) PMC_ID_TO_MODE((P)->pm_id) /* operating mode */
747 #define PMC_TO_CLASS(P) PMC_ID_TO_CLASS((P)->pm_id) /* PMC class */
748 #define PMC_TO_ROWINDEX(P) PMC_ID_TO_ROWINDEX((P)->pm_id) /* hardware row index */
749 #define PMC_TO_CPU(P) PMC_ID_TO_CPU((P)->pm_id) /* CPU# (system-wide PMCs) */
755 * Record a 'target' process being profiled.
757 * The target process being profiled could be different from the owner
758 * process which allocated the PMCs. Each target process descriptor
759 * is associated with NHWPMC 'struct pmc *' pointers. Each PMC at a
760 * given hardware row-index 'n' will use slot 'n' of the 'pp_pmcs[]'
761 * array. The size of this structure is thus PMC architecture
766 struct pmc_targetstate {
767 struct pmc *pp_pmc; /* target PMC */
768 pmc_value_t pp_pmcval; /* per-process value */
772 LIST_ENTRY(pmc_process) pp_next; /* hash chain */
773 int pp_refcnt; /* reference count */
774 uint32_t pp_flags; /* flags PMC_PP_* */
775 struct proc *pp_proc; /* target thread */
776 struct pmc_targetstate pp_pmcs[]; /* NHWPMCs */
779 #define PMC_PP_ENABLE_MSR_ACCESS 0x00000001
784 * We associate a PMC with an 'owner' process.
786 * A process can be associated with 0..NCPUS*NHWPMC PMCs during its
787 * lifetime, where NCPUS is the numbers of CPUS in the system and
788 * NHWPMC is the number of hardware PMCs per CPU. These are
789 * maintained in the list headed by the 'po_pmcs' to save on space.
794 LIST_ENTRY(pmc_owner) po_next; /* hash chain */
795 LIST_ENTRY(pmc_owner) po_ssnext; /* list of SS PMC owners */
796 LIST_HEAD(, pmc) po_pmcs; /* owned PMC list */
797 TAILQ_HEAD(, pmclog_buffer) po_logbuffers; /* (o) logbuffer list */
798 struct mtx po_mtx; /* spin lock for (o) */
799 struct proc *po_owner; /* owner proc */
800 uint32_t po_flags; /* (k) flags PMC_PO_* */
801 struct proc *po_kthread; /* (k) helper kthread */
802 struct pmclog_buffer *po_curbuf; /* current log buffer */
803 struct file *po_file; /* file reference */
804 int po_error; /* recorded error */
805 short po_sscount; /* # SS PMCs owned */
806 short po_logprocmaps; /* global mappings done */
809 #define PMC_PO_OWNS_LOGFILE 0x00000001 /* has a log file */
810 #define PMC_PO_SHUTDOWN 0x00000010 /* in the process of shutdown */
811 #define PMC_PO_INITIAL_MAPPINGS_DONE 0x00000020 /* initial mappings recorded; see po_logprocmaps */
814 * struct pmc_hw -- describe the state of the PMC hardware
816 * When in use, a HW PMC is associated with one allocated 'struct pmc'
817 * pointed to by field 'phw_pmc'. When inactive, this field is NULL.
819 * On an SMP box, one or more HW PMC's in process virtual mode with
820 * the same 'phw_pmc' could be executing on different CPUs. In order
821 * to handle this case correctly, we need to ensure that only
822 * incremental counts get added to the saved value in the associated
823 * 'struct pmc'. The 'phw_save' field is used to keep the saved PMC
824 * value at the time the hardware is started during this context
825 * switch (i.e., the difference between the new (hardware) count and
826 * the saved count is atomically added to the count field in 'struct
827 * pmc' at context switch time).
832 uint32_t phw_state; /* see PHW_* macros below */
833 struct pmc *phw_pmc; /* current thread PMC */
/*
 * 'phw_state' bit layout: row index in bits 0..7, CPU number in
 * bits 8..15, flags in bits 16..31.
 */
836 #define PMC_PHW_RI_MASK 0x000000FF
837 #define PMC_PHW_CPU_SHIFT 8
838 #define PMC_PHW_CPU_MASK 0x0000FF00
839 #define PMC_PHW_FLAGS_SHIFT 16
840 #define PMC_PHW_FLAGS_MASK 0xFFFF0000
842 #define PMC_PHW_INDEX_TO_STATE(ri) ((ri) & PMC_PHW_RI_MASK)
843 #define PMC_PHW_STATE_TO_INDEX(state) ((state) & PMC_PHW_RI_MASK)
844 #define PMC_PHW_CPU_TO_STATE(cpu) (((cpu) << PMC_PHW_CPU_SHIFT) & \
846 #define PMC_PHW_STATE_TO_CPU(state) (((state) & PMC_PHW_CPU_MASK) >> \
848 #define PMC_PHW_FLAGS_TO_STATE(flags) (((flags) << PMC_PHW_FLAGS_SHIFT) & \
850 #define PMC_PHW_STATE_TO_FLAGS(state) (((state) & PMC_PHW_FLAGS_MASK) >> \
852 #define PMC_PHW_FLAG_IS_ENABLED (PMC_PHW_FLAGS_TO_STATE(0x01))
853 #define PMC_PHW_FLAG_IS_SHAREABLE (PMC_PHW_FLAGS_TO_STATE(0x02))
858 * Space for N (tunable) PC samples and associated control data.
862 uint16_t ps_nsamples; /* callchain depth */
863 uint8_t ps_cpu; /* cpu number */
864 uint8_t ps_flags; /* other flags */
865 pid_t ps_pid; /* process PID or -1 */
866 struct thread *ps_td; /* which thread */
867 struct pmc *ps_pmc; /* interrupting PMC */
868 uintptr_t *ps_pc; /* (const) callchain start */
/* Sentinel values stored in a sample's 'ps_nsamples' field. */
871 #define PMC_SAMPLE_FREE ((uint16_t) 0) /* sample slot is free */
872 #define PMC_SAMPLE_INUSE ((uint16_t) 0xFFFF) /* slot claimed, not yet filled -- TODO confirm */
874 struct pmc_samplebuffer {
875 struct pmc_sample * volatile ps_read; /* read pointer */
876 struct pmc_sample * volatile ps_write; /* write pointer */
877 uintptr_t *ps_callchains; /* all saved call chains */
878 struct pmc_sample *ps_fence; /* one beyond ps_samples[] */
879 struct pmc_sample ps_samples[]; /* array of sample entries */
884 * struct pmc_cpustate
886 * A CPU is modelled as a collection of HW PMCs with space for additional
891 uint32_t pc_state; /* physical cpu number + flags */
892 struct pmc_samplebuffer *pc_sb[2]; /* space for samples */
893 struct pmc_hw *pc_hwpmcs[]; /* 'npmc' pointers */
/* 'pc_state' bit layout: physical CPU number in bits 0..7, flags above. */
896 #define PMC_PCPU_CPU_MASK 0x000000FF
897 #define PMC_PCPU_FLAGS_MASK 0xFFFFFF00
898 #define PMC_PCPU_FLAGS_SHIFT 8
899 #define PMC_PCPU_STATE_TO_CPU(S) ((S) & PMC_PCPU_CPU_MASK)
900 #define PMC_PCPU_STATE_TO_FLAGS(S) (((S) & PMC_PCPU_FLAGS_MASK) >> PMC_PCPU_FLAGS_SHIFT)
901 #define PMC_PCPU_FLAGS_TO_STATE(F) (((F) << PMC_PCPU_FLAGS_SHIFT) & PMC_PCPU_FLAGS_MASK)
902 #define PMC_PCPU_CPU_TO_STATE(C) ((C) & PMC_PCPU_CPU_MASK)
903 #define PMC_PCPU_FLAG_HTT (PMC_PCPU_FLAGS_TO_STATE(0x1)) /* hyper-threading present */
908 * CPU binding information.
912 int pb_bound; /* is bound? */
913 int pb_cpu; /* if so, to which CPU */
920 * struct pmc_classdep
922 * PMC class-dependent operations.
924 struct pmc_classdep {
925 uint32_t pcd_caps; /* class capabilities */
926 enum pmc_class pcd_class; /* class id */
927 int pcd_num; /* number of PMCs */
928 int pcd_ri; /* row index of the first PMC in class */
929 int pcd_width; /* width of the PMC */
931 /* configuring/reading/writing the hardware PMCs */
932 int (*pcd_config_pmc)(int _cpu, int _ri, struct pmc *_pm);
933 int (*pcd_get_config)(int _cpu, int _ri, struct pmc **_ppm);
934 int (*pcd_read_pmc)(int _cpu, int _ri, pmc_value_t *_value);
935 int (*pcd_write_pmc)(int _cpu, int _ri, pmc_value_t _value);
937 /* pmc allocation/release */
938 int (*pcd_allocate_pmc)(int _cpu, int _ri, struct pmc *_t,
939 const struct pmc_op_pmcallocate *_a);
940 int (*pcd_release_pmc)(int _cpu, int _ri, struct pmc *_pm);
942 /* starting and stopping PMCs */
943 int (*pcd_start_pmc)(int _cpu, int _ri);
944 int (*pcd_stop_pmc)(int _cpu, int _ri);
947 int (*pcd_describe)(int _cpu, int _ri, struct pmc_info *_pi,
950 /* class-dependent initialization & finalization */
951 int (*pcd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
952 int (*pcd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);
954 /* machine-specific interface */
955 int (*pcd_get_msr)(int _ri, uint32_t *_msr);
961 * Machine dependent bits needed per CPU type.
965 uint32_t pmd_cputype; /* from enum pmc_cputype */
966 uint32_t pmd_npmc; /* number of PMCs per CPU */
967 uint32_t pmd_nclass; /* number of PMC classes present */
970 * Machine dependent methods.
973 /* per-cpu initialization and finalization */
974 int (*pmd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
975 int (*pmd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);
977 /* thread context switch in/out */
978 int (*pmd_switch_in)(struct pmc_cpu *_p, struct pmc_process *_pp);
979 int (*pmd_switch_out)(struct pmc_cpu *_p, struct pmc_process *_pp);
981 /* handle a PMC interrupt */
982 int (*pmd_intr)(int _cpu, struct trapframe *_tf);
985 * PMC class dependent information.
987 struct pmc_classdep pmd_classdep[];
991 * Per-CPU state. This is an array of 'mp_ncpu' pointers
992 * to struct pmc_cpu descriptors.
995 extern struct pmc_cpu **pmc_pcpu;
997 /* driver statistics */
998 extern struct pmc_op_getdriverstats pmc_stats;
1000 #if defined(HWPMC_DEBUG)
1001 #include <sys/ktr.h>
1003 /* debug flags, major flag groups */
1004 struct pmc_debugflags {
1016 extern struct pmc_debugflags pmc_debugflags;
1018 #define KTR_PMC KTR_SUBSYS /* ktr(9) event class used by the PMCDBG* macros */
1020 #define PMC_DEBUG_STRSIZE 128
1021 #define PMC_DEBUG_DEFAULT_FLAGS { 0, 0, 0, 0, 0, 0, 0, 0 } /* all debug output off */
1023 #define PMCDBG0(M, N, L, F) do { \
1024 if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \
1025 CTR0(KTR_PMC, #M ":" #N ":" #L ": " F); \
1027 #define PMCDBG1(M, N, L, F, p1) do { \
1028 if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \
1029 CTR1(KTR_PMC, #M ":" #N ":" #L ": " F, p1); \
1031 #define PMCDBG2(M, N, L, F, p1, p2) do { \
1032 if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \
1033 CTR2(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2); \
1035 #define PMCDBG3(M, N, L, F, p1, p2, p3) do { \
1036 if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \
1037 CTR3(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3); \
1039 #define PMCDBG4(M, N, L, F, p1, p2, p3, p4) do { \
1040 if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \
1041 CTR4(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4);\
1043 #define PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5) do { \
1044 if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \
1045 CTR5(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4, \
1048 #define PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6) do { \
1049 if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \
1050 CTR6(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4, \
1055 #define PMC_DEBUG_MAJ_CPU 0 /* cpu switches */
1056 #define PMC_DEBUG_MAJ_CSW 1 /* context switches */
1057 #define PMC_DEBUG_MAJ_LOG 2 /* logging */
1058 #define PMC_DEBUG_MAJ_MDP 3 /* machine dependent */
1059 #define PMC_DEBUG_MAJ_MOD 4 /* misc module infrastructure */
1060 #define PMC_DEBUG_MAJ_OWN 5 /* owner */
1061 #define PMC_DEBUG_MAJ_PMC 6 /* pmc management */
1062 #define PMC_DEBUG_MAJ_PRC 7 /* processes */
1063 #define PMC_DEBUG_MAJ_SAM 8 /* sampling */
1067 /* Common (8 bits) */
1068 #define PMC_DEBUG_MIN_ALL 0 /* allocation */
1069 #define PMC_DEBUG_MIN_REL 1 /* release */
1070 #define PMC_DEBUG_MIN_OPS 2 /* ops: start, stop, ... */
1071 #define PMC_DEBUG_MIN_INI 3 /* init */
1072 #define PMC_DEBUG_MIN_FND 4 /* find */
1075 #define PMC_DEBUG_MIN_PMH 14 /* pmc_hook */
1076 #define PMC_DEBUG_MIN_PMS 15 /* pmc_syscall */
1079 #define PMC_DEBUG_MIN_ORM 8 /* owner remove */
1080 #define PMC_DEBUG_MIN_OMR 9 /* owner maybe remove */
1083 #define PMC_DEBUG_MIN_TLK 8 /* link target */
1084 #define PMC_DEBUG_MIN_TUL 9 /* unlink target */
1085 #define PMC_DEBUG_MIN_EXT 10 /* process exit */
1086 #define PMC_DEBUG_MIN_EXC 11 /* process exec */
1087 #define PMC_DEBUG_MIN_FRK 12 /* process fork */
1088 #define PMC_DEBUG_MIN_ATT 13 /* attach/detach */
1089 #define PMC_DEBUG_MIN_SIG 14 /* signalling */
1091 /* CONTEXT SWITCHES */
1092 #define PMC_DEBUG_MIN_SWI 8 /* switch in */
1093 #define PMC_DEBUG_MIN_SWO 9 /* switch out */
1096 #define PMC_DEBUG_MIN_REG 8 /* pmc register */
1097 #define PMC_DEBUG_MIN_ALR 9 /* allocate row */
1099 /* MACHINE DEPENDENT LAYER */
1100 #define PMC_DEBUG_MIN_REA 8 /* read */
1101 #define PMC_DEBUG_MIN_WRI 9 /* write */
1102 #define PMC_DEBUG_MIN_CFG 10 /* config */
1103 #define PMC_DEBUG_MIN_STA 11 /* start */
1104 #define PMC_DEBUG_MIN_STO 12 /* stop */
1105 #define PMC_DEBUG_MIN_INT 13 /* interrupts */
1108 #define PMC_DEBUG_MIN_BND 8 /* bind */
1109 #define PMC_DEBUG_MIN_SEL 9 /* select */
1112 #define PMC_DEBUG_MIN_GTB 8 /* get buf */
1113 #define PMC_DEBUG_MIN_SIO 9 /* schedule i/o */
1114 #define PMC_DEBUG_MIN_FLS 10 /* flush */
1115 #define PMC_DEBUG_MIN_SAM 11 /* sample */
1116 #define PMC_DEBUG_MIN_CLO 12 /* close */
/* Debugging disabled: the PMCDBG* macros expand to nothing. */
1119 #define PMCDBG0(M, N, L, F) /* nothing */
1120 #define PMCDBG1(M, N, L, F, p1)
1121 #define PMCDBG2(M, N, L, F, p1, p2)
1122 #define PMCDBG3(M, N, L, F, p1, p2, p3)
1123 #define PMCDBG4(M, N, L, F, p1, p2, p3, p4)
1124 #define PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5)
1125 #define PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6)
1128 /* declare a dedicated memory pool */
1129 MALLOC_DECLARE(M_PMC);
1135 struct pmc_mdep *pmc_md_initialize(void); /* MD init function */
1136 void pmc_md_finalize(struct pmc_mdep *_md); /* MD fini function */
1137 int pmc_getrowdisp(int _ri); /* disposition of PMC row '_ri' */
1138 int pmc_process_interrupt(int _cpu, int _soft, struct pmc *_pm,
1139 struct trapframe *_tf, int _inuserspace); /* handle a sampling interrupt */
1140 int pmc_save_kernel_callchain(uintptr_t *_cc, int _maxsamples,
1141 struct trapframe *_tf); /* capture a kernel-mode callchain into '_cc' */
1142 int pmc_save_user_callchain(uintptr_t *_cc, int _maxsamples,
1143 struct trapframe *_tf); /* capture a user-mode callchain into '_cc' */
1144 struct pmc_mdep *pmc_mdep_alloc(int nclasses); /* allocate MD descriptor */
1145 void pmc_mdep_free(struct pmc_mdep *md); /* release MD descriptor */
1146 #endif /* _KERNEL */
1147 #endif /* _SYS_PMC_H_ */