/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel Core PMCs.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#define	CORE_CPUID_REQUEST		0xA
#define	CORE_CPUID_REQUEST_SIZE		0x4
#define	CORE_CPUID_EAX			0x0
#define	CORE_CPUID_EBX			0x1
#define	CORE_CPUID_ECX			0x2
#define	CORE_CPUID_EDX			0x3
#define	IAF_PMC_CAPS \
	(PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INTERRUPT | \
	 PMC_CAP_USER | PMC_CAP_SYSTEM)
#define	IAF_RI_TO_MSR(RI)	((RI) + (1 << 30))
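
/*
 * A note on the encoding above (background from the Intel SDM): RDPMC
 * selects the fixed-function counter bank when bit 30 of its counter
 * index is set, so fixed counter 'ri' is read as rdpmc(ri | (1 << 30)).
 * IAF_RI_TO_MSR() builds exactly that index; see iaf_read_pmc() below
 * for its use.
 */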
#define	IAP_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
	PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
	PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)
#define	EV_IS_NOTARCH		0
#define	EV_IS_ARCH_SUPP		1
#define	EV_IS_ARCH_NOTSUPP	-1
71 * "Architectural" events defined by Intel. The values of these
72 * symbols correspond to positions in the bitmask returned by
73 * the CPUID.0AH instruction.
75 enum core_arch_events {
76 CORE_AE_BRANCH_INSTRUCTION_RETIRED = 5,
77 CORE_AE_BRANCH_MISSES_RETIRED = 6,
78 CORE_AE_INSTRUCTION_RETIRED = 1,
79 CORE_AE_LLC_MISSES = 4,
80 CORE_AE_LLC_REFERENCE = 3,
81 CORE_AE_UNHALTED_REFERENCE_CYCLES = 2,
82 CORE_AE_UNHALTED_CORE_CYCLES = 0
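
/*
 * Example (per the SDM's CPUID.0AH definition, a set bit in EBX means
 * the event is *not* available): EBX == 0x60 has bits 5 and 6 set, so
 * CORE_AE_BRANCH_INSTRUCTION_RETIRED and CORE_AE_BRANCH_MISSES_RETIRED
 * would be unsupported while the remaining architectural events stay
 * usable.  iap_initialize() stores the complement of this mask in
 * core_architectural_events.
 */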
static enum pmc_cputype	core_cputype;

struct core_cpu {
	volatile uint32_t	pc_resync;
	volatile uint32_t	pc_iafctrl;	/* Fixed function control. */
	volatile uint64_t	pc_globalctrl;	/* Global control register. */
	struct pmc_hw		pc_corepmcs[];
};
static struct core_cpu **core_pcpu;

static uint32_t core_architectural_events;
static uint64_t core_pmcmask;

static int core_iaf_ri;		/* relative index of fixed counters */
static int core_iaf_width;
static int core_iaf_npmc;

static int core_iap_width;
static int core_iap_npmc;
static int core_iap_wroffset;

static u_int pmc_alloc_refs;
static bool pmc_tsx_force_abort_set;
static int
core_pcpu_noop(struct pmc_mdep *md, int cpu)
{
	(void) md;
	(void) cpu;
	return (0);
}
static int
core_pcpu_init(struct pmc_mdep *md, int cpu)
{
	struct pmc_cpu *pc;
	struct core_cpu *cc;
	struct pmc_hw *phw;
	int core_ri, n, npmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[iaf,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"core-init cpu=%d", cpu);

	core_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_ri;
	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_num;

	if (core_cputype != PMC_CPU_INTEL_CORE)
		npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF].pcd_num;

	cc = malloc(sizeof(struct core_cpu) + npmc * sizeof(struct pmc_hw),
	    M_PMC, M_WAITOK | M_ZERO);

	core_pcpu[cpu] = cc;
	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL && cc != NULL,
	    ("[core,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));

	for (n = 0, phw = cc->pc_corepmcs; n < npmc; n++, phw++) {
		phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) |
		    PMC_PHW_INDEX_TO_STATE(n + core_ri);
		pc->pc_hwpmcs[n + core_ri] = phw;
	}

	return (0);
}
static int
core_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	int core_ri, n, npmc;
	struct pmc_cpu *pc;
	struct core_cpu *cc;
	uint64_t msr;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] insane cpu number (%d)", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"core-pcpu-fini cpu=%d", cpu);

	if ((cc = core_pcpu[cpu]) == NULL)
		return (0);

	core_pcpu[cpu] = NULL;

	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL, ("[core,%d] NULL per-cpu %d state", __LINE__,
	    cpu));

	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_num;
	core_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_ri;

	for (n = 0; n < npmc; n++) {
		msr = rdmsr(IAP_EVSEL0 + n) & ~IAP_EVSEL_MASK;
		wrmsr(IAP_EVSEL0 + n, msr);
	}

	if (core_cputype != PMC_CPU_INTEL_CORE) {
		msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
		wrmsr(IAF_CTRL, msr);
		npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF].pcd_num;
	}

	for (n = 0; n < npmc; n++)
		pc->pc_hwpmcs[n + core_ri] = NULL;

	free(cc, M_PMC);

	return (0);
}
/*
 * Fixed function counters.
 */

static pmc_value_t
iaf_perfctr_value_to_reload_count(pmc_value_t v)
{

	/* If the PMC has overflowed, return a reload count of zero. */
	if ((v & (1ULL << (core_iaf_width - 1))) == 0)
		return (0);
	v &= (1ULL << core_iaf_width) - 1;
	return (1ULL << core_iaf_width) - v;
}

static pmc_value_t
iaf_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << core_iaf_width) - rlc;
}
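
/*
 * Worked example of the two conversions above, assuming a 40-bit
 * counter: a reload count of 1000 is programmed as 2^40 - 1000, and
 * the counter counts up toward an overflow at 2^40.  Reading back a
 * value with the MSB still set (i.e. >= 2^39) means the counter has
 * not yet overflowed and the remaining reload count is 2^40 - v; an
 * MSB of zero means it already wrapped, hence the reload count of
 * zero.
 */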
static int
iaf_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint8_t ev, umask;
	uint32_t caps, flags, config;
	const struct pmc_md_iap_op_pmcallocate *iap;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU %d", __LINE__, cpu));

	PMCDBG2(MDP,ALL,1, "iaf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);

	if (ri < 0 || ri > core_iaf_npmc)
		return (EINVAL);

	if (a->pm_class != PMC_CLASS_IAF)
		return (EINVAL);

	iap = &a->pm_md.pm_iap;
	config = iap->pm_iap_config;
	ev = IAP_EVSEL_GET(config);
	umask = IAP_UMASK_GET(config);

	/* INST_RETIRED.ANY */
	if (ev == 0xC0 && ri != 0)
		return (EINVAL);
	/* CPU_CLK_UNHALTED.THREAD */
	if (ev == 0x3C && ri != 1)
		return (EINVAL);
	/* CPU_CLK_UNHALTED.REF */
	if (ev == 0x0 && umask == 0x3 && ri != 2)
		return (EINVAL);

	pmc_alloc_refs++;
	if ((cpu_stdext_feature3 & CPUID_STDEXT3_TSXFA) != 0 &&
	    !pmc_tsx_force_abort_set) {
		pmc_tsx_force_abort_set = true;
		x86_msr_op(MSR_TSX_FORCE_ABORT, MSR_OP_RENDEZVOUS_ALL |
		    MSR_OP_WRITE, 1, NULL);
	}

	flags = 0;
	if (config & IAP_OS)
		flags |= IAF_OS;
	if (config & IAP_USR)
		flags |= IAF_USR;
	if (config & IAP_ANY)
		flags |= IAF_ANY;
	if (config & IAP_INT)
		flags |= IAF_PMI;

	caps = a->pm_caps;
	if (caps & PMC_CAP_INTERRUPT)
		flags |= IAF_PMI;
	if (caps & PMC_CAP_SYSTEM)
		flags |= IAF_OS;
	if (caps & PMC_CAP_USER)
		flags |= IAF_USR;
	if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
		flags |= (IAF_OS | IAF_USR);

	pm->pm_md.pm_iaf.pm_iaf_ctrl = (flags << (ri * 4));

	PMCDBG1(MDP,ALL,2, "iaf-allocate config=0x%jx",
	    (uintmax_t) pm->pm_md.pm_iaf.pm_iaf_ctrl);

	return (0);
}
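
/*
 * Layout note (IA32_FIXED_CTR_CTRL, Intel SDM): each fixed counter
 * owns a 4-bit control field, which is why the flags are shifted by
 * (ri * 4) above.  For example, assuming IAF_OS and IAF_USR occupy
 * bits 0 and 1 of the field as in the corresponding header, ri == 1
 * with both set yields pm_iaf_ctrl == 0x30, i.e. fixed counter 1
 * counts in both kernel and user mode.
 */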
static int
iaf_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "iaf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(core_pcpu[cpu] != NULL, ("[core,%d] null per-cpu %d", __LINE__,
	    cpu));

	core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc = pm;

	return (0);
}
static int
iaf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	struct pmc_hw *phw;
	char iaf_name[PMC_NAME_MAX];

	phw = &core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri];

	(void) snprintf(iaf_name, sizeof(iaf_name), "IAF-%d", ri);
	if ((error = copystr(iaf_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);

	pi->pm_class = PMC_CLASS_IAF;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return (0);
}
static int
iaf_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc;

	return (0);
}
static int
iaf_get_msr(int ri, uint32_t *msr)
{
	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[iaf,%d] ri %d out of range", __LINE__, ri));

	*msr = IAF_RI_TO_MSR(ri);

	return (0);
}
static int
iaf_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	pm = core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc;

	KASSERT(pm,
	    ("[core,%d] cpu %d ri %d(%d) pmc not configured", __LINE__, cpu,
	    ri, ri + core_iaf_ri));

	tmp = rdpmc(IAF_RI_TO_MSR(ri));

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = iaf_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp & ((1ULL << core_iaf_width) - 1);

	PMCDBG4(MDP,REA,1, "iaf-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
	    IAF_RI_TO_MSR(ri), *v);

	return (0);
}
static int
iaf_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	PMCDBG3(MDP,REL,1, "iaf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc == NULL,
	    ("[core,%d] PHW pmc non-NULL", __LINE__));

	MPASS(pmc_alloc_refs > 0);
	if (pmc_alloc_refs-- == 1 && pmc_tsx_force_abort_set) {
		pmc_tsx_force_abort_set = false;
		x86_msr_op(MSR_TSX_FORCE_ABORT, MSR_OP_RENDEZVOUS_ALL |
		    MSR_OP_WRITE, 0, NULL);
	}

	return (0);
}
static int
iaf_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct core_cpu *iafc;
	uint64_t msr = 0;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG2(MDP,STA,1,"iaf-start cpu=%d ri=%d", cpu, ri);

	iafc = core_pcpu[cpu];
	pm = iafc->pc_corepmcs[ri + core_iaf_ri].phw_pmc;

	iafc->pc_iafctrl |= pm->pm_md.pm_iaf.pm_iaf_ctrl;

	msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
	wrmsr(IAF_CTRL, msr | (iafc->pc_iafctrl & IAF_CTRL_MASK));

	do {
		iafc->pc_resync = 0;
		iafc->pc_globalctrl |= (1ULL << (ri + IAF_OFFSET));
		msr = rdmsr(IA_GLOBAL_CTRL) & ~IAF_GLOBAL_CTRL_MASK;
		wrmsr(IA_GLOBAL_CTRL, msr | (iafc->pc_globalctrl &
		    IAF_GLOBAL_CTRL_MASK));
	} while (iafc->pc_resync != 0);

	PMCDBG4(MDP,STA,1,"iafctrl=%x(%x) globalctrl=%jx(%jx)",
	    iafc->pc_iafctrl, (uint32_t) rdmsr(IAF_CTRL),
	    iafc->pc_globalctrl, rdmsr(IA_GLOBAL_CTRL));

	return (0);
}
static int
iaf_stop_pmc(int cpu, int ri)
{
	uint32_t fc;
	struct core_cpu *iafc;
	uint64_t msr = 0;

	PMCDBG2(MDP,STO,1,"iaf-stop cpu=%d ri=%d", cpu, ri);

	iafc = core_pcpu[cpu];

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	fc = (IAF_MASK << (ri * 4));

	iafc->pc_iafctrl &= ~fc;

	PMCDBG1(MDP,STO,1,"iaf-stop iafctrl=%x", iafc->pc_iafctrl);
	msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
	wrmsr(IAF_CTRL, msr | (iafc->pc_iafctrl & IAF_CTRL_MASK));

	do {
		iafc->pc_resync = 0;
		iafc->pc_globalctrl &= ~(1ULL << (ri + IAF_OFFSET));
		msr = rdmsr(IA_GLOBAL_CTRL) & ~IAF_GLOBAL_CTRL_MASK;
		wrmsr(IA_GLOBAL_CTRL, msr | (iafc->pc_globalctrl &
		    IAF_GLOBAL_CTRL_MASK));
	} while (iafc->pc_resync != 0);

	PMCDBG4(MDP,STO,1,"iafctrl=%x(%x) globalctrl=%jx(%jx)",
	    iafc->pc_iafctrl, (uint32_t) rdmsr(IAF_CTRL),
	    iafc->pc_globalctrl, rdmsr(IA_GLOBAL_CTRL));

	return (0);
}
static int
iaf_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct core_cpu *cc;
	struct pmc *pm;
	uint64_t msr;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	cc = core_pcpu[cpu];
	pm = cc->pc_corepmcs[ri + core_iaf_ri].phw_pmc;

	KASSERT(pm,
	    ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = iaf_reload_count_to_perfctr_value(v);

	/* Turn off fixed counters. */
	msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
	wrmsr(IAF_CTRL, msr);

	wrmsr(IAF_CTR0 + ri, v & ((1ULL << core_iaf_width) - 1));

	/* Turn on fixed counters. */
	msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
	wrmsr(IAF_CTRL, msr | (cc->pc_iafctrl & IAF_CTRL_MASK));

	PMCDBG6(MDP,WRI,1, "iaf-write cpu=%d ri=%d msr=0x%x v=%jx iafctrl=%jx "
	    "pmc=%jx", cpu, ri, IAF_RI_TO_MSR(ri), v,
	    (uintmax_t) rdmsr(IAF_CTRL),
	    (uintmax_t) rdpmc(IAF_RI_TO_MSR(ri)));

	return (0);
}
static void
iaf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[iaf,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "iaf-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF];

	pcd->pcd_caps	= IAF_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_IAF;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= iaf_allocate_pmc;
	pcd->pcd_config_pmc	= iaf_config_pmc;
	pcd->pcd_describe	= iaf_describe;
	pcd->pcd_get_config	= iaf_get_config;
	pcd->pcd_get_msr	= iaf_get_msr;
	pcd->pcd_pcpu_fini	= core_pcpu_noop;
	pcd->pcd_pcpu_init	= core_pcpu_noop;
	pcd->pcd_read_pmc	= iaf_read_pmc;
	pcd->pcd_release_pmc	= iaf_release_pmc;
	pcd->pcd_start_pmc	= iaf_start_pmc;
	pcd->pcd_stop_pmc	= iaf_stop_pmc;
	pcd->pcd_write_pmc	= iaf_write_pmc;

	md->pmd_npmc	       += npmc;
}
/*
 * Intel programmable PMCs.
 */

/* Sub fields of UMASK that this event supports. */
#define	IAP_M_CORE		(1 << 0) /* Core specificity */
#define	IAP_M_AGENT		(1 << 1) /* Agent specificity */
#define	IAP_M_PREFETCH		(1 << 2) /* Prefetch */
#define	IAP_M_MESI		(1 << 3) /* MESI */
#define	IAP_M_SNOOPRESPONSE	(1 << 4) /* Snoop response */
#define	IAP_M_SNOOPTYPE		(1 << 5) /* Snoop type */
#define	IAP_M_TRANSITION	(1 << 6) /* Transition */

#define	IAP_F_CORE		(0x3 << 14) /* Core specificity */
#define	IAP_F_AGENT		(0x1 << 13) /* Agent specificity */
#define	IAP_F_PREFETCH		(0x3 << 12) /* Prefetch */
#define	IAP_F_MESI		(0xF << 8) /* MESI */
#define	IAP_F_SNOOPRESPONSE	(0xB << 8) /* Snoop response */
#define	IAP_F_SNOOPTYPE		(0x3 << 8) /* Snoop type */
#define	IAP_F_TRANSITION	(0x1 << 12) /* Transition */

#define	IAP_PREFETCH_RESERVED	(0x2 << 12)
#define	IAP_CORE_THIS		(0x1 << 14)
#define	IAP_CORE_ALL		(0x3 << 14)
#define	IAP_F_CMASK		0xFF000000
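
/*
 * For orientation (IA32_PERFEVTSELx layout, Intel SDM): the event
 * select occupies bits 7:0 of the register, the unit mask (umask)
 * bits 15:8, and the counter mask bits 31:24 (IAP_F_CMASK above).
 * The IAP_F_* masks above pick out the umask and flag sub-fields that
 * a given event actually defines, e.g. IAP_F_MESI covers the four
 * MESI qualifier bits within the umask.
 */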
static pmc_value_t
iap_perfctr_value_to_reload_count(pmc_value_t v)
{

	/* If the PMC has overflowed, return a reload count of zero. */
	if ((v & (1ULL << (core_iap_width - 1))) == 0)
		return (0);
	v &= (1ULL << core_iap_width) - 1;
	return (1ULL << core_iap_width) - v;
}

static pmc_value_t
iap_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << core_iap_width) - rlc;
}
static int
iap_pmc_has_overflowed(int ri)
{
	pmc_value_t v;

	/*
	 * We treat a Core (i.e., Intel architecture v1) PMC as having
	 * overflowed if its MSB is zero.
	 */
	v = rdpmc(ri);
	return ((v & (1ULL << (core_iap_width - 1))) == 0);
}
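
/*
 * This mirrors the fixed-counter conversion logic above: a sampling
 * PMC is loaded with 2^width - reloadcount, a value whose MSB is set
 * for any reload count below 2^(width - 1), so an MSB of zero means
 * the counter wrapped past zero since it was last loaded.
 */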
static int
iap_event_corei7_ok_on_counter(uint8_t evsel, int ri)
{
	uint32_t mask;

	switch (evsel) {
	/*
	 * Events valid only on counter 0, 1.
	 */
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x51:
	case 0x63:
		mask = 0x3;
		break;
	default:
		mask = ~0;	/* Any row index is ok. */
	}

	return (mask & (1 << ri));
}
static int
iap_event_westmere_ok_on_counter(uint8_t evsel, int ri)
{
	uint32_t mask;

	switch (evsel) {
	/*
	 * Events valid only on counter 0.
	 */
	case 0x60:
	case 0xB3:
		mask = 0x1;
		break;

	/*
	 * Events valid only on counter 0, 1.
	 */
	case 0x4C:
	case 0x4E:
	case 0x51:
	case 0x63:
		mask = 0x3;
		break;
	default:
		mask = ~0;	/* Any row index is ok. */
	}

	return (mask & (1 << ri));
}
static int
iap_event_sb_sbx_ib_ibx_ok_on_counter(uint8_t evsel, int ri)
{
	uint32_t mask;

	switch (evsel) {
	/* Events valid only on counter 0. */
	case 0xB7:
		mask = 0x1;
		break;
	/* Events valid only on counter 1. */
	case 0xC0:
		mask = 0x2;
		break;
	/* Events valid only on counter 2. */
	case 0x48:
	case 0xA2:
	case 0xA3:
		mask = 0x4;
		break;
	/* Events valid only on counter 3. */
	case 0xBB:
	case 0xCD:
		mask = 0x8;
		break;
	default:
		mask = ~0;	/* Any row index is ok. */
	}

	return (mask & (1 << ri));
}
static int
iap_event_ok_on_counter(uint8_t evsel, int ri)
{
	uint32_t mask;

	switch (evsel) {
	/*
	 * Events valid only on counter 0.
	 */
	case 0x10:
	case 0x14:
	case 0x18:
	case 0xB3:
	case 0xB4:
	case 0xC1:
		mask = 0x1;
		break;

	/*
	 * Events valid only on counter 1.
	 */
	case 0x11:
	case 0x12:
	case 0x13:
		mask = 0x2;
		break;

	default:
		mask = ~0;	/* Any row index is ok. */
	}

	return (mask & (1 << ri));
}
static int
iap_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint8_t ev;
	const struct pmc_md_iap_op_pmcallocate *iap;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row-index value %d", __LINE__, ri));

	if (a->pm_class != PMC_CLASS_IAP)
		return (EINVAL);

	iap = &a->pm_md.pm_iap;
	ev = IAP_EVSEL_GET(iap->pm_iap_config);

	switch (core_cputype) {
	case PMC_CPU_INTEL_COREI7:
	case PMC_CPU_INTEL_NEHALEM_EX:
		if (iap_event_corei7_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
		break;
	case PMC_CPU_INTEL_SKYLAKE:
	case PMC_CPU_INTEL_SKYLAKE_XEON:
	case PMC_CPU_INTEL_ICELAKE:
	case PMC_CPU_INTEL_ICELAKE_XEON:
	case PMC_CPU_INTEL_BROADWELL:
	case PMC_CPU_INTEL_BROADWELL_XEON:
	case PMC_CPU_INTEL_SANDYBRIDGE:
	case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
	case PMC_CPU_INTEL_IVYBRIDGE:
	case PMC_CPU_INTEL_IVYBRIDGE_XEON:
	case PMC_CPU_INTEL_HASWELL:
	case PMC_CPU_INTEL_HASWELL_XEON:
		if (iap_event_sb_sbx_ib_ibx_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
		break;
	case PMC_CPU_INTEL_WESTMERE:
	case PMC_CPU_INTEL_WESTMERE_EX:
		if (iap_event_westmere_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
		break;
	default:
		if (iap_event_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
	}

	pm->pm_md.pm_iap.pm_iap_evsel = iap->pm_iap_config;

	return (0);
}
static int
iap_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "iap-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(core_pcpu[cpu] != NULL, ("[core,%d] null per-cpu %d", __LINE__,
	    cpu));

	core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc = pm;

	return (0);
}
static int
iap_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	struct pmc_hw *phw;
	char iap_name[PMC_NAME_MAX];

	phw = &core_pcpu[cpu]->pc_corepmcs[ri];

	(void) snprintf(iap_name, sizeof(iap_name), "IAP-%d", ri);
	if ((error = copystr(iap_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);

	pi->pm_class = PMC_CLASS_IAP;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return (0);
}
838 iap_get_config(int cpu, int ri, struct pmc **ppm)
840 *ppm = core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc;
static int
iap_get_msr(int ri, uint32_t *msr)
{
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[iap,%d] ri %d out of range", __LINE__, ri));

	*msr = ri;

	return (0);
}
static int
iap_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	pm = core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
	    ri));

	tmp = rdpmc(ri);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = iap_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp & ((1ULL << core_iap_width) - 1);

	PMCDBG4(MDP,REA,1, "iap-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
	    IAP_PMC0 + ri, *v);

	return (0);
}
static int
iap_release_pmc(int cpu, int ri, struct pmc *pm)
{
	(void) pm;

	PMCDBG3(MDP,REL,1, "iap-release cpu=%d ri=%d pm=%p", cpu, ri,
	    pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc
	    == NULL, ("[core,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}
static int
iap_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	uint32_t evsel;
	struct core_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	cc = core_pcpu[cpu];
	pm = cc->pc_corepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[core,%d] starting cpu%d,ri%d with no pmc configured",
	    __LINE__, cpu, ri));

	PMCDBG2(MDP,STA,1, "iap-start cpu=%d ri=%d", cpu, ri);

	evsel = pm->pm_md.pm_iap.pm_iap_evsel;

	PMCDBG4(MDP,STA,2, "iap-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%x",
	    cpu, ri, IAP_EVSEL0 + ri, evsel);

	/* Event specific configuration. */

	switch (IAP_EVSEL_GET(evsel)) {
	case 0xB7:
		wrmsr(IA_OFFCORE_RSP0, pm->pm_md.pm_iap.pm_iap_rsp);
		break;
	case 0xBB:
		wrmsr(IA_OFFCORE_RSP1, pm->pm_md.pm_iap.pm_iap_rsp);
		break;
	default:
		break;
	}

	wrmsr(IAP_EVSEL0 + ri, evsel | IAP_EN);

	if (core_cputype == PMC_CPU_INTEL_CORE)
		return (0);

	do {
		cc->pc_resync = 0;
		cc->pc_globalctrl |= (1ULL << ri);
		wrmsr(IA_GLOBAL_CTRL, cc->pc_globalctrl);
	} while (cc->pc_resync != 0);

	return (0);
}
static int
iap_stop_pmc(int cpu, int ri)
{
	struct pmc *pm __diagused;
	struct core_cpu *cc;
	uint64_t msr;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row index %d", __LINE__, ri));

	cc = core_pcpu[cpu];
	pm = cc->pc_corepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[core,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
	    cpu, ri));

	PMCDBG2(MDP,STO,1, "iap-stop cpu=%d ri=%d", cpu, ri);

	msr = rdmsr(IAP_EVSEL0 + ri) & ~IAP_EVSEL_MASK;
	wrmsr(IAP_EVSEL0 + ri, msr);	/* stop hw */

	if (core_cputype == PMC_CPU_INTEL_CORE)
		return (0);

	do {
		cc->pc_resync = 0;
		cc->pc_globalctrl &= ~(1ULL << ri);
		msr = rdmsr(IA_GLOBAL_CTRL) & ~IA_GLOBAL_CTRL_MASK;
		wrmsr(IA_GLOBAL_CTRL, cc->pc_globalctrl);
	} while (cc->pc_resync != 0);

	return (0);
}
static int
iap_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;
	struct core_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row index %d", __LINE__, ri));

	cc = core_pcpu[cpu];
	pm = cc->pc_corepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[core,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
	    cpu, ri));

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = iap_reload_count_to_perfctr_value(v);

	v &= (1ULL << core_iap_width) - 1;

	PMCDBG4(MDP,WRI,1, "iap-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
	    IAP_PMC0 + ri, v);

	/*
	 * Write the new value to the counter (or its alias).  The
	 * counter will be in a stopped state when the pcd_write()
	 * entry point is called.
	 */
	wrmsr(core_iap_wroffset + IAP_PMC0 + ri, v);

	return (0);
}
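
/*
 * Why the write offset: per the Intel SDM, a WRMSR to the legacy
 * IA32_PMCx registers sign-extends bit 31, so only the low 32 bits of
 * a counter can be written that way.  When full-width writes are
 * available, pmc_core_initialize() sets core_iap_wroffset to
 * IAP_A_PMC0 - IAP_PMC0 so the store above lands on the full-width
 * alias register instead.
 */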
static void
iap_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth,
    int flags)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[iap,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "iap-initialize");

	/* Remember the set of architectural events supported. */
	core_architectural_events = ~flags;

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP];

	pcd->pcd_caps	= IAP_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_IAP;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= iap_allocate_pmc;
	pcd->pcd_config_pmc	= iap_config_pmc;
	pcd->pcd_describe	= iap_describe;
	pcd->pcd_get_config	= iap_get_config;
	pcd->pcd_get_msr	= iap_get_msr;
	pcd->pcd_pcpu_fini	= core_pcpu_fini;
	pcd->pcd_pcpu_init	= core_pcpu_init;
	pcd->pcd_read_pmc	= iap_read_pmc;
	pcd->pcd_release_pmc	= iap_release_pmc;
	pcd->pcd_start_pmc	= iap_start_pmc;
	pcd->pcd_stop_pmc	= iap_stop_pmc;
	pcd->pcd_write_pmc	= iap_write_pmc;

	md->pmd_npmc	       += npmc;
}
static int
core_intr(struct trapframe *tf)
{
	pmc_value_t v;
	struct pmc *pm;
	struct core_cpu *cc;
	int error, found_interrupt, ri;
	uint64_t msr;

	PMCDBG3(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", curcpu, (void *) tf,
	    TRAPF_USERMODE(tf));

	found_interrupt = 0;
	cc = core_pcpu[curcpu];

	for (ri = 0; ri < core_iap_npmc; ri++) {

		if ((pm = cc->pc_corepmcs[ri].phw_pmc) == NULL ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		if (!iap_pmc_has_overflowed(ri))
			continue;

		found_interrupt = 1;

		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		error = pmc_process_interrupt(PMC_HR, pm, tf);

		v = pm->pm_sc.pm_reloadcount;
		v = iap_reload_count_to_perfctr_value(v);

		/*
		 * Stop the counter and reload it, but restart it only
		 * if the PMC is not stalled.
		 */
		msr = rdmsr(IAP_EVSEL0 + ri) & ~IAP_EVSEL_MASK;
		wrmsr(IAP_EVSEL0 + ri, msr);
		wrmsr(core_iap_wroffset + IAP_PMC0 + ri, v);

		if (error)
			continue;

		wrmsr(IAP_EVSEL0 + ri, msr | (pm->pm_md.pm_iap.pm_iap_evsel |
		    IAP_EN));
	}

	if (found_interrupt)
		lapic_reenable_pmc();

	if (found_interrupt)
		counter_u64_add(pmc_stats.pm_intr_processed, 1);
	else
		counter_u64_add(pmc_stats.pm_intr_ignored, 1);

	return (found_interrupt);
}
static int
core2_intr(struct trapframe *tf)
{
	int error, found_interrupt, n, cpu;
	uint64_t flag, intrstatus, intrenable, msr;
	pmc_value_t v;
	struct core_cpu *cc;
	struct pmc *pm;

	cpu = curcpu;
	PMCDBG3(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf,
	    TRAPF_USERMODE(tf));

	/*
	 * The IA_GLOBAL_STATUS (MSR 0x38E) register indicates which
	 * PMCs have a pending PMI interrupt.  We take a 'snapshot' of
	 * the current set of interrupting PMCs and process these
	 * after stopping them.
	 */
	intrstatus = rdmsr(IA_GLOBAL_STATUS);
	intrenable = intrstatus & core_pmcmask;

	PMCDBG2(MDP,INT, 1, "cpu=%d intrstatus=%jx", cpu,
	    (uintmax_t) intrstatus);

	found_interrupt = 0;
	cc = core_pcpu[cpu];

	KASSERT(cc != NULL, ("[core,%d] null pcpu", __LINE__));

	cc->pc_globalctrl &= ~intrenable;
	cc->pc_resync = 1;	/* MSRs now potentially out of sync. */

	/*
	 * Stop PMCs and clear overflow status bits.
	 */
	msr = rdmsr(IA_GLOBAL_CTRL) & ~IA_GLOBAL_CTRL_MASK;
	wrmsr(IA_GLOBAL_CTRL, msr);
	wrmsr(IA_GLOBAL_OVF_CTRL, intrenable |
	    IA_GLOBAL_STATUS_FLAG_OVFBUF |
	    IA_GLOBAL_STATUS_FLAG_CONDCHG);
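
	/*
	 * Status bit layout scanned below (IA32_PERF_GLOBAL_STATUS,
	 * Intel SDM): bit n is set for programmable counter n and bit
	 * (IAF_OFFSET + n) for fixed counter n, which is why the
	 * fixed-counter loop starts its scan flag at
	 * (1ULL << IAF_OFFSET).
	 */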
	/*
	 * Look for interrupts from fixed function PMCs.
	 */
	for (n = 0, flag = (1ULL << IAF_OFFSET); n < core_iaf_npmc;
	     n++, flag <<= 1) {

		if ((intrstatus & flag) == 0)
			continue;

		found_interrupt = 1;

		pm = cc->pc_corepmcs[n + core_iaf_ri].phw_pmc;
		if (pm == NULL || pm->pm_state != PMC_STATE_RUNNING ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		error = pmc_process_interrupt(PMC_HR, pm, tf);

		if (error)
			intrenable &= ~flag;

		v = iaf_reload_count_to_perfctr_value(pm->pm_sc.pm_reloadcount);

		/* Reload sampling count. */
		wrmsr(IAF_CTR0 + n, v);

		PMCDBG4(MDP,INT, 1, "iaf-intr cpu=%d error=%d v=%jx(%jx)", curcpu,
		    error, (uintmax_t) v, (uintmax_t) rdpmc(IAF_RI_TO_MSR(n)));
	}
	/*
	 * Process interrupts from the programmable counters.
	 */
	for (n = 0, flag = 1; n < core_iap_npmc; n++, flag <<= 1) {
		if ((intrstatus & flag) == 0)
			continue;

		found_interrupt = 1;

		pm = cc->pc_corepmcs[n].phw_pmc;
		if (pm == NULL || pm->pm_state != PMC_STATE_RUNNING ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		error = pmc_process_interrupt(PMC_HR, pm, tf);
		if (error)
			intrenable &= ~flag;

		v = iap_reload_count_to_perfctr_value(pm->pm_sc.pm_reloadcount);

		PMCDBG3(MDP,INT, 1, "iap-intr cpu=%d error=%d v=%jx", cpu, error,
		    (uintmax_t) v);

		/* Reload sampling count. */
		wrmsr(core_iap_wroffset + IAP_PMC0 + n, v);
	}
	/*
	 * Reenable all non-stalled PMCs.
	 */
	PMCDBG2(MDP,INT, 1, "cpu=%d intrenable=%jx", cpu,
	    (uintmax_t) intrenable);

	cc->pc_globalctrl |= intrenable;

	wrmsr(IA_GLOBAL_CTRL, cc->pc_globalctrl & IA_GLOBAL_CTRL_MASK);

	PMCDBG5(MDP,INT, 1, "cpu=%d fixedctrl=%jx globalctrl=%jx status=%jx "
	    "ovf=%jx", cpu, (uintmax_t) rdmsr(IAF_CTRL),
	    (uintmax_t) rdmsr(IA_GLOBAL_CTRL),
	    (uintmax_t) rdmsr(IA_GLOBAL_STATUS),
	    (uintmax_t) rdmsr(IA_GLOBAL_OVF_CTRL));

	if (found_interrupt)
		lapic_reenable_pmc();

	if (found_interrupt)
		counter_u64_add(pmc_stats.pm_intr_processed, 1);
	else
		counter_u64_add(pmc_stats.pm_intr_ignored, 1);

	return (found_interrupt);
}
int
pmc_core_initialize(struct pmc_mdep *md, int maxcpu, int version_override)
{
	int cpuid[CORE_CPUID_REQUEST_SIZE];
	int ipa_version, flags, nflags;

	do_cpuid(CORE_CPUID_REQUEST, cpuid);

	ipa_version = (version_override > 0) ? version_override :
	    cpuid[CORE_CPUID_EAX] & 0xFF;
	core_cputype = md->pmd_cputype;

	PMCDBG3(MDP,INI,1,"core-init cputype=%d ncpu=%d ipa-version=%d",
	    core_cputype, maxcpu, ipa_version);

	if (ipa_version < 1 || ipa_version > 5 ||
	    (core_cputype != PMC_CPU_INTEL_CORE && ipa_version == 1)) {
		/* Unknown PMC architecture. */
		printf("hwpc_core: unknown PMC architecture: %d\n",
		    ipa_version);
		return (EPROGMISMATCH);
	}
	core_iap_wroffset = 0;
	if (cpu_feature2 & CPUID2_PDCM) {
		if (rdmsr(IA32_PERF_CAPABILITIES) & PERFCAP_FW_WRITE) {
			PMCDBG0(MDP, INI, 1,
			    "core-init full-width write supported");
			core_iap_wroffset = IAP_A_PMC0 - IAP_PMC0;
		} else
			PMCDBG0(MDP, INI, 1,
			    "core-init full-width write NOT supported");
	} else
		PMCDBG0(MDP, INI, 1, "core-init pdcm not supported");

	core_pmcmask = 0;

	/*
	 * Initialize programmable counters.
	 */
	core_iap_npmc = (cpuid[CORE_CPUID_EAX] >> 8) & 0xFF;
	core_iap_width = (cpuid[CORE_CPUID_EAX] >> 16) & 0xFF;

	core_pmcmask |= ((1ULL << core_iap_npmc) - 1);

	nflags = (cpuid[CORE_CPUID_EAX] >> 24) & 0xFF;
	flags = cpuid[CORE_CPUID_EBX] & ((1 << nflags) - 1);
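
	/*
	 * Example decode of CPUID.0AH EAX (value illustrative): with
	 * EAX == 0x07300403, bits 7:0 give PMC architecture version 3,
	 * bits 15:8 give 4 programmable counters, bits 23:16 give a
	 * 48-bit counter width, and bits 31:24 say that 7 bits of EBX
	 * carry valid event flags.
	 */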
	iap_initialize(md, maxcpu, core_iap_npmc, core_iap_width, flags);

	/*
	 * Initialize fixed function counters, if present.
	 */
	if (core_cputype != PMC_CPU_INTEL_CORE) {
		core_iaf_ri = core_iap_npmc;
		core_iaf_npmc = cpuid[CORE_CPUID_EDX] & 0x1F;
		core_iaf_width = (cpuid[CORE_CPUID_EDX] >> 5) & 0xFF;

		iaf_initialize(md, maxcpu, core_iaf_npmc, core_iaf_width);
		core_pmcmask |= ((1ULL << core_iaf_npmc) - 1) << IAF_OFFSET;
	}
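
	/*
	 * EDX is decoded the same way (value illustrative): EDX ==
	 * 0x603 means bits 4:0 report three fixed-function counters
	 * and bits 12:5 a 48-bit width for each.
	 */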
1315 PMCDBG2(MDP,INI,1,"core-init pmcmask=0x%jx iafri=%d", core_pmcmask,
1318 core_pcpu = malloc(sizeof(*core_pcpu) * maxcpu, M_PMC,
1322 * Choose the appropriate interrupt handler.
1324 if (ipa_version == 1)
1325 md->pmd_intr = core_intr;
1327 md->pmd_intr = core2_intr;
1329 md->pmd_pcpu_fini = NULL;
1330 md->pmd_pcpu_init = NULL;
void
pmc_core_finalize(struct pmc_mdep *md)
{
	PMCDBG0(MDP,INI,1, "core-finalize");

	free(core_pcpu, M_PMC);
	core_pcpu = NULL;
}