/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Fabien Thomas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel Uncore PMCs.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>

#include <machine/intr_machdep.h>
#if (__FreeBSD_version >= 1100000)
#include <x86/apicvar.h>
#else
#include <machine/apicvar.h>
#endif
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>

#define	UCF_PMC_CAPS \
	(PMC_CAP_READ | PMC_CAP_WRITE)

#define	UCP_PMC_CAPS \
    (PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
    PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)

#define	SELECTSEL(x) \
	(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
	UCP_CB0_EVSEL0 : UCP_EVSEL0)

#define	SELECTOFF(x) \
	(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
	UCF_OFFSET_SB : UCF_OFFSET)
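
/*
 * SELECTSEL() yields the base MSR of the programmable event select
 * registers and SELECTOFF() the bit position of the first fixed
 * counter enable in UC_GLOBAL_CTRL; Sandy Bridge and Haswell moved
 * both relative to the earlier Core i7/Westmere layout, hence the
 * switch on the CPU type.
 */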

static enum pmc_cputype uncore_cputype;

struct uncore_cpu {
	volatile uint32_t	pc_resync;
	volatile uint32_t	pc_ucfctrl;	/* Fixed function control. */
	volatile uint64_t	pc_globalctrl;	/* Global control register. */
	struct pmc_hw		pc_uncorepmcs[];
};
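
/*
 * Per-CPU state: pc_ucfctrl and pc_globalctrl are software shadows of
 * the UCF_CTRL and UC_GLOBAL_CTRL MSRs, letting the start/stop paths
 * flip individual enable bits without reading the hardware back.
 * pc_uncorepmcs[] is a flexible array member sized at allocation time
 * with one pmc_hw slot per uncore counter.
 */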

static struct uncore_cpu **uncore_pcpu;

static uint64_t uncore_pmcmask;

static int uncore_ucf_ri;	/* relative index of fixed counters */
static int uncore_ucf_width;
static int uncore_ucf_npmc;

static int uncore_ucp_width;
static int uncore_ucp_npmc;
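
/*
 * Row-index ("ri") convention: within the uncore classes, rows
 * [0, uncore_ucp_npmc) are the programmable counters and the fixed
 * function counters start at row uncore_ucf_ri (== uncore_ucp_npmc),
 * which is why the UCF code below indexes the per-CPU state with
 * (ri + uncore_ucf_ri).
 */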

static int
uncore_pcpu_noop(struct pmc_mdep *md, int cpu)
{
	(void) md;
	(void) cpu;
	return (0);
}

static int
uncore_pcpu_init(struct pmc_mdep *md, int cpu)
{
	struct pmc_cpu *pc;
	struct uncore_cpu *cc;
	struct pmc_hw *phw;
	int uncore_ri, n, npmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[ucf,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"uncore-init cpu=%d", cpu);

	uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;
	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
	npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

	cc = malloc(sizeof(struct uncore_cpu) + npmc * sizeof(struct pmc_hw),
	    M_PMC, M_WAITOK | M_ZERO);

	uncore_pcpu[cpu] = cc;
	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL && cc != NULL,
	    ("[uncore,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));

	for (n = 0, phw = cc->pc_uncorepmcs; n < npmc; n++, phw++) {
		phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) |
		    PMC_PHW_INDEX_TO_STATE(n + uncore_ri);
		phw->phw_pmc = NULL;
		pc->pc_hwpmcs[n + uncore_ri] = phw;
	}

	return (0);
}
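
/*
 * A single allocation in uncore_pcpu_init() covers both counter
 * classes: the flexible pc_uncorepmcs[] array is sized for the
 * combined UCP and UCF counter count, and each hardware slot is also
 * published in the shared pmc_pcpu row table.
 */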

static int
uncore_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	int uncore_ri, n, npmc;
	struct pmc_cpu *pc;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] insane cpu number (%d)", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"uncore-pcpu-fini cpu=%d", cpu);

	if ((cc = uncore_pcpu[cpu]) == NULL)
		return (0);

	uncore_pcpu[cpu] = NULL;

	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL, ("[uncore,%d] NULL per-cpu %d state", __LINE__,
	    cpu));

	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
	uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;

	for (n = 0; n < npmc; n++)
		wrmsr(SELECTSEL(uncore_cputype) + n, 0);

	wrmsr(UCF_CTRL, 0);
	npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

	for (n = 0; n < npmc; n++)
		pc->pc_hwpmcs[n + uncore_ri] = NULL;

	free(cc, M_PMC);

	return (0);
}

/*
 * Fixed function counters.
 */
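
/*
 * Sampling PMCs count upward and interrupt on overflow, so a reload
 * count R is programmed as (2^width - R): the counter then overflows
 * after exactly R events.  The converse recovers R from a raw counter
 * value after masking to the counter width.
 */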

static pmc_value_t
ucf_perfctr_value_to_reload_count(pmc_value_t v)
{
	v &= (1ULL << uncore_ucf_width) - 1;
	return (1ULL << uncore_ucf_width) - v;
}

static pmc_value_t
ucf_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << uncore_ucf_width) - rlc;
}

static int
ucf_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint32_t caps, flags;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	PMCDBG2(MDP,ALL,1, "ucf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);

	if (ri < 0 || ri >= uncore_ucf_npmc)
		return (EINVAL);

	caps = a->pm_caps;

	if (a->pm_class != PMC_CLASS_UCF ||
	    (caps & UCF_PMC_CAPS) != caps)
		return (EINVAL);

	flags = UCF_EN;

	pm->pm_md.pm_ucf.pm_ucf_ctrl = (flags << (ri * 4));

	PMCDBG1(MDP,ALL,2, "ucf-allocate config=0x%jx",
	    (uintmax_t) pm->pm_md.pm_ucf.pm_ucf_ctrl);

	return (0);
}
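
/*
 * UCF_CTRL dedicates a 4-bit control field to each fixed counter, so
 * the enable bits computed above are shifted by (ri * 4) and are
 * cleared again with (UCF_MASK << (ri * 4)) in ucf_stop_pmc().
 */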

static int
ucf_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "ucf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__,
	    cpu));

	uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc = pm;

	return (0);
}

static int
ucf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	struct pmc_hw *phw;
	char ucf_name[PMC_NAME_MAX];

	phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri];

	(void) snprintf(ucf_name, sizeof(ucf_name), "UCF-%d", ri);
	if ((error = copystr(ucf_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);

	pi->pm_class = PMC_CLASS_UCF;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc = NULL;
	}

	return (0);
}

static int
ucf_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	return (0);
}

static int
ucf_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d(%d) pmc not configured", __LINE__, cpu,
	    ri, ri + uncore_ucf_ri));

	tmp = rdmsr(UCF_CTR0 + ri);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ucf_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp;

	PMCDBG3(MDP,REA,1, "ucf-read cpu=%d ri=%d -> v=%jx", cpu, ri, *v);

	return (0);
}

static int
ucf_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	PMCDBG3(MDP,REL,1, "ucf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc == NULL,
	    ("[uncore,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}

static int
ucf_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct uncore_cpu *ucfc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG2(MDP,STA,1,"ucf-start cpu=%d ri=%d", cpu, ri);

	ucfc = uncore_pcpu[cpu];
	pm = ucfc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	ucfc->pc_ucfctrl |= pm->pm_md.pm_ucf.pm_ucf_ctrl;

	wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);
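
	/*
	 * Enable the counter in the shadow of UC_GLOBAL_CTRL.  The
	 * pc_resync retry mirrors the scheme used by the core-PMC
	 * code: if something flags a resync while the update is in
	 * flight, the write is re-issued so the hardware ends up
	 * matching the cached pc_globalctrl.
	 */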
	do {
		ucfc->pc_resync = 0;
		ucfc->pc_globalctrl |= (1ULL << (ri + SELECTOFF(uncore_cputype)));
		wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);
	} while (ucfc->pc_resync != 0);

	PMCDBG4(MDP,STA,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
	    ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
	    ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

	return (0);
}

static int
ucf_stop_pmc(int cpu, int ri)
{
	uint32_t fc;
	struct uncore_cpu *ucfc;

	PMCDBG2(MDP,STO,1,"ucf-stop cpu=%d ri=%d", cpu, ri);

	ucfc = uncore_pcpu[cpu];

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	fc = (UCF_MASK << (ri * 4));

	ucfc->pc_ucfctrl &= ~fc;

	PMCDBG1(MDP,STO,1,"ucf-stop ucfctrl=%x", ucfc->pc_ucfctrl);
	wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

	do {
		ucfc->pc_resync = 0;
		ucfc->pc_globalctrl &= ~(1ULL << (ri + SELECTOFF(uncore_cputype)));
		wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);
	} while (ucfc->pc_resync != 0);

	PMCDBG4(MDP,STO,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
	    ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
	    ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

	return (0);
}

static int
ucf_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct uncore_cpu *cc;
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucf_reload_count_to_perfctr_value(v);

	wrmsr(UCF_CTRL, 0);	/* Turn off fixed counters */
	wrmsr(UCF_CTR0 + ri, v);
	wrmsr(UCF_CTRL, cc->pc_ucfctrl);

	PMCDBG4(MDP,WRI,1, "ucf-write cpu=%d ri=%d v=%jx ucfctrl=%jx ",
	    cpu, ri, v, (uintmax_t) rdmsr(UCF_CTRL));

	return (0);
}

static void
ucf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[ucf,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "ucf-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF];

	pcd->pcd_caps	= UCF_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_UCF;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= ucf_allocate_pmc;
	pcd->pcd_config_pmc	= ucf_config_pmc;
	pcd->pcd_describe	= ucf_describe;
	pcd->pcd_get_config	= ucf_get_config;
	pcd->pcd_get_msr	= NULL;
	pcd->pcd_pcpu_fini	= uncore_pcpu_noop;
	pcd->pcd_pcpu_init	= uncore_pcpu_noop;
	pcd->pcd_read_pmc	= ucf_read_pmc;
	pcd->pcd_release_pmc	= ucf_release_pmc;
	pcd->pcd_start_pmc	= ucf_start_pmc;
	pcd->pcd_stop_pmc	= ucf_stop_pmc;
	pcd->pcd_write_pmc	= ucf_write_pmc;

	md->pmd_npmc	+= npmc;
}
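
/*
 * The UCF class shares the per-CPU state allocated for the UCP class:
 * its pcpu init/fini hooks are uncore_pcpu_noop(), while
 * uncore_pcpu_init()/uncore_pcpu_fini(), registered by
 * ucp_initialize() below, set up and tear down both classes at once.
 */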

/*
 * Intel programmable PMCs.
 */

/*
 * Event descriptor tables.
 *
 * For each event id, we track:
 *
 * 1. The CPUs that the event is valid for.
 *
 * 2. If the event uses a fixed UMASK, the value of the umask field.
 *    If the event doesn't use a fixed UMASK, a mask of legal bits
 *    to check against.
 */
struct ucp_event_descr {
	enum pmc_event	ucp_ev;
	unsigned char	ucp_evcode;
	unsigned char	ucp_umask;
	unsigned char	ucp_flags;
};

#define	UCP_F_I7	(1 << 0)	/* CPU: Core i7 */
#define	UCP_F_WM	(1 << 1)	/* CPU: Westmere */
#define	UCP_F_SB	(1 << 2)	/* CPU: Sandy Bridge */
#define	UCP_F_HW	(1 << 3)	/* CPU: Haswell */
#define	UCP_F_FM	(1 << 4)	/* Fixed mask */

#define	UCP_F_ALLCPUS	\
    (UCP_F_I7 | UCP_F_WM)

#define	UCP_F_CMASK	0xFF000000
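
/*
 * UCP_F_CMASK covers bits 24-31 of the event select encoding, i.e.
 * the counter-mask (cmask) field of the evsel register.
 */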

static pmc_value_t
ucp_perfctr_value_to_reload_count(pmc_value_t v)
{
	v &= (1ULL << uncore_ucp_width) - 1;
	return (1ULL << uncore_ucp_width) - v;
}

static pmc_value_t
ucp_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << uncore_ucp_width) - rlc;
}

/*
 * Counter specific event information for Sandybridge and Haswell.
 */
static int
ucp_event_sb_hw_ok_on_counter(uint8_t ev, int ri)
{
	uint32_t mask;

	switch (ev) {
		/*
		 * Events valid only on counter 0.
		 */
	case 0x80:
	case 0x83:
		mask = (1 << 0);
		break;

	default:
		mask = ~0;	/* Any row index is ok. */
	}

	return (mask & (1 << ri));
}
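
/*
 * The mask built above has bit n set when the event may be scheduled
 * on row index (counter) n, so the return value is non-zero exactly
 * when the requested counter is legal for the event.
 * ucp_allocate_pmc() applies this check at allocation time.
 */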

static int
ucp_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint8_t ev;
	uint32_t caps;
	const struct pmc_md_ucp_op_pmcallocate *ucp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index value %d", __LINE__, ri));

	/* check requested capabilities */
	caps = a->pm_caps;
	if ((UCP_PMC_CAPS & caps) != caps)
		return (EPERM);

	ucp = &a->pm_md.pm_ucp;
	ev = UCP_EVSEL(ucp->pm_ucp_config);
	switch (uncore_cputype) {
	case PMC_CPU_INTEL_HASWELL:
	case PMC_CPU_INTEL_SANDYBRIDGE:
		if (ucp_event_sb_hw_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
		break;
	default:
		break;
	}

	pm->pm_md.pm_ucp.pm_ucp_evsel = ucp->pm_ucp_config | UCP_EN;

	return (0);
}
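
/*
 * Note that the evsel word is precomputed at allocation time with
 * UCP_EN already set, so ucp_start_pmc() below can start the counter
 * with a single write to the event select register.
 */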

static int
ucp_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "ucp-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__,
	    cpu));

	uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc = pm;

	return (0);
}

static int
ucp_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	struct pmc_hw *phw;
	char ucp_name[PMC_NAME_MAX];

	phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri];

	(void) snprintf(ucp_name, sizeof(ucp_name), "UCP-%d", ri);
	if ((error = copystr(ucp_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);

	pi->pm_class = PMC_CLASS_UCP;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc = NULL;
	}

	return (0);
}

static int
ucp_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

	return (0);
}

static int
ucp_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
	    ri));

	tmp = rdmsr(UCP_PMC0 + ri);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ucp_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp;

	PMCDBG4(MDP,REA,1, "ucp-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
	    UCP_PMC0 + ri, *v);

	return (0);
}

static int
ucp_release_pmc(int cpu, int ri, struct pmc *pm)
{
	(void) pm;

	PMCDBG3(MDP,REL,1, "ucp-release cpu=%d ri=%d pm=%p", cpu, ri,
	    pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc
	    == NULL, ("[uncore,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}

static int
ucp_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	uint32_t evsel;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] starting cpu%d,ri%d with no pmc configured",
	    __LINE__, cpu, ri));

	PMCDBG2(MDP,STA,1, "ucp-start cpu=%d ri=%d", cpu, ri);

	evsel = pm->pm_md.pm_ucp.pm_ucp_evsel;

	PMCDBG4(MDP,STA,2,
	    "ucp-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%x",
	    cpu, ri, SELECTSEL(uncore_cputype) + ri, evsel);
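
	/*
	 * The 0CH_04H/0CH_08H GQ snoop events are qualified by the
	 * separate MSR_GQ_SNOOP_MESF register rather than by their
	 * umask: the value written selects the snoop state counted
	 * (M=0x1, E=0x2, S=0x4, F=0x8), matching the _M/_E/_S/_F
	 * suffix of the event name.
	 */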

	/* Event specific configuration. */
	switch (pm->pm_event) {
	case PMC_EV_UCP_EVENT_0CH_04H_E:
	case PMC_EV_UCP_EVENT_0CH_08H_E:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x2);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_F:
	case PMC_EV_UCP_EVENT_0CH_08H_F:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x8);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_M:
	case PMC_EV_UCP_EVENT_0CH_08H_M:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x1);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_S:
	case PMC_EV_UCP_EVENT_0CH_08H_S:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x4);
		break;
	default:
		break;
	}

	wrmsr(SELECTSEL(uncore_cputype) + ri, evsel);

	do {
		cc->pc_resync = 0;
		cc->pc_globalctrl |= (1ULL << ri);
		wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);
	} while (cc->pc_resync != 0);

	return (0);
}

static int
ucp_stop_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
	    cpu, ri));

	PMCDBG2(MDP,STO,1, "ucp-stop cpu=%d ri=%d", cpu, ri);

	/* Stop the hardware. */
	wrmsr(SELECTSEL(uncore_cputype) + ri, 0);

	do {
		cc->pc_resync = 0;
		cc->pc_globalctrl &= ~(1ULL << ri);
		wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);
	} while (cc->pc_resync != 0);

	return (0);
}

static int
ucp_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu%d ri%d no configured PMC to write", __LINE__,
	    cpu, ri));

	PMCDBG4(MDP,WRI,1, "ucp-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
	    UCP_PMC0 + ri, v);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucp_reload_count_to_perfctr_value(v);

	/*
	 * Write the new value to the counter. The counter will be in
	 * a stopped state when the pcd_write() entry point is called.
	 */
	wrmsr(UCP_PMC0 + ri, v);

	return (0);
}

static void
ucp_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[ucp,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "ucp-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP];

	pcd->pcd_caps	= UCP_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_UCP;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= ucp_allocate_pmc;
	pcd->pcd_config_pmc	= ucp_config_pmc;
	pcd->pcd_describe	= ucp_describe;
	pcd->pcd_get_config	= ucp_get_config;
	pcd->pcd_get_msr	= NULL;
	pcd->pcd_pcpu_fini	= uncore_pcpu_fini;
	pcd->pcd_pcpu_init	= uncore_pcpu_init;
	pcd->pcd_read_pmc	= ucp_read_pmc;
	pcd->pcd_release_pmc	= ucp_release_pmc;
	pcd->pcd_start_pmc	= ucp_start_pmc;
	pcd->pcd_stop_pmc	= ucp_stop_pmc;
	pcd->pcd_write_pmc	= ucp_write_pmc;

	md->pmd_npmc	+= npmc;
}
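
/*
 * uncore_pmcmask below mirrors the bit layout of UC_GLOBAL_CTRL: one
 * enable bit per programmable counter starting at bit 0, plus the
 * fixed counter enables at the cputype-dependent SELECTOFF() offset.
 */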

int
pmc_uncore_initialize(struct pmc_mdep *md, int maxcpu)
{
	uncore_cputype = md->pmd_cputype;
	uncore_pmcmask = 0;

	/*
	 * Initialize programmable counters.
	 */

	uncore_ucp_npmc  = 8;
	uncore_ucp_width = 48;

	uncore_pmcmask |= ((1ULL << uncore_ucp_npmc) - 1);

	ucp_initialize(md, maxcpu, uncore_ucp_npmc, uncore_ucp_width);

	/*
	 * Initialize fixed function counters, if present.
	 */
	uncore_ucf_ri = uncore_ucp_npmc;
	uncore_ucf_npmc  = 1;
	uncore_ucf_width = 48;

	ucf_initialize(md, maxcpu, uncore_ucf_npmc, uncore_ucf_width);
	uncore_pmcmask |= ((1ULL << uncore_ucf_npmc) - 1) << SELECTOFF(uncore_cputype);

	PMCDBG2(MDP,INI,1,"uncore-init pmcmask=0x%jx ucfri=%d", uncore_pmcmask,
	    uncore_ucf_ri);

	uncore_pcpu = malloc(sizeof(*uncore_pcpu) * maxcpu, M_PMC,
	    M_ZERO | M_WAITOK);

	return (0);
}

void
pmc_uncore_finalize(struct pmc_mdep *md)
{
	PMCDBG0(MDP,INI,1, "uncore-finalize");

	free(uncore_pcpu, M_PMC);
	uncore_pcpu = NULL;
}