/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 Fabien Thomas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel Uncore PMCs.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>

#define UCF_PMC_CAPS \
        (PMC_CAP_READ | PMC_CAP_WRITE)

#define UCP_PMC_CAPS \
    (PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
    PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)

#define SELECTSEL(x) \
        (((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
        UCP_CB0_EVSEL0 : UCP_EVSEL0)

#define SELECTOFF(x) \
        (((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
        UCF_OFFSET_SB : UCF_OFFSET)
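
/*
 * On Sandy Bridge and Haswell the uncore programmable event-select
 * MSRs live at a different base (UCP_CB0_EVSEL0 instead of
 * UCP_EVSEL0), and the fixed-counter enable bits sit at a different
 * offset in UC_GLOBAL_CTRL (UCF_OFFSET_SB instead of UCF_OFFSET), so
 * both macros key off the detected CPU type.
 */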

static enum pmc_cputype uncore_cputype;

struct uncore_cpu {
        volatile uint32_t       pc_ucfctrl;     /* Fixed function control. */
        volatile uint64_t       pc_globalctrl;  /* Global control register. */
        struct pmc_hw           pc_uncorepmcs[]; /* UCP rows, then UCF. */
};

static struct uncore_cpu **uncore_pcpu;

static uint64_t uncore_pmcmask;

static int uncore_ucf_ri;               /* relative index of fixed counters */
static int uncore_ucf_width;
static int uncore_ucf_npmc;

static int uncore_ucp_width;
static int uncore_ucp_npmc;

static int
uncore_pcpu_noop(struct pmc_mdep *md, int cpu)
{
        (void) md;
        (void) cpu;
        return (0);
}

static int
uncore_pcpu_init(struct pmc_mdep *md, int cpu)
{
        struct pmc_cpu *pc;
        struct uncore_cpu *cc;
        struct pmc_hw *phw;
        int uncore_ri, n, npmc;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] insane cpu number %d", __LINE__, cpu));

        PMCDBG1(MDP,INI,1,"uncore-init cpu=%d", cpu);

        uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;
        npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
        npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

        cc = malloc(sizeof(struct uncore_cpu) + npmc * sizeof(struct pmc_hw),
            M_PMC, M_WAITOK | M_ZERO);

        uncore_pcpu[cpu] = cc;
        pc = pmc_pcpu[cpu];

        KASSERT(pc != NULL && cc != NULL,
            ("[uncore,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));

        for (n = 0, phw = cc->pc_uncorepmcs; n < npmc; n++, phw++) {
                phw->phw_state    = PMC_PHW_FLAG_IS_ENABLED |
                    PMC_PHW_CPU_TO_STATE(cpu) |
                    PMC_PHW_INDEX_TO_STATE(n + uncore_ri);
                phw->phw_pmc      = NULL;
                pc->pc_hwpmcs[n + uncore_ri]  = phw;
        }

        return (0);
}

static int
uncore_pcpu_fini(struct pmc_mdep *md, int cpu)
{
        int uncore_ri, n, npmc;
        struct pmc_cpu *pc;
        struct uncore_cpu *cc;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] insane cpu number (%d)", __LINE__, cpu));

        PMCDBG1(MDP,INI,1,"uncore-pcpu-fini cpu=%d", cpu);

        if ((cc = uncore_pcpu[cpu]) == NULL)
                return (0);

        uncore_pcpu[cpu] = NULL;

        pc = pmc_pcpu[cpu];

        KASSERT(pc != NULL, ("[uncore,%d] NULL per-cpu %d state", __LINE__,
                cpu));

        npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
        uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;

        for (n = 0; n < npmc; n++)
                wrmsr(SELECTSEL(uncore_cputype) + n, 0);

        wrmsr(UCF_CTRL, 0);
        npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

        for (n = 0; n < npmc; n++)
                pc->pc_hwpmcs[n + uncore_ri] = NULL;

        free(cc, M_PMC);

        return (0);
}

/*
 * Fixed function counters.
 */

static pmc_value_t
ucf_perfctr_value_to_reload_count(pmc_value_t v)
{

        /* If the PMC has overflowed, return a reload count of zero. */
        if ((v & (1ULL << (uncore_ucf_width - 1))) == 0)
                return (0);
        v &= (1ULL << uncore_ucf_width) - 1;
        return (1ULL << uncore_ucf_width) - v;
}

static pmc_value_t
ucf_reload_count_to_perfctr_value(pmc_value_t rlc)
{
        return (1ULL << uncore_ucf_width) - rlc;
}
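
/*
 * Worked example of the conversion above, assuming the 48-bit counter
 * width configured in pmc_uncore_initialize(): a reload count of 1000
 * is programmed as 2^48 - 1000, so the counter overflows after 1000
 * increments.  Reading back a value v with bit 47 still set recovers
 * the remaining count as 2^48 - v; a clear bit 47 means the counter
 * has already wrapped and a reload count of zero is returned.
 */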

static int
ucf_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
        uint32_t flags;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

        PMCDBG2(MDP,ALL,1, "ucf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);

        if (ri < 0 || ri >= uncore_ucf_npmc)
                return (EINVAL);

        if (a->pm_class != PMC_CLASS_UCF)
                return (EINVAL);

        if ((a->pm_flags & PMC_F_EV_PMU) == 0)
                return (EINVAL);

        flags = UCF_EN;

        pm->pm_md.pm_ucf.pm_ucf_ctrl = (flags << (ri * 4));

        PMCDBG1(MDP,ALL,2, "ucf-allocate config=0x%jx",
            (uintmax_t) pm->pm_md.pm_ucf.pm_ucf_ctrl);

        return (0);
}

static int
ucf_config_pmc(int cpu, int ri, struct pmc *pm)
{
        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

        KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
            ("[uncore,%d] illegal row-index %d", __LINE__, ri));

        PMCDBG3(MDP,CFG,1, "ucf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

        KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__,
            cpu));

        uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc = pm;

        return (0);
}

static int
ucf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
        struct pmc_hw *phw;

        phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri];

        snprintf(pi->pm_name, sizeof(pi->pm_name), "UCF-%d", ri);
        pi->pm_class = PMC_CLASS_UCF;

        if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
                pi->pm_enabled = TRUE;
                *ppmc          = phw->phw_pmc;
        } else {
                pi->pm_enabled = FALSE;
                *ppmc          = NULL;
        }

        return (0);
}

static int
ucf_get_config(int cpu, int ri, struct pmc **ppm)
{
        *ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

        return (0);
}

static int
ucf_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
{
        pmc_value_t tmp;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
            ("[uncore,%d] illegal row-index %d", __LINE__, ri));

        tmp = rdmsr(UCF_CTR0 + ri);

        if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
                *v = ucf_perfctr_value_to_reload_count(tmp);
        else
                *v = tmp;

        PMCDBG3(MDP,REA,1, "ucf-read cpu=%d ri=%d -> v=%jx", cpu, ri, *v);

        return (0);
}

static int
ucf_release_pmc(int cpu, int ri, struct pmc *pmc)
{
        PMCDBG3(MDP,REL,1, "ucf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
            ("[uncore,%d] illegal row-index %d", __LINE__, ri));

        KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc == NULL,
            ("[uncore,%d] PHW pmc non-NULL", __LINE__));

        return (0);
}

static int
ucf_start_pmc(int cpu, int ri, struct pmc *pm)
{
        struct uncore_cpu *ucfc;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
            ("[uncore,%d] illegal row-index %d", __LINE__, ri));

        PMCDBG2(MDP,STA,1,"ucf-start cpu=%d ri=%d", cpu, ri);

        ucfc = uncore_pcpu[cpu];
        ucfc->pc_ucfctrl |= pm->pm_md.pm_ucf.pm_ucf_ctrl;

        wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

        /*
         * The enable bit for fixed counter 'ri' sits above the
         * programmable-counter bits in UC_GLOBAL_CTRL, at a
         * CPU-dependent offset (see SELECTOFF()).
         */
        ucfc->pc_globalctrl |= (1ULL << (ri + SELECTOFF(uncore_cputype)));
        wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);

        PMCDBG4(MDP,STA,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
            ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
            ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

        return (0);
}

static int
ucf_stop_pmc(int cpu, int ri, struct pmc *pm __unused)
{
        uint32_t fc;
        struct uncore_cpu *ucfc;

        PMCDBG2(MDP,STO,1,"ucf-stop cpu=%d ri=%d", cpu, ri);

        ucfc = uncore_pcpu[cpu];

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
            ("[uncore,%d] illegal row-index %d", __LINE__, ri));

        fc = (UCF_MASK << (ri * 4));

        ucfc->pc_ucfctrl &= ~fc;

        PMCDBG1(MDP,STO,1,"ucf-stop ucfctrl=%x", ucfc->pc_ucfctrl);
        wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

        /* Don't need to write UC_GLOBAL_CTRL, one disable is enough. */

        PMCDBG4(MDP,STO,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
            ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
            ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

        return (0);
}

static int
ucf_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
{
        struct uncore_cpu *cc;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
            ("[uncore,%d] illegal row-index %d", __LINE__, ri));

        cc = uncore_pcpu[cpu];

        if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
                v = ucf_reload_count_to_perfctr_value(v);

        wrmsr(UCF_CTRL, 0);     /* Turn off fixed counters */
        wrmsr(UCF_CTR0 + ri, v);
        wrmsr(UCF_CTRL, cc->pc_ucfctrl);

        PMCDBG4(MDP,WRI,1, "ucf-write cpu=%d ri=%d v=%jx ucfctrl=%jx",
            cpu, ri, v, (uintmax_t) rdmsr(UCF_CTRL));

        return (0);
}
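
/*
 * Note on the sequence above: all fixed-function counters share the
 * single UCF_CTRL control register, so reprogramming one counter
 * briefly disables every fixed counter before the cached control
 * value is restored.
 */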
static void
ucf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
        struct pmc_classdep *pcd;

        KASSERT(md != NULL, ("[ucf,%d] md is NULL", __LINE__));

        PMCDBG0(MDP,INI,1, "ucf-initialize");

        pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF];

        pcd->pcd_caps   = UCF_PMC_CAPS;
        pcd->pcd_class  = PMC_CLASS_UCF;
        pcd->pcd_num    = npmc;
        pcd->pcd_ri     = md->pmd_npmc;
        pcd->pcd_width  = pmcwidth;

        pcd->pcd_allocate_pmc   = ucf_allocate_pmc;
        pcd->pcd_config_pmc     = ucf_config_pmc;
        pcd->pcd_describe       = ucf_describe;
        pcd->pcd_get_config     = ucf_get_config;
        pcd->pcd_get_msr        = NULL;
        pcd->pcd_pcpu_fini      = uncore_pcpu_noop;
        pcd->pcd_pcpu_init      = uncore_pcpu_noop;
        pcd->pcd_read_pmc       = ucf_read_pmc;
        pcd->pcd_release_pmc    = ucf_release_pmc;
        pcd->pcd_start_pmc      = ucf_start_pmc;
        pcd->pcd_stop_pmc       = ucf_stop_pmc;
        pcd->pcd_write_pmc      = ucf_write_pmc;

        md->pmd_npmc           += npmc;
}

/*
 * Intel programmable PMCs.
 */

/*
 * Event descriptor tables.
 *
 * For each event id, we track:
 *
 * 1. The CPUs that the event is valid for.
 *
 * 2. If the event uses a fixed UMASK, the value of the umask field.
 *    If the event doesn't use a fixed UMASK, a mask of legal bits
 *    to check against.
 */

struct ucp_event_descr {
        enum pmc_event  ucp_ev;
        unsigned char   ucp_evcode;
        unsigned char   ucp_umask;
        unsigned char   ucp_flags;
};

#define UCP_F_I7        (1 << 0)        /* CPU: Core i7 */
#define UCP_F_WM        (1 << 1)        /* CPU: Westmere */
#define UCP_F_SB        (1 << 2)        /* CPU: Sandy Bridge */
#define UCP_F_HW        (1 << 3)        /* CPU: Haswell */
#define UCP_F_FM        (1 << 4)        /* Fixed mask */

#define UCP_F_ALLCPUS                                   \
    (UCP_F_I7 | UCP_F_WM)

#define UCP_F_CMASK             0xFF000000
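
/*
 * No descriptor table is currently defined in this file.  A
 * hypothetical entry would pair an event id with its encoding, umask
 * and validity flags, e.g.:
 *
 *      { PMC_EV_UCP_..., 0x.., 0x.., UCP_F_ALLCPUS | UCP_F_FM }
 */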

static pmc_value_t
ucp_perfctr_value_to_reload_count(pmc_value_t v)
{
        v &= (1ULL << uncore_ucp_width) - 1;
        return (1ULL << uncore_ucp_width) - v;
}

static pmc_value_t
ucp_reload_count_to_perfctr_value(pmc_value_t rlc)
{
        return (1ULL << uncore_ucp_width) - rlc;
}

/*
 * Counter-specific event information for Sandy Bridge and Haswell.
 */
static int
ucp_event_sb_hw_ok_on_counter(uint8_t ev, int ri)
{
        uint32_t mask;

        switch (ev) {
        /*
         * Events valid only on counter 0.
         */
        case 0x80:
        case 0x83:
                mask = (1 << 0);
                break;

        default:
                mask = ~0;      /* Any row index is ok. */
        }

        return (mask & (1 << ri));
}
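
/*
 * For example, events 0x80 and 0x83 may only be counted on counter 0,
 * so ucp_event_sb_hw_ok_on_counter(0x80, 1) returns 0 and the
 * corresponding allocation in ucp_allocate_pmc() below fails with
 * EINVAL.
 */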

static int
ucp_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
        uint8_t ev;
        const struct pmc_md_ucp_op_pmcallocate *ucp;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal CPU %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
            ("[uncore,%d] illegal row-index value %d", __LINE__, ri));

        if (a->pm_class != PMC_CLASS_UCP)
                return (EINVAL);

        if ((a->pm_flags & PMC_F_EV_PMU) == 0)
                return (EINVAL);

        ucp = &a->pm_md.pm_ucp;
        ev = UCP_EVSEL(ucp->pm_ucp_config);
        switch (uncore_cputype) {
        case PMC_CPU_INTEL_HASWELL:
        case PMC_CPU_INTEL_SANDYBRIDGE:
                if (ucp_event_sb_hw_ok_on_counter(ev, ri) == 0)
                        return (EINVAL);
                break;
        default:
                break;
        }

        pm->pm_md.pm_ucp.pm_ucp_evsel = ucp->pm_ucp_config | UCP_EN;

        return (0);
}

static int
ucp_config_pmc(int cpu, int ri, struct pmc *pm)
{
        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

        KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
            ("[uncore,%d] illegal row-index %d", __LINE__, ri));

        PMCDBG3(MDP,CFG,1, "ucp-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

        KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__,
            cpu));

        uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc = pm;

        return (0);
}

static int
ucp_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
        struct pmc_hw *phw;

        phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri];

        snprintf(pi->pm_name, sizeof(pi->pm_name), "UCP-%d", ri);
        pi->pm_class = PMC_CLASS_UCP;

        if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
                pi->pm_enabled = TRUE;
                *ppmc          = phw->phw_pmc;
        } else {
                pi->pm_enabled = FALSE;
                *ppmc          = NULL;
        }

        return (0);
}

static int
ucp_get_config(int cpu, int ri, struct pmc **ppm)
{
        *ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

        return (0);
}

static int
ucp_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
{
        pmc_value_t tmp;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
            ("[uncore,%d] illegal row-index %d", __LINE__, ri));

        tmp = rdmsr(UCP_PMC0 + ri);
        if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
                *v = ucp_perfctr_value_to_reload_count(tmp);
        else
                *v = tmp;

        PMCDBG4(MDP,REA,1, "ucp-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
            UCP_PMC0 + ri, *v);

        return (0);
}

static int
ucp_release_pmc(int cpu, int ri, struct pmc *pm)
{
        (void) pm;

        PMCDBG3(MDP,REL,1, "ucp-release cpu=%d ri=%d pm=%p", cpu, ri, pm);

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
            ("[uncore,%d] illegal row-index %d", __LINE__, ri));

        KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc == NULL,
            ("[uncore,%d] PHW pmc non-NULL", __LINE__));

        return (0);
}

static int
ucp_start_pmc(int cpu, int ri, struct pmc *pm)
{
        uint64_t evsel;
        struct uncore_cpu *cc;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
            ("[uncore,%d] illegal row-index %d", __LINE__, ri));

        cc = uncore_pcpu[cpu];

        PMCDBG2(MDP,STA,1, "ucp-start cpu=%d ri=%d", cpu, ri);

        evsel = pm->pm_md.pm_ucp.pm_ucp_evsel;

        PMCDBG4(MDP,STA,2,
            "ucp-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%jx",
            cpu, ri, SELECTSEL(uncore_cputype) + ri, (uintmax_t) evsel);

        wrmsr(SELECTSEL(uncore_cputype) + ri, evsel);

        cc->pc_globalctrl |= (1ULL << ri);
        wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);

        return (0);
}

static int
ucp_stop_pmc(int cpu, int ri, struct pmc *pm __unused)
{

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
            ("[uncore,%d] illegal row index %d", __LINE__, ri));

        PMCDBG2(MDP,STO,1, "ucp-stop cpu=%d ri=%d", cpu, ri);

        /* Stop hw. */
        wrmsr(SELECTSEL(uncore_cputype) + ri, 0);

        /* Don't need to write UC_GLOBAL_CTRL, one disable is enough. */

        return (0);
}

static int
ucp_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
{

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
            ("[uncore,%d] illegal row index %d", __LINE__, ri));

        PMCDBG4(MDP,WRI,1, "ucp-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
            UCP_PMC0 + ri, v);

        if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
                v = ucp_reload_count_to_perfctr_value(v);

        /*
         * Write the new value to the counter.  The counter will be in
         * a stopped state when the pcd_write() entry point is called.
         */

        wrmsr(UCP_PMC0 + ri, v);

        return (0);
}

static void
ucp_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
        struct pmc_classdep *pcd;

        KASSERT(md != NULL, ("[ucp,%d] md is NULL", __LINE__));

        PMCDBG0(MDP,INI,1, "ucp-initialize");

        pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP];

        pcd->pcd_caps   = UCP_PMC_CAPS;
        pcd->pcd_class  = PMC_CLASS_UCP;
        pcd->pcd_num    = npmc;
        pcd->pcd_ri     = md->pmd_npmc;
        pcd->pcd_width  = pmcwidth;

        pcd->pcd_allocate_pmc   = ucp_allocate_pmc;
        pcd->pcd_config_pmc     = ucp_config_pmc;
        pcd->pcd_describe       = ucp_describe;
        pcd->pcd_get_config     = ucp_get_config;
        pcd->pcd_get_msr        = NULL;
        pcd->pcd_pcpu_fini      = uncore_pcpu_fini;
        pcd->pcd_pcpu_init      = uncore_pcpu_init;
        pcd->pcd_read_pmc       = ucp_read_pmc;
        pcd->pcd_release_pmc    = ucp_release_pmc;
        pcd->pcd_start_pmc      = ucp_start_pmc;
        pcd->pcd_stop_pmc       = ucp_stop_pmc;
        pcd->pcd_write_pmc      = ucp_write_pmc;

        md->pmd_npmc           += npmc;
}

int
pmc_uncore_initialize(struct pmc_mdep *md, int maxcpu)
{
        uncore_cputype = md->pmd_cputype;
        uncore_pmcmask = 0;

        /*
         * Initialize programmable counters.
         */

        uncore_ucp_npmc  = 8;
        uncore_ucp_width = 48;

        uncore_pmcmask |= ((1ULL << uncore_ucp_npmc) - 1);

        ucp_initialize(md, maxcpu, uncore_ucp_npmc, uncore_ucp_width);

        /*
         * Initialize fixed function counters, if present.
         */
        uncore_ucf_ri = uncore_ucp_npmc;
        uncore_ucf_npmc  = 1;
        uncore_ucf_width = 48;

        ucf_initialize(md, maxcpu, uncore_ucf_npmc, uncore_ucf_width);
        uncore_pmcmask |= ((1ULL << uncore_ucf_npmc) - 1) <<
            SELECTOFF(uncore_cputype);

        PMCDBG2(MDP,INI,1,"uncore-init pmcmask=0x%jx ucfri=%d", uncore_pmcmask,
            uncore_ucf_ri);

        uncore_pcpu = malloc(sizeof(*uncore_pcpu) * maxcpu, M_PMC,
            M_ZERO | M_WAITOK);

        return (0);
}
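
/*
 * Resulting row-index layout, given the eight programmable and one
 * fixed counter configured above: rows pcd_ri .. pcd_ri + 7 of the
 * machine-dependent state map to UCP counters 0-7, and the next row
 * (uncore_ucf_ri == 8 relative to the uncore class) maps to the
 * fixed counter.  This is why the UCF entry points index
 * pc_uncorepmcs[ri + uncore_ucf_ri].
 */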

void
pmc_uncore_finalize(struct pmc_mdep *md __unused)
{
        PMCDBG0(MDP,INI,1, "uncore-finalize");

        free(uncore_pcpu, M_PMC);
        uncore_pcpu = NULL;
}