/*-
 * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>

#include <machine/pmc_mdep.h>
#include <machine/cpu.h>

static int armv7_npmcs;

struct armv7_event_code_map {
        enum pmc_event  pe_ev;
        uint8_t         pe_code;
};

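/*
 * Pseudo event number: PMC_EV_CPU_CYCLES selects the dedicated cycle
 * counter (PMCCNTR) rather than one of the programmable event counters.
 */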
#define PMC_EV_CPU_CYCLES       0xFF

/*
 * Per-processor information.
 */
struct armv7_cpu {
        struct pmc_hw   *pc_armv7pmcs;
};

static struct armv7_cpu **armv7_pcpu;

/*
 * Interrupt Enable Set Register
 */
static __inline void
armv7_interrupt_enable(uint32_t pmc)
{
        uint32_t reg;

        reg = (1 << pmc);
        cp15_pminten_set(reg);
}

/*
 * Interrupt Enable Clear Register
 */
static __inline void
armv7_interrupt_disable(uint32_t pmc)
{
        uint32_t reg;

        reg = (1 << pmc);
        cp15_pminten_clr(reg);
}

/*
 * Count Enable Set Register
 */
static __inline void
armv7_counter_enable(unsigned int pmc)
{
        uint32_t reg;

        reg = (1 << pmc);
        cp15_pmcnten_set(reg);
}

/*
 * Count Enable Clear Register
 */
static __inline void
armv7_counter_disable(unsigned int pmc)
{
        uint32_t reg;

        reg = (1 << pmc);
        cp15_pmcnten_clr(reg);
}

/*
 * Performance Count Register N
 */
static uint32_t
armv7_pmcn_read(unsigned int pmc, uint32_t evsel)
{

        if (evsel == PMC_EV_CPU_CYCLES) {
                return ((uint32_t)cp15_pmccntr_get());
        }

        KASSERT(pmc < armv7_npmcs, ("%s: illegal PMC number %d", __func__, pmc));

        cp15_pmselr_set(pmc);
        return (cp15_pmxevcntr_get());
}

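/*
 * Write event counter N: the counter is selected through PMSELR and the
 * new value is written via PMXEVCNTR.
 */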
static uint32_t
armv7_pmcn_write(unsigned int pmc, uint32_t reg)
{

        KASSERT(pmc < armv7_npmcs, ("%s: illegal PMC number %d", __func__, pmc));

        cp15_pmselr_set(pmc);
        cp15_pmxevcntr_set(reg);

        return (reg);
}

static int
armv7_allocate_pmc(int cpu, int ri, struct pmc *pm,
  const struct pmc_op_pmcallocate *a)
{
        struct armv7_cpu *pac;
        enum pmc_event pe;
        uint32_t config;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < armv7_npmcs,
            ("[armv7,%d] illegal row index %d", __LINE__, ri));

        pac = armv7_pcpu[cpu];

        if (a->pm_class != PMC_CLASS_ARMV7)
                return (EINVAL);
        pe = a->pm_ev;

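        /* Only the event identifier bits form the hardware event selector. */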
        config = (pe & EVENT_ID_MASK);
        pm->pm_md.pm_armv7.pm_armv7_evsel = config;

        PMCDBG2(MDP, ALL, 2, "armv7-allocate ri=%d -> config=0x%x", ri, config);

        return 0;
}

static int
armv7_read_pmc(int cpu, int ri, pmc_value_t *v)
{
        pmc_value_t tmp;
        struct pmc *pm;
        register_t s;
        u_int reg;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < armv7_npmcs,
            ("[armv7,%d] illegal row index %d", __LINE__, ri));

        pm  = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;

        s = intr_disable();
        tmp = armv7_pmcn_read(ri, pm->pm_md.pm_armv7.pm_armv7_evsel);

        /* Check if counter has overflowed */
        if (pm->pm_md.pm_armv7.pm_armv7_evsel == PMC_EV_CPU_CYCLES)
                reg = (1u << 31);
        else
                reg = (1u << ri);

        if ((cp15_pmovsr_get() & reg) != 0) {
                /* Clear Overflow Flag */
                cp15_pmovsr_set(reg);
                pm->pm_pcpu_state[cpu].pps_overflowcnt++;

                /* Reread counter in case we raced. */
                tmp = armv7_pmcn_read(ri, pm->pm_md.pm_armv7.pm_armv7_evsel);
        }
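        /*
         * Extend the 32-bit hardware count to 64 bits using the
         * software-maintained overflow count.
         */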
        tmp += 0x100000000llu * pm->pm_pcpu_state[cpu].pps_overflowcnt;
        intr_restore(s);

        PMCDBG2(MDP, REA, 2, "armv7-read id=%d -> %jd", ri, tmp);
        if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
                /*
                 * Clamp value to 0 if the counter just overflowed,
                 * otherwise the returned reload count would wrap to a
                 * huge value.
                 */
                if ((tmp & (1ull << 63)) == 0)
                        tmp = 0;
                else
                        tmp = ARMV7_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
        }
        *v = tmp;

        return 0;
}

static int
armv7_write_pmc(int cpu, int ri, pmc_value_t v)
{
        struct pmc *pm;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < armv7_npmcs,
            ("[armv7,%d] illegal row-index %d", __LINE__, ri));

        pm  = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;

        if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
                v = ARMV7_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

        PMCDBG3(MDP, WRI, 1, "armv7-write cpu=%d ri=%d v=%jx", cpu, ri, v);

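        /*
         * Keep the upper 32 bits in the software overflow count; only the
         * low 32 bits reach the hardware counter.
         */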
        pm->pm_pcpu_state[cpu].pps_overflowcnt = v >> 32;
        if (pm->pm_md.pm_armv7.pm_armv7_evsel == PMC_EV_CPU_CYCLES)
                cp15_pmccntr_set(v);
        else
                armv7_pmcn_write(ri, v);

        return 0;
}

static int
armv7_config_pmc(int cpu, int ri, struct pmc *pm)
{
        struct pmc_hw *phw;

        PMCDBG3(MDP, CFG, 1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < armv7_npmcs,
            ("[armv7,%d] illegal row-index %d", __LINE__, ri));

        phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];

        KASSERT(pm == NULL || phw->phw_pmc == NULL,
            ("[armv7,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
            __LINE__, pm, phw->phw_pmc));

        phw->phw_pmc = pm;

        return 0;
}

static int
armv7_start_pmc(int cpu, int ri)
{
        struct pmc_hw *phw;
        uint32_t config;
        struct pmc *pm;

        phw    = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
        pm     = phw->phw_pmc;
        config = pm->pm_md.pm_armv7.pm_armv7_evsel;

        /*
         * Configure the event selection.
         */
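        /*
         * The cycle counter has no event type to program; it is instead
         * controlled through index 31 of the enable and interrupt-enable
         * registers, so substitute that index below.
         */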
        if (config != PMC_EV_CPU_CYCLES) {
                cp15_pmselr_set(ri);
                cp15_pmxevtyper_set(config);
        } else
                ri = 31;

        /*
         * Enable the PMC.
         */
        armv7_interrupt_enable(ri);
        armv7_counter_enable(ri);

        return 0;
}

static int
armv7_stop_pmc(int cpu, int ri)
{
        struct pmc_hw *phw;
        struct pmc *pm;
        uint32_t config;

        phw    = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
        pm     = phw->phw_pmc;
        config = pm->pm_md.pm_armv7.pm_armv7_evsel;
        if (config == PMC_EV_CPU_CYCLES)
                ri = 31;

        /*
         * Disable the PMCs.
         */
        armv7_counter_disable(ri);
        armv7_interrupt_disable(ri);

        return 0;
}

static int
armv7_release_pmc(int cpu, int ri, struct pmc *pmc)
{
        struct pmc_hw *phw;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < armv7_npmcs,
            ("[armv7,%d] illegal row-index %d", __LINE__, ri));

        phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
        KASSERT(phw->phw_pmc == NULL,
            ("[armv7,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

        return 0;
}

static int
armv7_intr(struct trapframe *tf)
{
        struct armv7_cpu *pc;
        int retval, ri;
        struct pmc *pm;
        int error;
        int reg, cpu;

        cpu = curcpu;
        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[armv7,%d] CPU %d out of range", __LINE__, cpu));

        retval = 0;
        pc = armv7_pcpu[cpu];

        for (ri = 0; ri < armv7_npmcs; ri++) {
                pm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;
                if (pm == NULL)
                        continue;

                /* Check if counter has overflowed */
                if (pm->pm_md.pm_armv7.pm_armv7_evsel == PMC_EV_CPU_CYCLES)
                        reg = (1u << 31);
                else
                        reg = (1u << ri);

                if ((cp15_pmovsr_get() & reg) == 0) {
                        continue;
                }

                /* Clear Overflow Flag */
                cp15_pmovsr_set(reg);

                retval = 1; /* Found an interrupting PMC. */

                pm->pm_pcpu_state[cpu].pps_overflowcnt += 1;

                if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
                        continue;

                if (pm->pm_state != PMC_STATE_RUNNING)
                        continue;

                error = pmc_process_interrupt(PMC_HR, pm, tf);
                if (error)
                        armv7_stop_pmc(cpu, ri);

                /* Reload sampling count */
                armv7_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
        }

        return (retval);
}

static int
armv7_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
        char armv7_name[PMC_NAME_MAX];
        struct pmc_hw *phw;
        int error;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[armv7,%d], illegal CPU %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < armv7_npmcs,
            ("[armv7,%d] row-index %d out of range", __LINE__, ri));

        phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
        snprintf(armv7_name, sizeof(armv7_name), "ARMV7-%d", ri);
        if ((error = copystr(armv7_name, pi->pm_name, PMC_NAME_MAX,
            NULL)) != 0)
                return error;
        pi->pm_class = PMC_CLASS_ARMV7;
        if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
                pi->pm_enabled = TRUE;
                *ppmc = phw->phw_pmc;
        } else {
                pi->pm_enabled = FALSE;
                *ppmc = NULL;
        }

        return (0);
}

static int
armv7_get_config(int cpu, int ri, struct pmc **ppm)
{

        *ppm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;

        return 0;
}

/*
 * XXX don't know what we should do here.
 */
static int
armv7_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
{

        return 0;
}

static int
armv7_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
{

        return 0;
}

static int
armv7_pcpu_init(struct pmc_mdep *md, int cpu)
{
        struct armv7_cpu *pac;
        struct pmc_hw  *phw;
        struct pmc_cpu *pc;
        uint32_t pmnc;
        int first_ri;
        int i;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[armv7,%d] wrong cpu number %d", __LINE__, cpu));
        PMCDBG1(MDP, INI, 1, "armv7-init cpu=%d", cpu);

        armv7_pcpu[cpu] = pac = malloc(sizeof(struct armv7_cpu), M_PMC,
            M_WAITOK|M_ZERO);

        pac->pc_armv7pmcs = malloc(sizeof(struct pmc_hw) * armv7_npmcs,
            M_PMC, M_WAITOK|M_ZERO);
        pc = pmc_pcpu[cpu];
        first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV7].pcd_ri;
        KASSERT(pc != NULL, ("[armv7,%d] NULL per-cpu pointer", __LINE__));

        for (i = 0, phw = pac->pc_armv7pmcs; i < armv7_npmcs; i++, phw++) {
                phw->phw_state    = PMC_PHW_FLAG_IS_ENABLED |
                    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
                phw->phw_pmc      = NULL;
                pc->pc_hwpmcs[i + first_ri] = phw;
        }

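        /*
         * Stop all counters, mask their interrupts and clear any pending
         * overflow flags.
         */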
        pmnc = 0xffffffff;
        cp15_pmcnten_clr(pmnc);
        cp15_pminten_clr(pmnc);
        cp15_pmovsr_set(pmnc);

        /* Enable unit */
        pmnc = cp15_pmcr_get();
        pmnc |= ARMV7_PMNC_ENABLE;
        cp15_pmcr_set(pmnc);

        return 0;
}

static int
armv7_pcpu_fini(struct pmc_mdep *md, int cpu)
{
        uint32_t pmnc;

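        /* Disable the performance monitors unit. */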
        pmnc = cp15_pmcr_get();
        pmnc &= ~ARMV7_PMNC_ENABLE;
        cp15_pmcr_set(pmnc);

        pmnc = 0xffffffff;
        cp15_pmcnten_clr(pmnc);
        cp15_pminten_clr(pmnc);
        cp15_pmovsr_set(pmnc);

        return 0;
}

struct pmc_mdep *
pmc_armv7_initialize()
{
        struct pmc_mdep *pmc_mdep;
        struct pmc_classdep *pcd;
        int idcode;
        int reg;

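        /*
         * Read PMCR to discover the number of event counters and the
         * part identification code.
         */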
        reg = cp15_pmcr_get();
        armv7_npmcs = (reg >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
        idcode = (reg & ARMV7_IDCODE_MASK) >> ARMV7_IDCODE_SHIFT;

        PMCDBG1(MDP, INI, 1, "armv7-init npmcs=%d", armv7_npmcs);

        /*
         * Allocate space for pointers to PMC HW descriptors and for
         * the MDEP structure used by MI code.
         */
        armv7_pcpu = malloc(sizeof(struct armv7_cpu *) * pmc_cpu_max(),
                M_PMC, M_WAITOK | M_ZERO);

        /* Just one class */
        pmc_mdep = pmc_mdep_alloc(1);

        switch (idcode) {
        case ARMV7_IDCODE_CORTEX_A9:
                pmc_mdep->pmd_cputype = PMC_CPU_ARMV7_CORTEX_A9;
                break;
        default:
        case ARMV7_IDCODE_CORTEX_A8:
                /*
                 * Only the common (architectural) events are implemented
                 * for the A8, so use that event map for all other machines
                 * as well.
                 */
                pmc_mdep->pmd_cputype = PMC_CPU_ARMV7_CORTEX_A8;
                break;
        }

        pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV7];
        pcd->pcd_caps  = ARMV7_PMC_CAPS;
        pcd->pcd_class = PMC_CLASS_ARMV7;
        pcd->pcd_num   = armv7_npmcs;
        pcd->pcd_ri    = pmc_mdep->pmd_npmc;
        pcd->pcd_width = 32;

        pcd->pcd_allocate_pmc   = armv7_allocate_pmc;
        pcd->pcd_config_pmc     = armv7_config_pmc;
        pcd->pcd_pcpu_fini      = armv7_pcpu_fini;
        pcd->pcd_pcpu_init      = armv7_pcpu_init;
        pcd->pcd_describe       = armv7_describe;
        pcd->pcd_get_config     = armv7_get_config;
        pcd->pcd_read_pmc       = armv7_read_pmc;
        pcd->pcd_release_pmc    = armv7_release_pmc;
        pcd->pcd_start_pmc      = armv7_start_pmc;
        pcd->pcd_stop_pmc       = armv7_stop_pmc;
        pcd->pcd_write_pmc      = armv7_write_pmc;

        pmc_mdep->pmd_intr       = armv7_intr;
        pmc_mdep->pmd_switch_in  = armv7_switch_in;
        pmc_mdep->pmd_switch_out = armv7_switch_out;

        pmc_mdep->pmd_npmc   += armv7_npmcs;

        return (pmc_mdep);
}

void
pmc_armv7_finalize(struct pmc_mdep *md)
{

}