libpmc: silence scan-build warnings
[FreeBSD/FreeBSD.git] lib/libpmc/libpmc.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2003-2008 Joseph Koshy
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/types.h>
33 #include <sys/param.h>
34 #include <sys/module.h>
35 #include <sys/pmc.h>
36 #include <sys/syscall.h>
37
38 #include <ctype.h>
39 #include <errno.h>
40 #include <fcntl.h>
41 #include <pmc.h>
42 #include <stdio.h>
43 #include <stdlib.h>
44 #include <string.h>
45 #include <strings.h>
46 #include <unistd.h>
47
48 #include "libpmcinternal.h"
49
50 /* Function prototypes */
51 #if defined(__i386__)
52 static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
53     struct pmc_op_pmcallocate *_pmc_config);
54 #endif
55 #if defined(__amd64__) || defined(__i386__)
56 static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
57     struct pmc_op_pmcallocate *_pmc_config);
58 static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
59     struct pmc_op_pmcallocate *_pmc_config);
60 static int ucf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
61     struct pmc_op_pmcallocate *_pmc_config);
62 static int ucp_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
63     struct pmc_op_pmcallocate *_pmc_config);
64 static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
65     struct pmc_op_pmcallocate *_pmc_config);
66 static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
67     struct pmc_op_pmcallocate *_pmc_config);
68 #endif
69 #if defined(__i386__)
70 static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
71     struct pmc_op_pmcallocate *_pmc_config);
72 static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
73     struct pmc_op_pmcallocate *_pmc_config);
74 #endif
75 #if defined(__amd64__) || defined(__i386__)
76 static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
77     struct pmc_op_pmcallocate *_pmc_config);
78 #endif
79 #if defined(__arm__)
80 #if defined(__XSCALE__)
81 static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
82     struct pmc_op_pmcallocate *_pmc_config);
83 #endif
84 static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
85     struct pmc_op_pmcallocate *_pmc_config);
86 #endif
87 #if defined(__aarch64__)
88 static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
89     struct pmc_op_pmcallocate *_pmc_config);
90 #endif
91 #if defined(__mips__)
92 static int mips_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
93     struct pmc_op_pmcallocate *_pmc_config);
94 #endif /* __mips__ */
95 static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
96     struct pmc_op_pmcallocate *_pmc_config);
97
98 #if defined(__powerpc__)
99 static int powerpc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
100     struct pmc_op_pmcallocate *_pmc_config);
101 #endif /* __powerpc__ */
102
103 #define PMC_CALL(cmd, params)                           \
104         syscall(pmc_syscall, PMC_OP_##cmd, (params))
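/*
 * For instance, PMC_CALL(PMCALLOCATE, &pmc_config) expands to
 * syscall(pmc_syscall, PMC_OP_PMCALLOCATE, (&pmc_config)); the usage shown
 * here is illustrative.
 */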
105
106 /*
107  * Event aliases provide a way for the user to ask for generic events
108  * like "cache-misses", or "instructions-retired".  These aliases are
109  * mapped to the appropriate canonical event descriptions using a
110  * lookup table.
111  */
112 struct pmc_event_alias {
113         const char      *pm_alias;
114         const char      *pm_spec;
115 };
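/*
 * For example, the k7_aliases table later in this file maps the generic
 * alias "branches" to the canonical specification "k7-retired-branches".
 */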
116
117 static const struct pmc_event_alias *pmc_mdep_event_aliases;
118
119 /*
120  * The pmc_event_descr structure maps symbolic names known to the user
121  * to integer codes used by the PMC KLD.
122  */
123 struct pmc_event_descr {
124         const char      *pm_ev_name;
125         enum pmc_event  pm_ev_code;
126 };
127
128 /*
129  * The pmc_class_descr structure maps class name prefixes for
130  * event names to event tables and other PMC class data.
131  */
132 struct pmc_class_descr {
133         const char      *pm_evc_name;
134         size_t          pm_evc_name_size;
135         enum pmc_class  pm_evc_class;
136         const struct pmc_event_descr *pm_evc_event_table;
137         size_t          pm_evc_event_table_size;
138         int             (*pm_evc_allocate_pmc)(enum pmc_event _pe,
139                             char *_ctrspec, struct pmc_op_pmcallocate *_pa);
140 };
141
142 #define PMC_TABLE_SIZE(N)       (sizeof(N)/sizeof(N[0]))
143 #define PMC_EVENT_TABLE_SIZE(N) PMC_TABLE_SIZE(N##_event_table)
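/*
 * For example, PMC_EVENT_TABLE_SIZE(k7) evaluates to
 * sizeof(k7_event_table)/sizeof(k7_event_table[0]).
 */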
144
145 #undef  __PMC_EV
146 #define __PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
147
148 /*
149  * PMC_CLASSDEP_TABLE(NAME, CLASS)
150  *
151  * Define a table mapping event names and aliases to HWPMC event IDs.
152  */
153 #define PMC_CLASSDEP_TABLE(N, C)                                \
154         static const struct pmc_event_descr N##_event_table[] = \
155         {                                                       \
156                 __PMC_EV_##C()                                  \
157         }
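/*
 * For example, PMC_CLASSDEP_TABLE(k7, K7) defines k7_event_table[],
 * filled in by the per-class event list macro __PMC_EV_K7() with
 * initializers of the form { "EVENTNAME", PMC_EV_K7_EVENTNAME }.
 */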
158
159 PMC_CLASSDEP_TABLE(iaf, IAF);
160 PMC_CLASSDEP_TABLE(k7, K7);
161 PMC_CLASSDEP_TABLE(k8, K8);
162 PMC_CLASSDEP_TABLE(p4, P4);
163 PMC_CLASSDEP_TABLE(p5, P5);
164 PMC_CLASSDEP_TABLE(p6, P6);
165 PMC_CLASSDEP_TABLE(xscale, XSCALE);
166 PMC_CLASSDEP_TABLE(armv7, ARMV7);
167 PMC_CLASSDEP_TABLE(armv8, ARMV8);
168 PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
169 PMC_CLASSDEP_TABLE(mips74k, MIPS74K);
170 PMC_CLASSDEP_TABLE(octeon, OCTEON);
171 PMC_CLASSDEP_TABLE(ucf, UCF);
172 PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
173 PMC_CLASSDEP_TABLE(ppc970, PPC970);
174 PMC_CLASSDEP_TABLE(e500, E500);
175
176 static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];
177
178 #undef  __PMC_EV_ALIAS
179 #define __PMC_EV_ALIAS(N,CODE)  { N, PMC_EV_##CODE },
180
181 static const struct pmc_event_descr atom_event_table[] =
182 {
183         __PMC_EV_ALIAS_ATOM()
184 };
185
186 static const struct pmc_event_descr atom_silvermont_event_table[] =
187 {
188         __PMC_EV_ALIAS_ATOM_SILVERMONT()
189 };
190
191 static const struct pmc_event_descr core_event_table[] =
192 {
193         __PMC_EV_ALIAS_CORE()
194 };
195
196
197 static const struct pmc_event_descr core2_event_table[] =
198 {
199         __PMC_EV_ALIAS_CORE2()
200 };
201
202 static const struct pmc_event_descr corei7_event_table[] =
203 {
204         __PMC_EV_ALIAS_COREI7()
205 };
206
207 static const struct pmc_event_descr nehalem_ex_event_table[] =
208 {
209         __PMC_EV_ALIAS_COREI7()
210 };
211
212 static const struct pmc_event_descr haswell_event_table[] =
213 {
214         __PMC_EV_ALIAS_HASWELL()
215 };
216
217 static const struct pmc_event_descr haswell_xeon_event_table[] =
218 {
219         __PMC_EV_ALIAS_HASWELL_XEON()
220 };
221
222 static const struct pmc_event_descr broadwell_event_table[] =
223 {
224         __PMC_EV_ALIAS_BROADWELL()
225 };
226
227 static const struct pmc_event_descr broadwell_xeon_event_table[] =
228 {
229         __PMC_EV_ALIAS_BROADWELL_XEON()
230 };
231
232 static const struct pmc_event_descr skylake_event_table[] =
233 {
234         __PMC_EV_ALIAS_SKYLAKE()
235 };
236
237 static const struct pmc_event_descr skylake_xeon_event_table[] =
238 {
239         __PMC_EV_ALIAS_SKYLAKE_XEON()
240 };
241
242 static const struct pmc_event_descr ivybridge_event_table[] =
243 {
244         __PMC_EV_ALIAS_IVYBRIDGE()
245 };
246
247 static const struct pmc_event_descr ivybridge_xeon_event_table[] =
248 {
249         __PMC_EV_ALIAS_IVYBRIDGE_XEON()
250 };
251
252 static const struct pmc_event_descr sandybridge_event_table[] =
253 {
254         __PMC_EV_ALIAS_SANDYBRIDGE()
255 };
256
257 static const struct pmc_event_descr sandybridge_xeon_event_table[] =
258 {
259         __PMC_EV_ALIAS_SANDYBRIDGE_XEON()
260 };
261
262 static const struct pmc_event_descr westmere_event_table[] =
263 {
264         __PMC_EV_ALIAS_WESTMERE()
265 };
266
267 static const struct pmc_event_descr westmere_ex_event_table[] =
268 {
269         __PMC_EV_ALIAS_WESTMERE()
270 };
271
272 static const struct pmc_event_descr corei7uc_event_table[] =
273 {
274         __PMC_EV_ALIAS_COREI7UC()
275 };
276
277 static const struct pmc_event_descr haswelluc_event_table[] =
278 {
279         __PMC_EV_ALIAS_HASWELLUC()
280 };
281
282 static const struct pmc_event_descr broadwelluc_event_table[] =
283 {
284         __PMC_EV_ALIAS_BROADWELLUC()
285 };
286
287 static const struct pmc_event_descr sandybridgeuc_event_table[] =
288 {
289         __PMC_EV_ALIAS_SANDYBRIDGEUC()
290 };
291
292 static const struct pmc_event_descr westmereuc_event_table[] =
293 {
294         __PMC_EV_ALIAS_WESTMEREUC()
295 };
296
297 static const struct pmc_event_descr cortex_a8_event_table[] =
298 {
299         __PMC_EV_ALIAS_ARMV7_CORTEX_A8()
300 };
301
302 static const struct pmc_event_descr cortex_a9_event_table[] =
303 {
304         __PMC_EV_ALIAS_ARMV7_CORTEX_A9()
305 };
306
307 static const struct pmc_event_descr cortex_a53_event_table[] =
308 {
309         __PMC_EV_ALIAS_ARMV8_CORTEX_A53()
310 };
311
312 static const struct pmc_event_descr cortex_a57_event_table[] =
313 {
314         __PMC_EV_ALIAS_ARMV8_CORTEX_A57()
315 };
316
317 /*
318  * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
319  *
320  * Map a CPU to the PMC classes it supports.
321  */
322 #define PMC_MDEP_TABLE(N,C,...)                         \
323         static const enum pmc_class N##_pmc_classes[] = {       \
324                 PMC_CLASS_##C, __VA_ARGS__                      \
325         }
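/*
 * For example, PMC_MDEP_TABLE(k7, K7, PMC_CLASS_SOFT, PMC_CLASS_TSC)
 * expands to:
 *
 *   static const enum pmc_class k7_pmc_classes[] = {
 *           PMC_CLASS_K7, PMC_CLASS_SOFT, PMC_CLASS_TSC
 *   };
 */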
326
327 PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
328 PMC_MDEP_TABLE(atom_silvermont, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
329 PMC_MDEP_TABLE(core, IAP, PMC_CLASS_SOFT, PMC_CLASS_TSC);
330 PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
331 PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
332 PMC_MDEP_TABLE(nehalem_ex, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
333 PMC_MDEP_TABLE(haswell, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
334 PMC_MDEP_TABLE(haswell_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
335 PMC_MDEP_TABLE(broadwell, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
336 PMC_MDEP_TABLE(broadwell_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
337 PMC_MDEP_TABLE(skylake, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
338 PMC_MDEP_TABLE(skylake_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
339 PMC_MDEP_TABLE(ivybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
340 PMC_MDEP_TABLE(ivybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
341 PMC_MDEP_TABLE(sandybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
342 PMC_MDEP_TABLE(sandybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
343 PMC_MDEP_TABLE(westmere, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
344 PMC_MDEP_TABLE(westmere_ex, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
345 PMC_MDEP_TABLE(k7, K7, PMC_CLASS_SOFT, PMC_CLASS_TSC);
346 PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC);
347 PMC_MDEP_TABLE(p4, P4, PMC_CLASS_SOFT, PMC_CLASS_TSC);
348 PMC_MDEP_TABLE(p5, P5, PMC_CLASS_SOFT, PMC_CLASS_TSC);
349 PMC_MDEP_TABLE(p6, P6, PMC_CLASS_SOFT, PMC_CLASS_TSC);
350 PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
351 PMC_MDEP_TABLE(cortex_a8, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
352 PMC_MDEP_TABLE(cortex_a9, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
353 PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
354 PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
355 PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
356 PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K);
357 PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
358 PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450, PMC_CLASS_TSC);
359 PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970, PMC_CLASS_TSC);
360 PMC_MDEP_TABLE(e500, E500, PMC_CLASS_SOFT, PMC_CLASS_E500, PMC_CLASS_TSC);
361 PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT);
362
363 static const struct pmc_event_descr tsc_event_table[] =
364 {
365         __PMC_EV_TSC()
366 };
367
368 #undef  PMC_CLASS_TABLE_DESC
369 #define PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)    \
370 static const struct pmc_class_descr NAME##_class_table_descr =  \
371         {                                                       \
372                 .pm_evc_name  = #CLASS "-",                     \
373                 .pm_evc_name_size = sizeof(#CLASS "-") - 1,     \
374                 .pm_evc_class = PMC_CLASS_##CLASS ,             \
375                 .pm_evc_event_table = EVENTS##_event_table ,    \
376                 .pm_evc_event_table_size =                      \
377                         PMC_EVENT_TABLE_SIZE(EVENTS),           \
378                 .pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc \
379         }
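/*
 * For example, PMC_CLASS_TABLE_DESC(k7, K7, k7, k7) defines
 * k7_class_table_descr with the name prefix "K7-", class PMC_CLASS_K7,
 * event table k7_event_table and allocator k7_allocate_pmc.
 */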
380
381 #if     defined(__i386__) || defined(__amd64__)
382 PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
383 PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
384 PMC_CLASS_TABLE_DESC(atom_silvermont, IAP, atom_silvermont, iap);
385 PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
386 PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
387 PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap);
388 PMC_CLASS_TABLE_DESC(nehalem_ex, IAP, nehalem_ex, iap);
389 PMC_CLASS_TABLE_DESC(haswell, IAP, haswell, iap);
390 PMC_CLASS_TABLE_DESC(haswell_xeon, IAP, haswell_xeon, iap);
391 PMC_CLASS_TABLE_DESC(broadwell, IAP, broadwell, iap);
392 PMC_CLASS_TABLE_DESC(broadwell_xeon, IAP, broadwell_xeon, iap);
393 PMC_CLASS_TABLE_DESC(skylake, IAP, skylake, iap);
394 PMC_CLASS_TABLE_DESC(skylake_xeon, IAP, skylake_xeon, iap);
395 PMC_CLASS_TABLE_DESC(ivybridge, IAP, ivybridge, iap);
396 PMC_CLASS_TABLE_DESC(ivybridge_xeon, IAP, ivybridge_xeon, iap);
397 PMC_CLASS_TABLE_DESC(sandybridge, IAP, sandybridge, iap);
398 PMC_CLASS_TABLE_DESC(sandybridge_xeon, IAP, sandybridge_xeon, iap);
399 PMC_CLASS_TABLE_DESC(westmere, IAP, westmere, iap);
400 PMC_CLASS_TABLE_DESC(westmere_ex, IAP, westmere_ex, iap);
401 PMC_CLASS_TABLE_DESC(ucf, UCF, ucf, ucf);
402 PMC_CLASS_TABLE_DESC(corei7uc, UCP, corei7uc, ucp);
403 PMC_CLASS_TABLE_DESC(haswelluc, UCP, haswelluc, ucp);
404 PMC_CLASS_TABLE_DESC(broadwelluc, UCP, broadwelluc, ucp);
405 PMC_CLASS_TABLE_DESC(sandybridgeuc, UCP, sandybridgeuc, ucp);
406 PMC_CLASS_TABLE_DESC(westmereuc, UCP, westmereuc, ucp);
407 #endif
408 #if     defined(__i386__)
409 PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
410 #endif
411 #if     defined(__i386__) || defined(__amd64__)
412 PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
413 PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
414 #endif
415 #if     defined(__i386__)
416 PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
417 PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
418 #endif
419 #if     defined(__i386__) || defined(__amd64__)
420 PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
421 #endif
422 #if     defined(__arm__)
423 #if     defined(__XSCALE__)
424 PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
425 #endif
426 PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
427 PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
428 #endif
429 #if     defined(__aarch64__)
430 PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
431 PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
432 #endif
433 #if defined(__mips__)
434 PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
435 PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips);
436 PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
437 #endif /* __mips__ */
438 #if defined(__powerpc__)
439 PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
440 PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
441 PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
442 #endif
443
444 static struct pmc_class_descr soft_class_table_descr =
445 {
446         .pm_evc_name  = "SOFT-",
447         .pm_evc_name_size = sizeof("SOFT-") - 1,
448         .pm_evc_class = PMC_CLASS_SOFT,
449         .pm_evc_event_table = NULL,
450         .pm_evc_event_table_size = 0,
451         .pm_evc_allocate_pmc = soft_allocate_pmc
452 };
453
454 #undef  PMC_CLASS_TABLE_DESC
455
456 static const struct pmc_class_descr **pmc_class_table;
457 #define PMC_CLASS_TABLE_SIZE    cpu_info.pm_nclass
458
459 static const enum pmc_class *pmc_mdep_class_list;
460 static size_t pmc_mdep_class_list_size;
461
462 /*
463  * Mapping tables, mapping enumeration values to human readable
464  * strings.
465  */
466
467 static const char * pmc_capability_names[] = {
468 #undef  __PMC_CAP
469 #define __PMC_CAP(N,V,D)        #N ,
470         __PMC_CAPS()
471 };
472
473 struct pmc_class_map {
474         enum pmc_class  pm_class;
475         const char      *pm_name;
476 };
477
478 static const struct pmc_class_map pmc_class_names[] = {
479 #undef  __PMC_CLASS
480 #define __PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
481         __PMC_CLASSES()
482 };
483
484 struct pmc_cputype_map {
485         enum pmc_cputype pm_cputype;
486         const char      *pm_name;
487 };
488
489 static const struct pmc_cputype_map pmc_cputype_names[] = {
490 #undef  __PMC_CPU
491 #define __PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
492         __PMC_CPUS()
493 };
494
495 static const char * pmc_disposition_names[] = {
496 #undef  __PMC_DISP
497 #define __PMC_DISP(D)   #D ,
498         __PMC_DISPOSITIONS()
499 };
500
501 static const char * pmc_mode_names[] = {
502 #undef  __PMC_MODE
503 #define __PMC_MODE(M,N) #M ,
504         __PMC_MODES()
505 };
506
507 static const char * pmc_state_names[] = {
508 #undef  __PMC_STATE
509 #define __PMC_STATE(S) #S ,
510         __PMC_STATES()
511 };
512
513 /*
514  * Filled in by pmc_init().
515  */
516 static int pmc_syscall = -1;
517 static struct pmc_cpuinfo cpu_info;
518 static struct pmc_op_getdyneventinfo soft_event_info;
519
520 /* Event masks for events */
521 struct pmc_masks {
522         const char      *pm_name;
523         const uint64_t  pm_value;
524 };
525 #define PMCMASK(N,V)    { .pm_name = #N, .pm_value = (V) }
526 #define NULLMASK        { .pm_name = NULL }
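/*
 * For example, PMCMASK(this, (0x1 << 14)) expands to
 * { .pm_name = "this", .pm_value = ((0x1 << 14)) }.
 */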
527
528 #if defined(__amd64__) || defined(__i386__)
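/*
 * Parse a "keyword=value[+value...]" qualifier: each '+'-separated value
 * is looked up in the 'pmask' table and its bits are OR-ed into '*evmask'.
 * Returns the number of values matched, or -1 on error.  Callers have
 * already matched the "keyword=" prefix, so 'p' is known to contain '='.
 */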
529 static int
530 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
531 {
532         const struct pmc_masks *pm;
533         char *q, *r;
534         int c;
535
536         if (pmask == NULL)      /* no mask keywords */
537                 return (-1);
538         q = strchr(p, '=');     /* skip '=' */
539         if (*++q == '\0')       /* no more data */
540                 return (-1);
541         c = 0;                  /* count of mask keywords seen */
542         while ((r = strsep(&q, "+")) != NULL) {
543                 for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
544                     pm++)
545                         ;
546                 if (pm->pm_name == NULL) /* not found */
547                         return (-1);
548                 *evmask |= pm->pm_value;
549                 c++;
550         }
551         return (c);
552 }
553 #endif
554
555 #define KWMATCH(p,kw)           (strcasecmp((p), (kw)) == 0)
556 #define KWPREFIXMATCH(p,kw)     (strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
557 #define EV_ALIAS(N,S)           { .pm_alias = N, .pm_spec = S }
558
559 #if defined(__i386__)
560
561 /*
562  * AMD K7 (Athlon) CPUs.
563  */
564
565 static struct pmc_event_alias k7_aliases[] = {
566         EV_ALIAS("branches",            "k7-retired-branches"),
567         EV_ALIAS("branch-mispredicts",  "k7-retired-branches-mispredicted"),
568         EV_ALIAS("cycles",              "tsc"),
569         EV_ALIAS("dc-misses",           "k7-dc-misses"),
570         EV_ALIAS("ic-misses",           "k7-ic-misses"),
571         EV_ALIAS("instructions",        "k7-retired-instructions"),
572         EV_ALIAS("interrupts",          "k7-hardware-interrupts"),
573         EV_ALIAS(NULL, NULL)
574 };
575
576 #define K7_KW_COUNT     "count"
577 #define K7_KW_EDGE      "edge"
578 #define K7_KW_INV       "inv"
579 #define K7_KW_OS        "os"
580 #define K7_KW_UNITMASK  "unitmask"
581 #define K7_KW_USR       "usr"
582
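/*
 * Comma-separated qualifiers accepted below: "count=N", "edge", "inv",
 * "os", "usr" and, for the DC refill/writeback events only, "unitmask="
 * followed by any combination of the MOESI letters, e.g. a specification
 * ending in ",unitmask=moesi,os,usr" (example illustrative).
 */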
583 static int
584 k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
585     struct pmc_op_pmcallocate *pmc_config)
586 {
587         char            *e, *p, *q;
588         int             c, has_unitmask;
589         uint32_t        count, unitmask;
590
591         pmc_config->pm_md.pm_amd.pm_amd_config = 0;
592         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
593
594         if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
595             pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
596             pe == PMC_EV_K7_DC_WRITEBACKS) {
597                 has_unitmask = 1;
598                 unitmask = AMD_PMC_UNITMASK_MOESI;
599         } else
600                 unitmask = has_unitmask = 0;
601
602         while ((p = strsep(&ctrspec, ",")) != NULL) {
603                 if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
604                         q = strchr(p, '=');
605                         if (*++q == '\0') /* skip '=' */
606                                 return (-1);
607
608                         count = strtol(q, &e, 0);
609                         if (e == q || *e != '\0')
610                                 return (-1);
611
612                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
613                         pmc_config->pm_md.pm_amd.pm_amd_config |=
614                             AMD_PMC_TO_COUNTER(count);
615
616                 } else if (KWMATCH(p, K7_KW_EDGE)) {
617                         pmc_config->pm_caps |= PMC_CAP_EDGE;
618                 } else if (KWMATCH(p, K7_KW_INV)) {
619                         pmc_config->pm_caps |= PMC_CAP_INVERT;
620                 } else if (KWMATCH(p, K7_KW_OS)) {
621                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
622                 } else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
623                         if (has_unitmask == 0)
624                                 return (-1);
625                         unitmask = 0;
626                         q = strchr(p, '=');
627                         if (*++q == '\0') /* skip '=' */
628                                 return (-1);
629
630                         while ((c = tolower(*q++)) != 0)
631                                 if (c == 'm')
632                                         unitmask |= AMD_PMC_UNITMASK_M;
633                                 else if (c == 'o')
634                                         unitmask |= AMD_PMC_UNITMASK_O;
635                                 else if (c == 'e')
636                                         unitmask |= AMD_PMC_UNITMASK_E;
637                                 else if (c == 's')
638                                         unitmask |= AMD_PMC_UNITMASK_S;
639                                 else if (c == 'i')
640                                         unitmask |= AMD_PMC_UNITMASK_I;
641                                 else if (c == '+')
642                                         continue;
643                                 else
644                                         return (-1);
645
646                         if (unitmask == 0)
647                                 return (-1);
648
649                 } else if (KWMATCH(p, K7_KW_USR)) {
650                         pmc_config->pm_caps |= PMC_CAP_USER;
651                 } else
652                         return (-1);
653         }
654
655         if (has_unitmask) {
656                 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
657                 pmc_config->pm_md.pm_amd.pm_amd_config |=
658                     AMD_PMC_TO_UNITMASK(unitmask);
659         }
660
661         return (0);
662
663 }
664
665 #endif
666
667 #if defined(__amd64__) || defined(__i386__)
668
669 /*
670  * Intel Core (Family 6, Model E) PMCs.
671  */
672
673 static struct pmc_event_alias core_aliases[] = {
674         EV_ALIAS("branches",            "iap-br-instr-ret"),
675         EV_ALIAS("branch-mispredicts",  "iap-br-mispred-ret"),
676         EV_ALIAS("cycles",              "tsc-tsc"),
677         EV_ALIAS("ic-misses",           "iap-icache-misses"),
678         EV_ALIAS("instructions",        "iap-instr-ret"),
679         EV_ALIAS("interrupts",          "iap-core-hw-int-rx"),
680         EV_ALIAS("unhalted-cycles",     "iap-unhalted-core-cycles"),
681         EV_ALIAS(NULL, NULL)
682 };
683
684 /*
685  * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
686  * and Atom (Family 6, Model 1CH) PMCs.
687  *
688  * We map aliases to events on the fixed-function counters if these
689  * are present.  Note that not all CPUs in this family contain fixed-function
690  * counters.
691  */
692
693 static struct pmc_event_alias core2_aliases[] = {
694         EV_ALIAS("branches",            "iap-br-inst-retired.any"),
695         EV_ALIAS("branch-mispredicts",  "iap-br-inst-retired.mispred"),
696         EV_ALIAS("cycles",              "tsc-tsc"),
697         EV_ALIAS("ic-misses",           "iap-l1i-misses"),
698         EV_ALIAS("instructions",        "iaf-instr-retired.any"),
699         EV_ALIAS("interrupts",          "iap-hw-int-rcv"),
700         EV_ALIAS("unhalted-cycles",     "iaf-cpu-clk-unhalted.core"),
701         EV_ALIAS(NULL, NULL)
702 };
703
704 static struct pmc_event_alias core2_aliases_without_iaf[] = {
705         EV_ALIAS("branches",            "iap-br-inst-retired.any"),
706         EV_ALIAS("branch-mispredicts",  "iap-br-inst-retired.mispred"),
707         EV_ALIAS("cycles",              "tsc-tsc"),
708         EV_ALIAS("ic-misses",           "iap-l1i-misses"),
709         EV_ALIAS("instructions",        "iap-inst-retired.any_p"),
710         EV_ALIAS("interrupts",          "iap-hw-int-rcv"),
711         EV_ALIAS("unhalted-cycles",     "iap-cpu-clk-unhalted.core_p"),
712         EV_ALIAS(NULL, NULL)
713 };
714
715 #define atom_aliases                    core2_aliases
716 #define atom_aliases_without_iaf        core2_aliases_without_iaf
717 #define atom_silvermont_aliases         core2_aliases
718 #define atom_silvermont_aliases_without_iaf     core2_aliases_without_iaf
719 #define corei7_aliases                  core2_aliases
720 #define corei7_aliases_without_iaf      core2_aliases_without_iaf
721 #define nehalem_ex_aliases              core2_aliases
722 #define nehalem_ex_aliases_without_iaf  core2_aliases_without_iaf
723 #define haswell_aliases                 core2_aliases
724 #define haswell_aliases_without_iaf     core2_aliases_without_iaf
725 #define haswell_xeon_aliases                    core2_aliases
726 #define haswell_xeon_aliases_without_iaf        core2_aliases_without_iaf
727 #define broadwell_aliases                       core2_aliases
728 #define broadwell_aliases_without_iaf   core2_aliases_without_iaf
729 #define broadwell_xeon_aliases                  core2_aliases
730 #define broadwell_xeon_aliases_without_iaf      core2_aliases_without_iaf
731 #define skylake_aliases                 core2_aliases
732 #define skylake_aliases_without_iaf     core2_aliases_without_iaf
733 #define skylake_xeon_aliases            core2_aliases
734 #define skylake_xeon_aliases_without_iaf        core2_aliases_without_iaf
735 #define ivybridge_aliases               core2_aliases
736 #define ivybridge_aliases_without_iaf   core2_aliases_without_iaf
737 #define ivybridge_xeon_aliases          core2_aliases
738 #define ivybridge_xeon_aliases_without_iaf      core2_aliases_without_iaf
739 #define sandybridge_aliases             core2_aliases
740 #define sandybridge_aliases_without_iaf core2_aliases_without_iaf
741 #define sandybridge_xeon_aliases        core2_aliases
742 #define sandybridge_xeon_aliases_without_iaf    core2_aliases_without_iaf
743 #define westmere_aliases                core2_aliases
744 #define westmere_aliases_without_iaf    core2_aliases_without_iaf
745 #define westmere_ex_aliases             core2_aliases
746 #define westmere_ex_aliases_without_iaf core2_aliases_without_iaf
747
748 #define IAF_KW_OS               "os"
749 #define IAF_KW_USR              "usr"
750 #define IAF_KW_ANYTHREAD        "anythread"
751
752 /*
753  * Parse an event specifier for Intel fixed function counters.
754  */
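/*
 * The only qualifiers accepted are "os", "usr" and "anythread", e.g. a
 * specification ending in ",usr,anythread" (example illustrative).
 */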
755 static int
756 iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
757     struct pmc_op_pmcallocate *pmc_config)
758 {
759         char *p;
760
761         (void) pe;
762
763         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
764         pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
765
766         while ((p = strsep(&ctrspec, ",")) != NULL) {
767                 if (KWMATCH(p, IAF_KW_OS))
768                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
769                 else if (KWMATCH(p, IAF_KW_USR))
770                         pmc_config->pm_caps |= PMC_CAP_USER;
771                 else if (KWMATCH(p, IAF_KW_ANYTHREAD))
772                         pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
773                 else
774                         return (-1);
775         }
776
777         return (0);
778 }
779
780 /*
781  * Core/Core2 support.
782  */
783
784 #define IAP_KW_AGENT            "agent"
785 #define IAP_KW_ANYTHREAD        "anythread"
786 #define IAP_KW_CACHESTATE       "cachestate"
787 #define IAP_KW_CMASK            "cmask"
788 #define IAP_KW_CORE             "core"
789 #define IAP_KW_EDGE             "edge"
790 #define IAP_KW_INV              "inv"
791 #define IAP_KW_OS               "os"
792 #define IAP_KW_PREFETCH         "prefetch"
793 #define IAP_KW_SNOOPRESPONSE    "snoopresponse"
794 #define IAP_KW_SNOOPTYPE        "snooptype"
795 #define IAP_KW_TRANSITION       "trans"
796 #define IAP_KW_USR              "usr"
797 #define IAP_KW_RSP              "rsp"
798
799 static struct pmc_masks iap_core_mask[] = {
800         PMCMASK(all,    (0x3 << 14)),
801         PMCMASK(this,   (0x1 << 14)),
802         NULLMASK
803 };
804
805 static struct pmc_masks iap_agent_mask[] = {
806         PMCMASK(this,   0),
807         PMCMASK(any,    (0x1 << 13)),
808         NULLMASK
809 };
810
811 static struct pmc_masks iap_prefetch_mask[] = {
812         PMCMASK(both,           (0x3 << 12)),
813         PMCMASK(only,           (0x1 << 12)),
814         PMCMASK(exclude,        0),
815         NULLMASK
816 };
817
818 static struct pmc_masks iap_cachestate_mask[] = {
819         PMCMASK(i,              (1 <<  8)),
820         PMCMASK(s,              (1 <<  9)),
821         PMCMASK(e,              (1 << 10)),
822         PMCMASK(m,              (1 << 11)),
823         NULLMASK
824 };
825
826 static struct pmc_masks iap_snoopresponse_mask[] = {
827         PMCMASK(clean,          (1 << 8)),
828         PMCMASK(hit,            (1 << 9)),
829         PMCMASK(hitm,           (1 << 11)),
830         NULLMASK
831 };
832
833 static struct pmc_masks iap_snooptype_mask[] = {
834         PMCMASK(cmp2s,          (1 << 8)),
835         PMCMASK(cmp2i,          (1 << 9)),
836         NULLMASK
837 };
838
839 static struct pmc_masks iap_transition_mask[] = {
840         PMCMASK(any,            0x00),
841         PMCMASK(frequency,      0x10),
842         NULLMASK
843 };
844
845 static struct pmc_masks iap_rsp_mask_i7_wm[] = {
846         PMCMASK(DMND_DATA_RD,           (1 <<  0)),
847         PMCMASK(DMND_RFO,               (1 <<  1)),
848         PMCMASK(DMND_IFETCH,            (1 <<  2)),
849         PMCMASK(WB,                     (1 <<  3)),
850         PMCMASK(PF_DATA_RD,             (1 <<  4)),
851         PMCMASK(PF_RFO,                 (1 <<  5)),
852         PMCMASK(PF_IFETCH,              (1 <<  6)),
853         PMCMASK(OTHER,                  (1 <<  7)),
854         PMCMASK(UNCORE_HIT,             (1 <<  8)),
855         PMCMASK(OTHER_CORE_HIT_SNP,     (1 <<  9)),
856         PMCMASK(OTHER_CORE_HITM,        (1 << 10)),
857         PMCMASK(REMOTE_CACHE_FWD,       (1 << 12)),
858         PMCMASK(REMOTE_DRAM,            (1 << 13)),
859         PMCMASK(LOCAL_DRAM,             (1 << 14)),
860         PMCMASK(NON_DRAM,               (1 << 15)),
861         NULLMASK
862 };
863
864 static struct pmc_masks iap_rsp_mask_sb_sbx_ib[] = {
865         PMCMASK(REQ_DMND_DATA_RD,       (1ULL <<  0)),
866         PMCMASK(REQ_DMND_RFO,           (1ULL <<  1)),
867         PMCMASK(REQ_DMND_IFETCH,        (1ULL <<  2)),
868         PMCMASK(REQ_WB,                 (1ULL <<  3)),
869         PMCMASK(REQ_PF_DATA_RD,         (1ULL <<  4)),
870         PMCMASK(REQ_PF_RFO,             (1ULL <<  5)),
871         PMCMASK(REQ_PF_IFETCH,          (1ULL <<  6)),
872         PMCMASK(REQ_PF_LLC_DATA_RD,     (1ULL <<  7)),
873         PMCMASK(REQ_PF_LLC_RFO,         (1ULL <<  8)),
874         PMCMASK(REQ_PF_LLC_IFETCH,      (1ULL <<  9)),
875         PMCMASK(REQ_BUS_LOCKS,          (1ULL << 10)),
876         PMCMASK(REQ_STRM_ST,            (1ULL << 11)),
877         PMCMASK(REQ_OTHER,              (1ULL << 15)),
878         PMCMASK(RES_ANY,                (1ULL << 16)),
879         PMCMASK(RES_SUPPLIER_SUPP,      (1ULL << 17)),
880         PMCMASK(RES_SUPPLIER_LLC_HITM,  (1ULL << 18)),
881         PMCMASK(RES_SUPPLIER_LLC_HITE,  (1ULL << 19)),
882         PMCMASK(RES_SUPPLIER_LLC_HITS,  (1ULL << 20)),
883         PMCMASK(RES_SUPPLIER_LLC_HITF,  (1ULL << 21)),
884         PMCMASK(RES_SUPPLIER_LOCAL,     (1ULL << 22)),
885         PMCMASK(RES_SNOOP_SNP_NONE,     (1ULL << 31)),
886         PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
887         PMCMASK(RES_SNOOP_SNP_MISS,     (1ULL << 33)),
888         PMCMASK(RES_SNOOP_HIT_NO_FWD,   (1ULL << 34)),
889         PMCMASK(RES_SNOOP_HIT_FWD,      (1ULL << 35)),
890         PMCMASK(RES_SNOOP_HITM,         (1ULL << 36)),
891         PMCMASK(RES_NON_DRAM,           (1ULL << 37)),
892         NULLMASK
893 };
894
895 /* Broadwell is defined to use the same mask as Haswell */
896 static struct pmc_masks iap_rsp_mask_haswell[] = {
897         PMCMASK(REQ_DMND_DATA_RD,       (1ULL <<  0)),
898         PMCMASK(REQ_DMND_RFO,           (1ULL <<  1)),
899         PMCMASK(REQ_DMND_IFETCH,        (1ULL <<  2)),
900         PMCMASK(REQ_PF_DATA_RD,         (1ULL <<  4)),
901         PMCMASK(REQ_PF_RFO,             (1ULL <<  5)),
902         PMCMASK(REQ_PF_IFETCH,          (1ULL <<  6)),
903         PMCMASK(REQ_OTHER,              (1ULL << 15)),
904         PMCMASK(RES_ANY,                (1ULL << 16)),
905         PMCMASK(RES_SUPPLIER_SUPP,      (1ULL << 17)),
906         PMCMASK(RES_SUPPLIER_LLC_HITM,  (1ULL << 18)),
907         PMCMASK(RES_SUPPLIER_LLC_HITE,  (1ULL << 19)),
908         PMCMASK(RES_SUPPLIER_LLC_HITS,  (1ULL << 20)),
909         PMCMASK(RES_SUPPLIER_LLC_HITF,  (1ULL << 21)),
910         PMCMASK(RES_SUPPLIER_LOCAL,     (1ULL << 22)),
911         /*
912          * For processor type 06_45H, bit 22 is L4_HIT_LOCAL_L4,
913          * and bits 23, 24 and 25 are also defined.
914          */
915         PMCMASK(RES_SNOOP_SNP_NONE,     (1ULL << 31)),
916         PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
917         PMCMASK(RES_SNOOP_SNP_MISS,     (1ULL << 33)),
918         PMCMASK(RES_SNOOP_HIT_NO_FWD,   (1ULL << 34)),
919         PMCMASK(RES_SNOOP_HIT_FWD,      (1ULL << 35)),
920         PMCMASK(RES_SNOOP_HITM,         (1ULL << 36)),
921         PMCMASK(RES_NON_DRAM,           (1ULL << 37)),
922         NULLMASK
923 };
924
925 static struct pmc_masks iap_rsp_mask_skylake[] = {
926         PMCMASK(REQ_DMND_DATA_RD,       (1ULL <<  0)),
927         PMCMASK(REQ_DMND_RFO,           (1ULL <<  1)),
928         PMCMASK(REQ_DMND_IFETCH,        (1ULL <<  2)),
929         PMCMASK(REQ_PF_DATA_RD,         (1ULL <<  7)),
930         PMCMASK(REQ_PF_RFO,             (1ULL <<  8)),
931         PMCMASK(REQ_STRM_ST,            (1ULL << 11)),
932         PMCMASK(REQ_OTHER,              (1ULL << 15)),
933         PMCMASK(RES_ANY,                (1ULL << 16)),
934         PMCMASK(RES_SUPPLIER_SUPP,      (1ULL << 17)),
935         PMCMASK(RES_SUPPLIER_LLC_HITM,  (1ULL << 18)),
936         PMCMASK(RES_SUPPLIER_LLC_HITE,  (1ULL << 19)),
937         PMCMASK(RES_SUPPLIER_LLC_HITS,  (1ULL << 20)),
938         PMCMASK(RES_SUPPLIER_L4_HIT,    (1ULL << 22)),
939         PMCMASK(RES_SUPPLIER_DRAM,      (1ULL << 26)),
940         PMCMASK(RES_SUPPLIER_SPL_HIT,   (1ULL << 30)),
941         PMCMASK(RES_SNOOP_SNP_NONE,     (1ULL << 31)),
942         PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
943         PMCMASK(RES_SNOOP_SNP_MISS,     (1ULL << 33)),
944         PMCMASK(RES_SNOOP_HIT_NO_FWD,   (1ULL << 34)),
945         PMCMASK(RES_SNOOP_HIT_FWD,      (1ULL << 35)),
946         PMCMASK(RES_SNOOP_HITM,         (1ULL << 36)),
947         PMCMASK(RES_NON_DRAM,           (1ULL << 37)),
948         NULLMASK
949 };
950
951
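/*
 * Parse an IAP event specification.  Generic qualifiers are "cmask=N",
 * "edge", "inv", "os", "usr" and "anythread"; mask-valued qualifiers such
 * as "core=this" or "cachestate=m+e", and the CPU-specific "rsp=" masks,
 * are decoded via pmc_parse_mask() (examples illustrative).
 */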
952 static int
953 iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
954     struct pmc_op_pmcallocate *pmc_config)
955 {
956         char *e, *p, *q;
957         uint64_t cachestate, evmask, rsp;
958         int count, n;
959
960         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
961             PMC_CAP_QUALIFIER);
962         pmc_config->pm_md.pm_iap.pm_iap_config = 0;
963
964         cachestate = evmask = rsp = 0;
965
966         /* Parse additional modifiers if present */
967         while ((p = strsep(&ctrspec, ",")) != NULL) {
968
969                 n = 0;
970                 if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
971                         q = strchr(p, '=');
972                         if (*++q == '\0') /* skip '=' */
973                                 return (-1);
974                         count = strtol(q, &e, 0);
975                         if (e == q || *e != '\0')
976                                 return (-1);
977                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
978                         pmc_config->pm_md.pm_iap.pm_iap_config |=
979                             IAP_CMASK(count);
980                 } else if (KWMATCH(p, IAP_KW_EDGE)) {
981                         pmc_config->pm_caps |= PMC_CAP_EDGE;
982                 } else if (KWMATCH(p, IAP_KW_INV)) {
983                         pmc_config->pm_caps |= PMC_CAP_INVERT;
984                 } else if (KWMATCH(p, IAP_KW_OS)) {
985                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
986                 } else if (KWMATCH(p, IAP_KW_USR)) {
987                         pmc_config->pm_caps |= PMC_CAP_USER;
988                 } else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
989                         pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
990                 } else if (KWPREFIXMATCH(p, IAP_KW_CORE "=")) {
991                         n = pmc_parse_mask(iap_core_mask, p, &evmask);
992                         if (n != 1)
993                                 return (-1);
994                 } else if (KWPREFIXMATCH(p, IAP_KW_AGENT "=")) {
995                         n = pmc_parse_mask(iap_agent_mask, p, &evmask);
996                         if (n != 1)
997                                 return (-1);
998                 } else if (KWPREFIXMATCH(p, IAP_KW_PREFETCH "=")) {
999                         n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
1000                         if (n != 1)
1001                                 return (-1);
1002                 } else if (KWPREFIXMATCH(p, IAP_KW_CACHESTATE "=")) {
1003                         n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
1004                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
1005                     KWPREFIXMATCH(p, IAP_KW_TRANSITION "=")) {
1006                         n = pmc_parse_mask(iap_transition_mask, p, &evmask);
1007                         if (n != 1)
1008                                 return (-1);
1009                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
1010                     cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM_SILVERMONT ||
1011                     cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
1012                     cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME) {
1013                         if (KWPREFIXMATCH(p, IAP_KW_SNOOPRESPONSE "=")) {
1014                                 n = pmc_parse_mask(iap_snoopresponse_mask, p,
1015                                     &evmask);
1016                         } else if (KWPREFIXMATCH(p, IAP_KW_SNOOPTYPE "=")) {
1017                                 n = pmc_parse_mask(iap_snooptype_mask, p,
1018                                     &evmask);
1019                         } else
1020                                 return (-1);
1021                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7 ||
1022                     cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE ||
1023                     cpu_info.pm_cputype == PMC_CPU_INTEL_NEHALEM_EX ||
1024                     cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE_EX) {
1025                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1026                                 n = pmc_parse_mask(iap_rsp_mask_i7_wm, p, &rsp);
1027                         } else
1028                                 return (-1);
1029                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE ||
1030                     cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE_XEON ||
1031                     cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE ||
1032                     cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE_XEON) {
1033                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1034                                 n = pmc_parse_mask(iap_rsp_mask_sb_sbx_ib, p, &rsp);
1035                         } else
1036                                 return (-1);
1037                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_HASWELL ||
1038                     cpu_info.pm_cputype == PMC_CPU_INTEL_HASWELL_XEON) {
1039                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1040                                 n = pmc_parse_mask(iap_rsp_mask_haswell, p, &rsp);
1041                         } else
1042                                 return (-1);
1043                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_BROADWELL ||
1044                     cpu_info.pm_cputype == PMC_CPU_INTEL_BROADWELL_XEON) {
1045                         /* Broadwell is defined to use the same mask as Haswell. */
1046                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1047                                 n = pmc_parse_mask(iap_rsp_mask_haswell, p, &rsp);
1048                         } else
1049                                 return (-1);
1050
1051                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SKYLAKE ||
1052                     cpu_info.pm_cputype == PMC_CPU_INTEL_SKYLAKE_XEON) {
1053                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1054                                 n = pmc_parse_mask(iap_rsp_mask_skylake, p, &rsp);
1055                         } else
1056                                 return (-1);
1057
1058                 } else
1059                         return (-1);
1060
1061                 if (n < 0)      /* Parsing failed. */
1062                         return (-1);
1063         }
1064
1065         pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;
1066
1067         /*
1068          * If the event requires a 'cachestate' qualifier but was not
1069          * specified by the user, use a sensible default.
1070          */
1071         switch (pe) {
1072         case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
1073         case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
1074         case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
1075         case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
1076         case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
1077         case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
1078         case PMC_EV_IAP_EVENT_32H: /* Core */
1079         case PMC_EV_IAP_EVENT_40H: /* Core */
1080         case PMC_EV_IAP_EVENT_41H: /* Core */
1081         case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
1082                 if (cachestate == 0)
1083                         cachestate = (0xF << 8);
1084                 break;
1085         case PMC_EV_IAP_EVENT_77H: /* Atom */
1086                 /* IAP_EVENT_77H only accepts a cachestate qualifier on the
1087                  * Atom processor.
1088                  */
1089                 if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM && cachestate == 0)
1090                         cachestate = (0xF << 8);
1091                 break;
1092         default:
1093                 break;
1094         }
1095
1096         pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
1097         pmc_config->pm_md.pm_iap.pm_iap_rsp = rsp;
1098
1099         return (0);
1100 }
1101
1102 /*
1103  * Intel Uncore.
1104  */
1105
1106 static int
1107 ucf_allocate_pmc(enum pmc_event pe, char *ctrspec,
1108     struct pmc_op_pmcallocate *pmc_config)
1109 {
1110         (void) pe;
1111         (void) ctrspec;
1112
1113         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1114         pmc_config->pm_md.pm_ucf.pm_ucf_flags = 0;
1115
1116         return (0);
1117 }
1118
1119 #define UCP_KW_CMASK            "cmask"
1120 #define UCP_KW_EDGE             "edge"
1121 #define UCP_KW_INV              "inv"
1122
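/*
 * Uncore programmable counters accept the qualifiers "cmask=N", "edge"
 * and "inv".
 */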
1123 static int
1124 ucp_allocate_pmc(enum pmc_event pe, char *ctrspec,
1125     struct pmc_op_pmcallocate *pmc_config)
1126 {
1127         char *e, *p, *q;
1128         int count, n;
1129
1130         (void) pe;
1131
1132         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
1133             PMC_CAP_QUALIFIER);
1134         pmc_config->pm_md.pm_ucp.pm_ucp_config = 0;
1135
1136         /* Parse additional modifiers if present */
1137         while ((p = strsep(&ctrspec, ",")) != NULL) {
1138
1139                 n = 0;
1140                 if (KWPREFIXMATCH(p, UCP_KW_CMASK "=")) {
1141                         q = strchr(p, '=');
1142                         if (*++q == '\0') /* skip '=' */
1143                                 return (-1);
1144                         count = strtol(q, &e, 0);
1145                         if (e == q || *e != '\0')
1146                                 return (-1);
1147                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1148                         pmc_config->pm_md.pm_ucp.pm_ucp_config |=
1149                             UCP_CMASK(count);
1150                 } else if (KWMATCH(p, UCP_KW_EDGE)) {
1151                         pmc_config->pm_caps |= PMC_CAP_EDGE;
1152                 } else if (KWMATCH(p, UCP_KW_INV)) {
1153                         pmc_config->pm_caps |= PMC_CAP_INVERT;
1154                 } else
1155                         return (-1);
1156
1157                 if (n < 0)      /* Parsing failed. */
1158                         return (-1);
1159         }
1160
1161         return (0);
1162 }
1163
1164 /*
1165  * AMD K8 PMCs.
1166  *
1167  * These are very similar to AMD K7 PMCs, but support more kinds of
1168  * events.
1169  */
1170
1171 static struct pmc_event_alias k8_aliases[] = {
1172         EV_ALIAS("branches",            "k8-fr-retired-taken-branches"),
1173         EV_ALIAS("branch-mispredicts",
1174             "k8-fr-retired-taken-branches-mispredicted"),
1175         EV_ALIAS("cycles",              "tsc"),
1176         EV_ALIAS("dc-misses",           "k8-dc-miss"),
1177         EV_ALIAS("ic-misses",           "k8-ic-miss"),
1178         EV_ALIAS("instructions",        "k8-fr-retired-x86-instructions"),
1179         EV_ALIAS("interrupts",          "k8-fr-taken-hardware-interrupts"),
1180         EV_ALIAS("unhalted-cycles",     "k8-bu-cpu-clk-unhalted"),
1181         EV_ALIAS(NULL, NULL)
1182 };
1183
1184 #define __K8MASK(N,V) PMCMASK(N,(1 << (V)))
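/*
 * For example, __K8MASK(es, 0) expands to
 * { .pm_name = "es", .pm_value = ((1 << (0))) }.
 */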
1185
1186 /*
1187  * Parsing tables
1188  */
1189
1190 /* fp dispatched fpu ops */
1191 static const struct pmc_masks k8_mask_fdfo[] = {
1192         __K8MASK(add-pipe-excluding-junk-ops,   0),
1193         __K8MASK(multiply-pipe-excluding-junk-ops,      1),
1194         __K8MASK(store-pipe-excluding-junk-ops, 2),
1195         __K8MASK(add-pipe-junk-ops,             3),
1196         __K8MASK(multiply-pipe-junk-ops,        4),
1197         __K8MASK(store-pipe-junk-ops,           5),
1198         NULLMASK
1199 };
1200
1201 /* ls segment register loads */
1202 static const struct pmc_masks k8_mask_lsrl[] = {
1203         __K8MASK(es,    0),
1204         __K8MASK(cs,    1),
1205         __K8MASK(ss,    2),
1206         __K8MASK(ds,    3),
1207         __K8MASK(fs,    4),
1208         __K8MASK(gs,    5),
1209         __K8MASK(hs,    6),
1210         NULLMASK
1211 };
1212
1213 /* ls locked operation */
1214 static const struct pmc_masks k8_mask_llo[] = {
1215         __K8MASK(locked-instructions,   0),
1216         __K8MASK(cycles-in-request,     1),
1217         __K8MASK(cycles-to-complete,    2),
1218         NULLMASK
1219 };
1220
1221 /* dc refill from {l2,system} and dc copyback */
1222 static const struct pmc_masks k8_mask_dc[] = {
1223         __K8MASK(invalid,       0),
1224         __K8MASK(shared,        1),
1225         __K8MASK(exclusive,     2),
1226         __K8MASK(owner,         3),
1227         __K8MASK(modified,      4),
1228         NULLMASK
1229 };
1230
1231 /* dc one bit ecc error */
1232 static const struct pmc_masks k8_mask_dobee[] = {
1233         __K8MASK(scrubber,      0),
1234         __K8MASK(piggyback,     1),
1235         NULLMASK
1236 };
1237
1238 /* dc dispatched prefetch instructions */
1239 static const struct pmc_masks k8_mask_ddpi[] = {
1240         __K8MASK(load,  0),
1241         __K8MASK(store, 1),
1242         __K8MASK(nta,   2),
1243         NULLMASK
1244 };
1245
1246 /* dc dcache accesses by locks */
1247 static const struct pmc_masks k8_mask_dabl[] = {
1248         __K8MASK(accesses,      0),
1249         __K8MASK(misses,        1),
1250         NULLMASK
1251 };
1252
1253 /* bu internal l2 request */
1254 static const struct pmc_masks k8_mask_bilr[] = {
1255         __K8MASK(ic-fill,       0),
1256         __K8MASK(dc-fill,       1),
1257         __K8MASK(tlb-reload,    2),
1258         __K8MASK(tag-snoop,     3),
1259         __K8MASK(cancelled,     4),
1260         NULLMASK
1261 };
1262
1263 /* bu fill request l2 miss */
1264 static const struct pmc_masks k8_mask_bfrlm[] = {
1265         __K8MASK(ic-fill,       0),
1266         __K8MASK(dc-fill,       1),
1267         __K8MASK(tlb-reload,    2),
1268         NULLMASK
1269 };
1270
1271 /* bu fill into l2 */
1272 static const struct pmc_masks k8_mask_bfil[] = {
1273         __K8MASK(dirty-l2-victim,       0),
1274         __K8MASK(victim-from-l2,        1),
1275         NULLMASK
1276 };
1277
1278 /* fr retired fpu instructions */
1279 static const struct pmc_masks k8_mask_frfi[] = {
1280         __K8MASK(x87,                   0),
1281         __K8MASK(mmx-3dnow,             1),
1282         __K8MASK(packed-sse-sse2,       2),
1283         __K8MASK(scalar-sse-sse2,       3),
1284         NULLMASK
1285 };
1286
1287 /* fr retired fastpath double op instructions */
1288 static const struct pmc_masks k8_mask_frfdoi[] = {
1289         __K8MASK(low-op-pos-0,          0),
1290         __K8MASK(low-op-pos-1,          1),
1291         __K8MASK(low-op-pos-2,          2),
1292         NULLMASK
1293 };
1294
1295 /* fr fpu exceptions */
1296 static const struct pmc_masks k8_mask_ffe[] = {
1297         __K8MASK(x87-reclass-microfaults,       0),
1298         __K8MASK(sse-retype-microfaults,        1),
1299         __K8MASK(sse-reclass-microfaults,       2),
1300         __K8MASK(sse-and-x87-microtraps,        3),
1301         NULLMASK
1302 };
1303
1304 /* nb memory controller page access event */
1305 static const struct pmc_masks k8_mask_nmcpae[] = {
1306         __K8MASK(page-hit,      0),
1307         __K8MASK(page-miss,     1),
1308         __K8MASK(page-conflict, 2),
1309         NULLMASK
1310 };
1311
1312 /* nb memory controller turnaround */
1313 static const struct pmc_masks k8_mask_nmct[] = {
1314         __K8MASK(dimm-turnaround,               0),
1315         __K8MASK(read-to-write-turnaround,      1),
1316         __K8MASK(write-to-read-turnaround,      2),
1317         NULLMASK
1318 };
1319
1320 /* nb memory controller bypass saturation */
1321 static const struct pmc_masks k8_mask_nmcbs[] = {
1322         __K8MASK(memory-controller-hi-pri-bypass,       0),
1323         __K8MASK(memory-controller-lo-pri-bypass,       1),
1324         __K8MASK(dram-controller-interface-bypass,      2),
1325         __K8MASK(dram-controller-queue-bypass,          3),
1326         NULLMASK
1327 };
1328
1329 /* nb sized commands */
1330 static const struct pmc_masks k8_mask_nsc[] = {
1331         __K8MASK(nonpostwrszbyte,       0),
1332         __K8MASK(nonpostwrszdword,      1),
1333         __K8MASK(postwrszbyte,          2),
1334         __K8MASK(postwrszdword,         3),
1335         __K8MASK(rdszbyte,              4),
1336         __K8MASK(rdszdword,             5),
1337         __K8MASK(rdmodwr,               6),
1338         NULLMASK
1339 };
1340
1341 /* nb probe result */
1342 static const struct pmc_masks k8_mask_npr[] = {
1343         __K8MASK(probe-miss,            0),
1344         __K8MASK(probe-hit,             1),
1345         __K8MASK(probe-hit-dirty-no-memory-cancel, 2),
1346         __K8MASK(probe-hit-dirty-with-memory-cancel, 3),
1347         NULLMASK
1348 };
1349
1350 /* nb hypertransport bus bandwidth */
1351 static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
1352         __K8MASK(command,       0),
1353         __K8MASK(data,  1),
1354         __K8MASK(buffer-release, 2),
1355         __K8MASK(nop,   3),
1356         NULLMASK
1357 };
1358
1359 #undef  __K8MASK
1360
1361 #define K8_KW_COUNT     "count"
1362 #define K8_KW_EDGE      "edge"
1363 #define K8_KW_INV       "inv"
1364 #define K8_KW_MASK      "mask"
1365 #define K8_KW_OS        "os"
1366 #define K8_KW_USR       "usr"
1367
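/*
 * Qualifiers accepted below: "count=N", "edge", "inv", "os", "usr" and
 * "mask=" with '+'-separated values from the event's parsing table, e.g.
 * "mask=exclusive+modified" for the DC refill events (example illustrative).
 */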
1368 static int
1369 k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
1370     struct pmc_op_pmcallocate *pmc_config)
1371 {
1372         char            *e, *p, *q;
1373         int             n;
1374         uint32_t        count;
1375         uint64_t        evmask;
1376         const struct pmc_masks  *pm, *pmask;
1377
1378         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1379         pmc_config->pm_md.pm_amd.pm_amd_config = 0;
1380
1381         pmask = NULL;
1382         evmask = 0;
1383
1384 #define __K8SETMASK(M) pmask = k8_mask_##M
1385
1386         /* setup parsing tables */
1387         switch (pe) {
1388         case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1389                 __K8SETMASK(fdfo);
1390                 break;
1391         case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
1392                 __K8SETMASK(lsrl);
1393                 break;
1394         case PMC_EV_K8_LS_LOCKED_OPERATION:
1395                 __K8SETMASK(llo);
1396                 break;
1397         case PMC_EV_K8_DC_REFILL_FROM_L2:
1398         case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
1399         case PMC_EV_K8_DC_COPYBACK:
1400                 __K8SETMASK(dc);
1401                 break;
1402         case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
1403                 __K8SETMASK(dobee);
1404                 break;
1405         case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
1406                 __K8SETMASK(ddpi);
1407                 break;
1408         case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1409                 __K8SETMASK(dabl);
1410                 break;
1411         case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
1412                 __K8SETMASK(bilr);
1413                 break;
1414         case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
1415                 __K8SETMASK(bfrlm);
1416                 break;
1417         case PMC_EV_K8_BU_FILL_INTO_L2:
1418                 __K8SETMASK(bfil);
1419                 break;
1420         case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1421                 __K8SETMASK(frfi);
1422                 break;
1423         case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1424                 __K8SETMASK(frfdoi);
1425                 break;
1426         case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1427                 __K8SETMASK(ffe);
1428                 break;
1429         case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
1430                 __K8SETMASK(nmcpae);
1431                 break;
1432         case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
1433                 __K8SETMASK(nmct);
1434                 break;
1435         case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
1436                 __K8SETMASK(nmcbs);
1437                 break;
1438         case PMC_EV_K8_NB_SIZED_COMMANDS:
1439                 __K8SETMASK(nsc);
1440                 break;
1441         case PMC_EV_K8_NB_PROBE_RESULT:
1442                 __K8SETMASK(npr);
1443                 break;
1444         case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
1445         case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
1446         case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
1447                 __K8SETMASK(nhbb);
1448                 break;
1449
1450         default:
1451                 break;          /* no options defined */
1452         }
1453
1454         while ((p = strsep(&ctrspec, ",")) != NULL) {
1455                 if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
1456                         q = strchr(p, '=');
1457                         if (*++q == '\0') /* skip '=' */
1458                                 return (-1);
1459
1460                         count = strtol(q, &e, 0);
1461                         if (e == q || *e != '\0')
1462                                 return (-1);
1463
1464                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1465                         pmc_config->pm_md.pm_amd.pm_amd_config |=
1466                             AMD_PMC_TO_COUNTER(count);
1467
1468                 } else if (KWMATCH(p, K8_KW_EDGE)) {
1469                         pmc_config->pm_caps |= PMC_CAP_EDGE;
1470                 } else if (KWMATCH(p, K8_KW_INV)) {
1471                         pmc_config->pm_caps |= PMC_CAP_INVERT;
1472                 } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
1473                         if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1474                                 return (-1);
1475                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1476                 } else if (KWMATCH(p, K8_KW_OS)) {
1477                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1478                 } else if (KWMATCH(p, K8_KW_USR)) {
1479                         pmc_config->pm_caps |= PMC_CAP_USER;
1480                 } else
1481                         return (-1);
1482         }
1483
1484         /* other post processing */
1485         switch (pe) {
1486         case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1487         case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
1488         case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
1489         case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1490         case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1491         case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1492                 /* XXX only available in rev B and later */
1493                 break;
1494         case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1495                 /* XXX only available in rev C and later */
1496                 break;
1497         case PMC_EV_K8_LS_LOCKED_OPERATION:
1498                 /* XXX CPU revisions A and B require evmask to be zero */
1499                 if (evmask & (evmask - 1)) /* > 1 bit set */
1500                         return (-1);
1501                 if (evmask == 0) {
1502                         evmask = 0x01; /* Rev C and later: #instrs */
1503                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1504                 }
1505                 break;
1506         default:
1507                 if (evmask == 0 && pmask != NULL) {
1508                         for (pm = pmask; pm->pm_name; pm++)
1509                                 evmask |= pm->pm_value;
1510                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1511                 }
1512         }
1513
1514         if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1515                 pmc_config->pm_md.pm_amd.pm_amd_config =
1516                     AMD_PMC_TO_UNITMASK(evmask);
1517
1518         return (0);
1519 }
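/*
 * Usage sketch (illustrative only, not part of the library source): how a
 * K8 event specification maps onto the request built by k8_allocate_pmc().
 * The event constant and mask names come from the tables above; the calling
 * code itself is hypothetical.
 *
 *      struct pmc_op_pmcallocate cfg;
 *      char spec[] = "mask=page-hit+page-miss,os";
 *
 *      memset(&cfg, 0, sizeof(cfg));
 *      if (k8_allocate_pmc(PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT,
 *          spec, &cfg) == 0) {
 *              // cfg.pm_caps now holds PMC_CAP_READ | PMC_CAP_WRITE |
 *              // PMC_CAP_QUALIFIER | PMC_CAP_SYSTEM and pm_amd_config
 *              // carries AMD_PMC_TO_UNITMASK(0x3) (page-hit | page-miss).
 *      }
 */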
1520
1521 #endif
1522
1523 #if defined(__amd64__) || defined(__i386__)
1524
1525 /*
1526  * Intel P4 PMCs
1527  */
1528
1529 static struct pmc_event_alias p4_aliases[] = {
1530         EV_ALIAS("branches",            "p4-branch-retired,mask=mmtp+mmtm"),
1531         EV_ALIAS("branch-mispredicts",  "p4-mispred-branch-retired"),
1532         EV_ALIAS("cycles",              "tsc"),
1533         EV_ALIAS("instructions",
1534             "p4-instr-retired,mask=nbogusntag+nbogustag"),
1535         EV_ALIAS("unhalted-cycles",     "p4-global-power-events"),
1536         EV_ALIAS(NULL, NULL)
1537 };
1538
1539 #define P4_KW_ACTIVE    "active"
1540 #define P4_KW_ACTIVE_ANY "any"
1541 #define P4_KW_ACTIVE_BOTH "both"
1542 #define P4_KW_ACTIVE_NONE "none"
1543 #define P4_KW_ACTIVE_SINGLE "single"
1544 #define P4_KW_BUSREQTYPE "busreqtype"
1545 #define P4_KW_CASCADE   "cascade"
1546 #define P4_KW_EDGE      "edge"
1547 #define P4_KW_INV       "complement"
1548 #define P4_KW_OS        "os"
1549 #define P4_KW_MASK      "mask"
1550 #define P4_KW_PRECISE   "precise"
1551 #define P4_KW_TAG       "tag"
1552 #define P4_KW_THRESHOLD "threshold"
1553 #define P4_KW_USR       "usr"
1554
1555 #define __P4MASK(N,V) PMCMASK(N, (1 << (V)))
1556
1557 static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
1558         __P4MASK(dd, 0),
1559         __P4MASK(db, 1),
1560         __P4MASK(di, 2),
1561         __P4MASK(bd, 3),
1562         __P4MASK(bb, 4),
1563         __P4MASK(bi, 5),
1564         __P4MASK(id, 6),
1565         __P4MASK(ib, 7),
1566         NULLMASK
1567 };
1568
1569 static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
1570         __P4MASK(tcmiss, 0),
1571         NULLMASK,
1572 };
1573
1574 static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
1575         __P4MASK(hit, 0),
1576         __P4MASK(miss, 1),
1577         __P4MASK(hit-uc, 2),
1578         NULLMASK
1579 };
1580
1581 static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
1582         __P4MASK(st-rb-full, 2),
1583         __P4MASK(64k-conf, 3),
1584         NULLMASK
1585 };
1586
1587 static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
1588         __P4MASK(lsc, 0),
1589         __P4MASK(ssc, 1),
1590         NULLMASK
1591 };
1592
1593 static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
1594         __P4MASK(split-ld, 1),
1595         NULLMASK
1596 };
1597
1598 static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
1599         __P4MASK(split-st, 1),
1600         NULLMASK
1601 };
1602
1603 static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
1604         __P4MASK(no-sta, 1),
1605         __P4MASK(no-std, 3),
1606         __P4MASK(partial-data, 4),
1607         __P4MASK(unalgn-addr, 5),
1608         NULLMASK
1609 };
1610
1611 static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
1612         __P4MASK(dtmiss, 0),
1613         __P4MASK(itmiss, 1),
1614         NULLMASK
1615 };
1616
1617 static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
1618         __P4MASK(rd-2ndl-hits, 0),
1619         __P4MASK(rd-2ndl-hite, 1),
1620         __P4MASK(rd-2ndl-hitm, 2),
1621         __P4MASK(rd-3rdl-hits, 3),
1622         __P4MASK(rd-3rdl-hite, 4),
1623         __P4MASK(rd-3rdl-hitm, 5),
1624         __P4MASK(rd-2ndl-miss, 8),
1625         __P4MASK(rd-3rdl-miss, 9),
1626         __P4MASK(wr-2ndl-miss, 10),
1627         NULLMASK
1628 };
1629
1630 static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
1631         __P4MASK(all-read, 5),
1632         __P4MASK(all-write, 6),
1633         __P4MASK(mem-uc, 7),
1634         __P4MASK(mem-wc, 8),
1635         __P4MASK(mem-wt, 9),
1636         __P4MASK(mem-wp, 10),
1637         __P4MASK(mem-wb, 11),
1638         __P4MASK(own, 13),
1639         __P4MASK(other, 14),
1640         __P4MASK(prefetch, 15),
1641         NULLMASK
1642 };
1643
1644 static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
1645         __P4MASK(all-read, 5),
1646         __P4MASK(all-write, 6),
1647         __P4MASK(mem-uc, 7),
1648         __P4MASK(mem-wc, 8),
1649         __P4MASK(mem-wt, 9),
1650         __P4MASK(mem-wp, 10),
1651         __P4MASK(mem-wb, 11),
1652         __P4MASK(own, 13),
1653         __P4MASK(other, 14),
1654         __P4MASK(prefetch, 15),
1655         NULLMASK
1656 };
1657
1658 static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
1659         __P4MASK(drdy-drv, 0),
1660         __P4MASK(drdy-own, 1),
1661         __P4MASK(drdy-other, 2),
1662         __P4MASK(dbsy-drv, 3),
1663         __P4MASK(dbsy-own, 4),
1664         __P4MASK(dbsy-other, 5),
1665         NULLMASK
1666 };
1667
1668 static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
1669         __P4MASK(req-type0, 0),
1670         __P4MASK(req-type1, 1),
1671         __P4MASK(req-len0, 2),
1672         __P4MASK(req-len1, 3),
1673         __P4MASK(req-io-type, 5),
1674         __P4MASK(req-lock-type, 6),
1675         __P4MASK(req-cache-type, 7),
1676         __P4MASK(req-split-type, 8),
1677         __P4MASK(req-dem-type, 9),
1678         __P4MASK(req-ord-type, 10),
1679         __P4MASK(mem-type0, 11),
1680         __P4MASK(mem-type1, 12),
1681         __P4MASK(mem-type2, 13),
1682         NULLMASK
1683 };
1684
1685 static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
1686         __P4MASK(all, 15),
1687         NULLMASK
1688 };
1689
1690 static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
1691         __P4MASK(all, 15),
1692         NULLMASK
1693 };
1694
1695 static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
1696         __P4MASK(all, 15),
1697         NULLMASK
1698 };
1699
1700 static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
1701         __P4MASK(all, 15),
1702         NULLMASK
1703 };
1704
1705 static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
1706         __P4MASK(all, 15),
1707         NULLMASK
1708 };
1709
1710 static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
1711         __P4MASK(all, 15),
1712         NULLMASK
1713 };
1714
1715 static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
1716         __P4MASK(all, 15),
1717         NULLMASK
1718 };
1719
1720 static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
1721         __P4MASK(all, 15),
1722         NULLMASK
1723 };
1724
1725 static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
1726         __P4MASK(allp0, 3),
1727         __P4MASK(allp2, 4),
1728         NULLMASK
1729 };
1730
1731 static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
1732         __P4MASK(running, 0),
1733         NULLMASK
1734 };
1735
1736 static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
1737         __P4MASK(cisc, 0),
1738         NULLMASK
1739 };
1740
1741 static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
1742         __P4MASK(from-tc-build, 0),
1743         __P4MASK(from-tc-deliver, 1),
1744         __P4MASK(from-rom, 2),
1745         NULLMASK
1746 };
1747
1748 static const struct pmc_masks p4_mask_rmbt[] = {
1749         /* retired mispred branch type */
1750         __P4MASK(conditional, 1),
1751         __P4MASK(call, 2),
1752         __P4MASK(return, 3),
1753         __P4MASK(indirect, 4),
1754         NULLMASK
1755 };
1756
1757 static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
1758         __P4MASK(conditional, 1),
1759         __P4MASK(call, 2),
1760         __P4MASK(retired, 3),
1761         __P4MASK(indirect, 4),
1762         NULLMASK
1763 };
1764
1765 static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
1766         __P4MASK(sbfull, 5),
1767         NULLMASK
1768 };
1769
1770 static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
1771         __P4MASK(wcb-evicts, 0),
1772         __P4MASK(wcb-full-evict, 1),
1773         NULLMASK
1774 };
1775
1776 static const struct pmc_masks p4_mask_fee[] = { /* front end event */
1777         __P4MASK(nbogus, 0),
1778         __P4MASK(bogus, 1),
1779         NULLMASK
1780 };
1781
1782 static const struct pmc_masks p4_mask_ee[] = { /* execution event */
1783         __P4MASK(nbogus0, 0),
1784         __P4MASK(nbogus1, 1),
1785         __P4MASK(nbogus2, 2),
1786         __P4MASK(nbogus3, 3),
1787         __P4MASK(bogus0, 4),
1788         __P4MASK(bogus1, 5),
1789         __P4MASK(bogus2, 6),
1790         __P4MASK(bogus3, 7),
1791         NULLMASK
1792 };
1793
1794 static const struct pmc_masks p4_mask_re[] = { /* replay event */
1795         __P4MASK(nbogus, 0),
1796         __P4MASK(bogus, 1),
1797         NULLMASK
1798 };
1799
1800 static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
1801         __P4MASK(nbogusntag, 0),
1802         __P4MASK(nbogustag, 1),
1803         __P4MASK(bogusntag, 2),
1804         __P4MASK(bogustag, 3),
1805         NULLMASK
1806 };
1807
1808 static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
1809         __P4MASK(nbogus, 0),
1810         __P4MASK(bogus, 1),
1811         NULLMASK
1812 };
1813
1814 static const struct pmc_masks p4_mask_ut[] = { /* uop type */
1815         __P4MASK(tagloads, 1),
1816         __P4MASK(tagstores, 2),
1817         NULLMASK
1818 };
1819
1820 static const struct pmc_masks p4_mask_br[] = { /* branch retired */
1821         __P4MASK(mmnp, 0),
1822         __P4MASK(mmnm, 1),
1823         __P4MASK(mmtp, 2),
1824         __P4MASK(mmtm, 3),
1825         NULLMASK
1826 };
1827
1828 static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
1829         __P4MASK(nbogus, 0),
1830         NULLMASK
1831 };
1832
1833 static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
1834         __P4MASK(fpsu, 0),
1835         __P4MASK(fpso, 1),
1836         __P4MASK(poao, 2),
1837         __P4MASK(poau, 3),
1838         __P4MASK(prea, 4),
1839         NULLMASK
1840 };
1841
1842 static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
1843         __P4MASK(clear, 0),
1844         __P4MASK(moclear, 2),
1845         __P4MASK(smclear, 3),
1846         NULLMASK
1847 };
1848
1849 /* P4 event parser */
1850 static int
1851 p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1852     struct pmc_op_pmcallocate *pmc_config)
1853 {
1854
1855         char    *e, *p, *q;
1856         int     count, has_tag, has_busreqtype, n;
1857         uint32_t cccractivemask;
1858         uint64_t evmask;
1859         const struct pmc_masks *pm, *pmask;
1860
1861         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1862         pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1863             pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1864
1865         pmask   = NULL;
1866         evmask  = 0;
1867         cccractivemask = 0x3;
1868         has_tag = has_busreqtype = 0;
1869
1870 #define __P4SETMASK(M) do {                             \
1871         pmask = p4_mask_##M;                            \
1872 } while (0)
1873
1874         switch (pe) {
1875         case PMC_EV_P4_TC_DELIVER_MODE:
1876                 __P4SETMASK(tcdm);
1877                 break;
1878         case PMC_EV_P4_BPU_FETCH_REQUEST:
1879                 __P4SETMASK(bfr);
1880                 break;
1881         case PMC_EV_P4_ITLB_REFERENCE:
1882                 __P4SETMASK(ir);
1883                 break;
1884         case PMC_EV_P4_MEMORY_CANCEL:
1885                 __P4SETMASK(memcan);
1886                 break;
1887         case PMC_EV_P4_MEMORY_COMPLETE:
1888                 __P4SETMASK(memcomp);
1889                 break;
1890         case PMC_EV_P4_LOAD_PORT_REPLAY:
1891                 __P4SETMASK(lpr);
1892                 break;
1893         case PMC_EV_P4_STORE_PORT_REPLAY:
1894                 __P4SETMASK(spr);
1895                 break;
1896         case PMC_EV_P4_MOB_LOAD_REPLAY:
1897                 __P4SETMASK(mlr);
1898                 break;
1899         case PMC_EV_P4_PAGE_WALK_TYPE:
1900                 __P4SETMASK(pwt);
1901                 break;
1902         case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1903                 __P4SETMASK(bcr);
1904                 break;
1905         case PMC_EV_P4_IOQ_ALLOCATION:
1906                 __P4SETMASK(ia);
1907                 has_busreqtype = 1;
1908                 break;
1909         case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1910                 __P4SETMASK(iae);
1911                 has_busreqtype = 1;
1912                 break;
1913         case PMC_EV_P4_FSB_DATA_ACTIVITY:
1914                 __P4SETMASK(fda);
1915                 break;
1916         case PMC_EV_P4_BSQ_ALLOCATION:
1917                 __P4SETMASK(ba);
1918                 break;
1919         case PMC_EV_P4_SSE_INPUT_ASSIST:
1920                 __P4SETMASK(sia);
1921                 break;
1922         case PMC_EV_P4_PACKED_SP_UOP:
1923                 __P4SETMASK(psu);
1924                 break;
1925         case PMC_EV_P4_PACKED_DP_UOP:
1926                 __P4SETMASK(pdu);
1927                 break;
1928         case PMC_EV_P4_SCALAR_SP_UOP:
1929                 __P4SETMASK(ssu);
1930                 break;
1931         case PMC_EV_P4_SCALAR_DP_UOP:
1932                 __P4SETMASK(sdu);
1933                 break;
1934         case PMC_EV_P4_64BIT_MMX_UOP:
1935                 __P4SETMASK(64bmu);
1936                 break;
1937         case PMC_EV_P4_128BIT_MMX_UOP:
1938                 __P4SETMASK(128bmu);
1939                 break;
1940         case PMC_EV_P4_X87_FP_UOP:
1941                 __P4SETMASK(xfu);
1942                 break;
1943         case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1944                 __P4SETMASK(xsmu);
1945                 break;
1946         case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1947                 __P4SETMASK(gpe);
1948                 break;
1949         case PMC_EV_P4_TC_MS_XFER:
1950                 __P4SETMASK(tmx);
1951                 break;
1952         case PMC_EV_P4_UOP_QUEUE_WRITES:
1953                 __P4SETMASK(uqw);
1954                 break;
1955         case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1956                 __P4SETMASK(rmbt);
1957                 break;
1958         case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1959                 __P4SETMASK(rbt);
1960                 break;
1961         case PMC_EV_P4_RESOURCE_STALL:
1962                 __P4SETMASK(rs);
1963                 break;
1964         case PMC_EV_P4_WC_BUFFER:
1965                 __P4SETMASK(wb);
1966                 break;
1967         case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1968         case PMC_EV_P4_B2B_CYCLES:
1969         case PMC_EV_P4_BNR:
1970         case PMC_EV_P4_SNOOP:
1971         case PMC_EV_P4_RESPONSE:
1972                 break;
1973         case PMC_EV_P4_FRONT_END_EVENT:
1974                 __P4SETMASK(fee);
1975                 break;
1976         case PMC_EV_P4_EXECUTION_EVENT:
1977                 __P4SETMASK(ee);
1978                 break;
1979         case PMC_EV_P4_REPLAY_EVENT:
1980                 __P4SETMASK(re);
1981                 break;
1982         case PMC_EV_P4_INSTR_RETIRED:
1983                 __P4SETMASK(insret);
1984                 break;
1985         case PMC_EV_P4_UOPS_RETIRED:
1986                 __P4SETMASK(ur);
1987                 break;
1988         case PMC_EV_P4_UOP_TYPE:
1989                 __P4SETMASK(ut);
1990                 break;
1991         case PMC_EV_P4_BRANCH_RETIRED:
1992                 __P4SETMASK(br);
1993                 break;
1994         case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1995                 __P4SETMASK(mbr);
1996                 break;
1997         case PMC_EV_P4_X87_ASSIST:
1998                 __P4SETMASK(xa);
1999                 break;
2000         case PMC_EV_P4_MACHINE_CLEAR:
2001                 __P4SETMASK(machclr);
2002                 break;
2003         default:
2004                 return (-1);
2005         }
2006
2007         /* process additional flags */
2008         while ((p = strsep(&ctrspec, ",")) != NULL) {
2009                 if (KWPREFIXMATCH(p, P4_KW_ACTIVE "=")) {
2010                         q = strchr(p, '=');
2011                         if (*++q == '\0') /* skip '=' */
2012                                 return (-1);
2013
2014                         if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0)
2015                                 cccractivemask = 0x0;
2016                         else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0)
2017                                 cccractivemask = 0x1;
2018                         else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0)
2019                                 cccractivemask = 0x2;
2020                         else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0)
2021                                 cccractivemask = 0x3;
2022                         else
2023                                 return (-1);
2024
2025                 } else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE "=")) {
2026                         if (has_busreqtype == 0)
2027                                 return (-1);
2028
2029                         q = strchr(p, '=');
2030                         if (*++q == '\0') /* skip '=' */
2031                                 return (-1);
2032
2033                         count = strtol(q, &e, 0);
2034                         if (e == q || *e != '\0')
2035                                 return (-1);
2036                         evmask = (evmask & ~0x1F) | (count & 0x1F);
2037                 } else if (KWMATCH(p, P4_KW_CASCADE))
2038                         pmc_config->pm_caps |= PMC_CAP_CASCADE;
2039                 else if (KWMATCH(p, P4_KW_EDGE))
2040                         pmc_config->pm_caps |= PMC_CAP_EDGE;
2041                 else if (KWMATCH(p, P4_KW_INV))
2042                         pmc_config->pm_caps |= PMC_CAP_INVERT;
2043                 else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
2044                         if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
2045                                 return (-1);
2046                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2047                 } else if (KWMATCH(p, P4_KW_OS))
2048                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2049                 else if (KWMATCH(p, P4_KW_PRECISE))
2050                         pmc_config->pm_caps |= PMC_CAP_PRECISE;
2051                 else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
2052                         if (has_tag == 0)
2053                                 return (-1);
2054
2055                         q = strchr(p, '=');
2056                         if (*++q == '\0') /* skip '=' */
2057                                 return (-1);
2058
2059                         count = strtol(q, &e, 0);
2060                         if (e == q || *e != '\0')
2061                                 return (-1);
2062
2063                         pmc_config->pm_caps |= PMC_CAP_TAGGING;
2064                         pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
2065                             P4_ESCR_TO_TAG_VALUE(count);
2066                 } else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
2067                         q = strchr(p, '=');
2068                         if (*++q == '\0') /* skip '=' */
2069                                 return (-1);
2070
2071                         count = strtol(q, &e, 0);
2072                         if (e == q || *e != '\0')
2073                                 return (-1);
2074
2075                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
2076                         pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
2077                             ~P4_CCCR_THRESHOLD_MASK;
2078                         pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
2079                             P4_CCCR_TO_THRESHOLD(count);
2080                 } else if (KWMATCH(p, P4_KW_USR))
2081                         pmc_config->pm_caps |= PMC_CAP_USER;
2082                 else
2083                         return (-1);
2084         }
2085
2086         /* other post processing */
2087         if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
2088             pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
2089             pe == PMC_EV_P4_BSQ_ALLOCATION)
2090                 pmc_config->pm_caps |= PMC_CAP_EDGE;
2091
2092         /* fill in thread activity mask */
2093         pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
2094             P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
2095
2096         if (evmask)
2097                 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2098
2099         switch (pe) {
2100         case PMC_EV_P4_FSB_DATA_ACTIVITY:
2101                 if ((evmask & 0x06) == 0x06 ||
2102                     (evmask & 0x18) == 0x18)
2103                         return (-1); /* can't have own+other bits together */
2104                 if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
2105                         evmask = 0x1D;
2106                 break;
2107         case PMC_EV_P4_MACHINE_CLEAR:
2108                 /* only one bit is allowed to be set */
2109                 if ((evmask & (evmask - 1)) != 0)
2110                         return (-1);
2111                 if (evmask == 0) {
2112                         evmask = 0x1;   /* 'CLEAR' */
2113                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2114                 }
2115                 break;
2116         default:
2117                 if (evmask == 0 && pmask) {
2118                         for (pm = pmask; pm->pm_name; pm++)
2119                                 evmask |= pm->pm_value;
2120                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2121                 }
2122         }
2123
2124         pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
2125             P4_ESCR_TO_EVENT_MASK(evmask);
2126
2127         return (0);
2128 }
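/*
 * Usage sketch (illustrative only, not part of the library source): the P4
 * parser above accepts thread-activity and mask qualifiers.  The calling
 * code below is hypothetical.
 *
 *      struct pmc_op_pmcallocate cfg;
 *      char spec[] = "mask=nbogusntag+nbogustag,active=any,usr";
 *
 *      memset(&cfg, 0, sizeof(cfg));
 *      if (p4_allocate_pmc(PMC_EV_P4_INSTR_RETIRED, spec, &cfg) == 0) {
 *              // pm_p4_escrconfig carries P4_ESCR_TO_EVENT_MASK(0x3),
 *              // pm_p4_cccrconfig carries P4_CCCR_TO_ACTIVE_THREAD(0x3),
 *              // and pm_caps includes PMC_CAP_QUALIFIER | PMC_CAP_USER.
 *      }
 */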
2129
2130 #endif
2131
2132 #if defined(__i386__)
2133
2134 /*
2135  * Pentium style PMCs
2136  */
2137
2138 static struct pmc_event_alias p5_aliases[] = {
2139         EV_ALIAS("branches",            "p5-taken-branches"),
2140         EV_ALIAS("cycles",              "tsc"),
2141         EV_ALIAS("dc-misses",           "p5-data-read-miss-or-write-miss"),
2142         EV_ALIAS("ic-misses",           "p5-code-cache-miss"),
2143         EV_ALIAS("instructions",        "p5-instructions-executed"),
2144         EV_ALIAS("interrupts",          "p5-hardware-interrupts"),
2145         EV_ALIAS("unhalted-cycles",
2146             "p5-number-of-cycles-not-in-halt-state"),
2147         EV_ALIAS(NULL, NULL)
2148 };
2149
2150 static int
2151 p5_allocate_pmc(enum pmc_event pe __unused, char *ctrspec __unused,
2152     struct pmc_op_pmcallocate *pmc_config __unused)
2153 {
2154         return (-1);    /* Pentium event allocation is not supported. */
2155 }
2156
2157 /*
2158  * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
2159  * and Pentium M CPUs.
2160  */
2161
2162 static struct pmc_event_alias p6_aliases[] = {
2163         EV_ALIAS("branches",            "p6-br-inst-retired"),
2164         EV_ALIAS("branch-mispredicts",  "p6-br-miss-pred-retired"),
2165         EV_ALIAS("cycles",              "tsc"),
2166         EV_ALIAS("dc-misses",           "p6-dcu-lines-in"),
2167         EV_ALIAS("ic-misses",           "p6-ifu-fetch-miss"),
2168         EV_ALIAS("instructions",        "p6-inst-retired"),
2169         EV_ALIAS("interrupts",          "p6-hw-int-rx"),
2170         EV_ALIAS("unhalted-cycles",     "p6-cpu-clk-unhalted"),
2171         EV_ALIAS(NULL, NULL)
2172 };
2173
2174 #define P6_KW_CMASK     "cmask"
2175 #define P6_KW_EDGE      "edge"
2176 #define P6_KW_INV       "inv"
2177 #define P6_KW_OS        "os"
2178 #define P6_KW_UMASK     "umask"
2179 #define P6_KW_USR       "usr"
2180
2181 static struct pmc_masks p6_mask_mesi[] = {
2182         PMCMASK(m,      0x01),
2183         PMCMASK(e,      0x02),
2184         PMCMASK(s,      0x04),
2185         PMCMASK(i,      0x08),
2186         NULLMASK
2187 };
2188
2189 static struct pmc_masks p6_mask_mesihw[] = {
2190         PMCMASK(m,      0x01),
2191         PMCMASK(e,      0x02),
2192         PMCMASK(s,      0x04),
2193         PMCMASK(i,      0x08),
2194         PMCMASK(nonhw,  0x00),
2195         PMCMASK(hw,     0x10),
2196         PMCMASK(both,   0x30),
2197         NULLMASK
2198 };
2199
2200 static struct pmc_masks p6_mask_hw[] = {
2201         PMCMASK(nonhw,  0x00),
2202         PMCMASK(hw,     0x10),
2203         PMCMASK(both,   0x30),
2204         NULLMASK
2205 };
2206
2207 static struct pmc_masks p6_mask_any[] = {
2208         PMCMASK(self,   0x00),
2209         PMCMASK(any,    0x20),
2210         NULLMASK
2211 };
2212
2213 static struct pmc_masks p6_mask_ekp[] = {
2214         PMCMASK(nta,    0x00),
2215         PMCMASK(t1,     0x01),
2216         PMCMASK(t2,     0x02),
2217         PMCMASK(wos,    0x03),
2218         NULLMASK
2219 };
2220
2221 static struct pmc_masks p6_mask_pps[] = {
2222         PMCMASK(packed-and-scalar, 0x00),
2223         PMCMASK(scalar, 0x01),
2224         NULLMASK
2225 };
2226
2227 static struct pmc_masks p6_mask_mite[] = {
2228         PMCMASK(packed-multiply,         0x01),
2229         PMCMASK(packed-shift,           0x02),
2230         PMCMASK(pack,                   0x04),
2231         PMCMASK(unpack,                 0x08),
2232         PMCMASK(packed-logical,         0x10),
2233         PMCMASK(packed-arithmetic,      0x20),
2234         NULLMASK
2235 };
2236
2237 static struct pmc_masks p6_mask_fmt[] = {
2238         PMCMASK(mmxtofp,        0x00),
2239         PMCMASK(fptommx,        0x01),
2240         NULLMASK
2241 };
2242
2243 static struct pmc_masks p6_mask_sr[] = {
2244         PMCMASK(es,     0x01),
2245         PMCMASK(ds,     0x02),
2246         PMCMASK(fs,     0x04),
2247         PMCMASK(gs,     0x08),
2248         NULLMASK
2249 };
2250
2251 static struct pmc_masks p6_mask_eet[] = {
2252         PMCMASK(all,    0x00),
2253         PMCMASK(freq,   0x02),
2254         NULLMASK
2255 };
2256
2257 static struct pmc_masks p6_mask_efur[] = {
2258         PMCMASK(all,    0x00),
2259         PMCMASK(loadop, 0x01),
2260         PMCMASK(stdsta, 0x02),
2261         NULLMASK
2262 };
2263
2264 static struct pmc_masks p6_mask_essir[] = {
2265         PMCMASK(sse-packed-single,      0x00),
2266         PMCMASK(sse-packed-single-scalar-single, 0x01),
2267         PMCMASK(sse2-packed-double,     0x02),
2268         PMCMASK(sse2-scalar-double,     0x03),
2269         NULLMASK
2270 };
2271
2272 static struct pmc_masks p6_mask_esscir[] = {
2273         PMCMASK(sse-packed-single,      0x00),
2274         PMCMASK(sse-scalar-single,      0x01),
2275         PMCMASK(sse2-packed-double,     0x02),
2276         PMCMASK(sse2-scalar-double,     0x03),
2277         NULLMASK
2278 };
2279
2280 /* P6 event parser */
2281 static int
2282 p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
2283     struct pmc_op_pmcallocate *pmc_config)
2284 {
2285         char *e, *p, *q;
2286         uint64_t evmask;
2287         int count, n;
2288         const struct pmc_masks *pm, *pmask;
2289
2290         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2291         pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;
2292
2293         evmask = 0;
2294
2295 #define P6MASKSET(M)    pmask = p6_mask_ ## M
2296
2297         switch (pe) {
2298         case PMC_EV_P6_L2_IFETCH:       P6MASKSET(mesi); break;
2299         case PMC_EV_P6_L2_LD:           P6MASKSET(mesi); break;
2300         case PMC_EV_P6_L2_ST:           P6MASKSET(mesi); break;
2301         case PMC_EV_P6_L2_RQSTS:        P6MASKSET(mesi); break;
2302         case PMC_EV_P6_BUS_DRDY_CLOCKS:
2303         case PMC_EV_P6_BUS_LOCK_CLOCKS:
2304         case PMC_EV_P6_BUS_TRAN_BRD:
2305         case PMC_EV_P6_BUS_TRAN_RFO:
2306         case PMC_EV_P6_BUS_TRANS_WB:
2307         case PMC_EV_P6_BUS_TRAN_IFETCH:
2308         case PMC_EV_P6_BUS_TRAN_INVAL:
2309         case PMC_EV_P6_BUS_TRAN_PWR:
2310         case PMC_EV_P6_BUS_TRANS_P:
2311         case PMC_EV_P6_BUS_TRANS_IO:
2312         case PMC_EV_P6_BUS_TRAN_DEF:
2313         case PMC_EV_P6_BUS_TRAN_BURST:
2314         case PMC_EV_P6_BUS_TRAN_ANY:
2315         case PMC_EV_P6_BUS_TRAN_MEM:
2316                 P6MASKSET(any); break;
2317         case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
2318         case PMC_EV_P6_EMON_KNI_PREF_MISS:
2319                 P6MASKSET(ekp); break;
2320         case PMC_EV_P6_EMON_KNI_INST_RETIRED:
2321         case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
2322                 P6MASKSET(pps); break;
2323         case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
2324                 P6MASKSET(mite); break;
2325         case PMC_EV_P6_FP_MMX_TRANS:
2326                 P6MASKSET(fmt); break;
2327         case PMC_EV_P6_SEG_RENAME_STALLS:
2328         case PMC_EV_P6_SEG_REG_RENAMES:
2329                 P6MASKSET(sr);  break;
2330         case PMC_EV_P6_EMON_EST_TRANS:
2331                 P6MASKSET(eet); break;
2332         case PMC_EV_P6_EMON_FUSED_UOPS_RET:
2333                 P6MASKSET(efur); break;
2334         case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
2335                 P6MASKSET(essir); break;
2336         case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
2337                 P6MASKSET(esscir); break;
2338         default:
2339                 pmask = NULL;
2340                 break;
2341         }
2342
2343         /* Pentium M PMCs have a few events with different semantics */
2344         if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
2345                 if (pe == PMC_EV_P6_L2_LD ||
2346                     pe == PMC_EV_P6_L2_LINES_IN ||
2347                     pe == PMC_EV_P6_L2_LINES_OUT)
2348                         P6MASKSET(mesihw);
2349                 else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
2350                         P6MASKSET(hw);
2351         }
2352
2353         /* Parse additional modifiers if present */
2354         while ((p = strsep(&ctrspec, ",")) != NULL) {
2355                 if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
2356                         q = strchr(p, '=');
2357                         if (*++q == '\0') /* skip '=' */
2358                                 return (-1);
2359                         count = strtol(q, &e, 0);
2360                         if (e == q || *e != '\0')
2361                                 return (-1);
2362                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
2363                         pmc_config->pm_md.pm_ppro.pm_ppro_config |=
2364                             P6_EVSEL_TO_CMASK(count);
2365                 } else if (KWMATCH(p, P6_KW_EDGE)) {
2366                         pmc_config->pm_caps |= PMC_CAP_EDGE;
2367                 } else if (KWMATCH(p, P6_KW_INV)) {
2368                         pmc_config->pm_caps |= PMC_CAP_INVERT;
2369                 } else if (KWMATCH(p, P6_KW_OS)) {
2370                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2371                 } else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
2372                         evmask = 0;
2373                         if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
2374                                 return (-1);
2375                         if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
2376                              pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
2377                              pe == PMC_EV_P6_BUS_TRAN_BRD ||
2378                              pe == PMC_EV_P6_BUS_TRAN_RFO ||
2379                              pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
2380                              pe == PMC_EV_P6_BUS_TRAN_INVAL ||
2381                              pe == PMC_EV_P6_BUS_TRAN_PWR ||
2382                              pe == PMC_EV_P6_BUS_TRAN_DEF ||
2383                              pe == PMC_EV_P6_BUS_TRAN_BURST ||
2384                              pe == PMC_EV_P6_BUS_TRAN_ANY ||
2385                              pe == PMC_EV_P6_BUS_TRAN_MEM ||
2386                              pe == PMC_EV_P6_BUS_TRANS_IO ||
2387                              pe == PMC_EV_P6_BUS_TRANS_P ||
2388                              pe == PMC_EV_P6_BUS_TRANS_WB ||
2389                              pe == PMC_EV_P6_EMON_EST_TRANS ||
2390                              pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
2391                              pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
2392                              pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
2393                              pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
2394                              pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
2395                              pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
2396                              pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
2397                              pe == PMC_EV_P6_FP_MMX_TRANS)
2398                             && (n > 1)) /* Only one mask keyword is allowed. */
2399                                 return (-1);
2400                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2401                 } else if (KWMATCH(p, P6_KW_USR)) {
2402                         pmc_config->pm_caps |= PMC_CAP_USER;
2403                 } else
2404                         return (-1);
2405         }
2406
2407         /* post processing */
2408         switch (pe) {
2409
2410                 /*
2411                  * The following events default to an evmask of 0
2412                  */
2413
2414                 /* default => 'self' */
2415         case PMC_EV_P6_BUS_DRDY_CLOCKS:
2416         case PMC_EV_P6_BUS_LOCK_CLOCKS:
2417         case PMC_EV_P6_BUS_TRAN_BRD:
2418         case PMC_EV_P6_BUS_TRAN_RFO:
2419         case PMC_EV_P6_BUS_TRANS_WB:
2420         case PMC_EV_P6_BUS_TRAN_IFETCH:
2421         case PMC_EV_P6_BUS_TRAN_INVAL:
2422         case PMC_EV_P6_BUS_TRAN_PWR:
2423         case PMC_EV_P6_BUS_TRANS_P:
2424         case PMC_EV_P6_BUS_TRANS_IO:
2425         case PMC_EV_P6_BUS_TRAN_DEF:
2426         case PMC_EV_P6_BUS_TRAN_BURST:
2427         case PMC_EV_P6_BUS_TRAN_ANY:
2428         case PMC_EV_P6_BUS_TRAN_MEM:
2429
2430                 /* default => 'nta' */
2431         case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
2432         case PMC_EV_P6_EMON_KNI_PREF_MISS:
2433
2434                 /* default => 'packed and scalar' */
2435         case PMC_EV_P6_EMON_KNI_INST_RETIRED:
2436         case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
2437
2438                 /* default => 'mmx to fp transitions' */
2439         case PMC_EV_P6_FP_MMX_TRANS:
2440
2441                 /* default => 'SSE Packed Single' */
2442         case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
2443         case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
2444
2445                 /* default => 'all fused micro-ops' */
2446         case PMC_EV_P6_EMON_FUSED_UOPS_RET:
2447
2448                 /* default => 'all transitions' */
2449         case PMC_EV_P6_EMON_EST_TRANS:
2450                 break;
2451
2452         case PMC_EV_P6_MMX_UOPS_EXEC:
2453                 evmask = 0x0F;          /* only value allowed */
2454                 break;
2455
2456         default:
2457                 /*
2458                  * For all other events, set the default event mask
2459                  * to a logical OR of all the allowed event mask bits.
2460                  */
2461                 if (evmask == 0 && pmask) {
2462                         for (pm = pmask; pm->pm_name; pm++)
2463                                 evmask |= pm->pm_value;
2464                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2465                 }
2466
2467                 break;
2468         }
2469
2470         if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
2471                 pmc_config->pm_md.pm_ppro.pm_ppro_config |=
2472                     P6_EVSEL_TO_UMASK(evmask);
2473
2474         return (0);
2475 }
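/*
 * Usage sketch (illustrative only, not part of the library source): the P6
 * parser above combines a unit mask with an optional counter mask.  The
 * calling code below is hypothetical.
 *
 *      struct pmc_op_pmcallocate cfg;
 *      char spec[] = "umask=m+e+s,cmask=2,os";
 *
 *      memset(&cfg, 0, sizeof(cfg));
 *      if (p6_allocate_pmc(PMC_EV_P6_L2_LD, spec, &cfg) == 0) {
 *              // pm_ppro_config carries P6_EVSEL_TO_UMASK(0x07) (the M, E
 *              // and S states) and P6_EVSEL_TO_CMASK(2); pm_caps includes
 *              // PMC_CAP_QUALIFIER | PMC_CAP_THRESHOLD | PMC_CAP_SYSTEM.
 *      }
 */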
2476
2477 #endif
2478
2479 #if     defined(__i386__) || defined(__amd64__)
2480 static int
2481 tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
2482     struct pmc_op_pmcallocate *pmc_config)
2483 {
2484         if (pe != PMC_EV_TSC_TSC)
2485                 return (-1);
2486
2487         /* TSC events must be unqualified. */
2488         if (ctrspec && *ctrspec != '\0')
2489                 return (-1);
2490
2491         pmc_config->pm_md.pm_amd.pm_amd_config = 0;
2492         pmc_config->pm_caps |= PMC_CAP_READ;
2493
2494         return (0);
2495 }
2496 #endif
2497
2498 static struct pmc_event_alias generic_aliases[] = {
2499         EV_ALIAS("instructions",                "SOFT-CLOCK.HARD"),
2500         EV_ALIAS(NULL, NULL)
2501 };
2502
2503 static int
2504 soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
2505     struct pmc_op_pmcallocate *pmc_config)
2506 {
2507         (void)ctrspec;
2509
2510         if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
2511                 return (-1);
2512
2513         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2514         return (0);
2515 }
2516
2517 #if     defined(__arm__)
2518 #if     defined(__XSCALE__)
2519
2520 static struct pmc_event_alias xscale_aliases[] = {
2521         EV_ALIAS("branches",            "BRANCH_RETIRED"),
2522         EV_ALIAS("branch-mispredicts",  "BRANCH_MISPRED"),
2523         EV_ALIAS("dc-misses",           "DC_MISS"),
2524         EV_ALIAS("ic-misses",           "IC_MISS"),
2525         EV_ALIAS("instructions",        "INSTR_RETIRED"),
2526         EV_ALIAS(NULL, NULL)
2527 };
2528 static int
2529 xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2530     struct pmc_op_pmcallocate *pmc_config __unused)
2531 {
2532         switch (pe) {
2533         default:
2534                 break;
2535         }
2536
2537         return (0);
2538 }
2539 #endif
2540
2541 static struct pmc_event_alias cortex_a8_aliases[] = {
2542         EV_ALIAS("dc-misses",           "L1_DCACHE_REFILL"),
2543         EV_ALIAS("ic-misses",           "L1_ICACHE_REFILL"),
2544         EV_ALIAS("instructions",        "INSTR_EXECUTED"),
2545         EV_ALIAS(NULL, NULL)
2546 };
2547
2548 static struct pmc_event_alias cortex_a9_aliases[] = {
2549         EV_ALIAS("dc-misses",           "L1_DCACHE_REFILL"),
2550         EV_ALIAS("ic-misses",           "L1_ICACHE_REFILL"),
2551         EV_ALIAS("instructions",        "INSTR_EXECUTED"),
2552         EV_ALIAS(NULL, NULL)
2553 };
2554
2555 static int
2556 armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2557     struct pmc_op_pmcallocate *pmc_config __unused)
2558 {
2559         switch (pe) {
2560         default:
2561                 break;
2562         }
2563
2564         return (0);
2565 }
2566 #endif
2567
2568 #if     defined(__aarch64__)
2569 static struct pmc_event_alias cortex_a53_aliases[] = {
2570         EV_ALIAS(NULL, NULL)
2571 };
2572 static struct pmc_event_alias cortex_a57_aliases[] = {
2573         EV_ALIAS(NULL, NULL)
2574 };
2575 static int
2576 arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2577     struct pmc_op_pmcallocate *pmc_config __unused)
2578 {
2579         switch (pe) {
2580         default:
2581                 break;
2582         }
2583
2584         return (0);
2585 }
2586 #endif
2587
2588 #if defined(__mips__)
2589
2590 static struct pmc_event_alias mips24k_aliases[] = {
2591         EV_ALIAS("instructions",        "INSTR_EXECUTED"),
2592         EV_ALIAS("branches",            "BRANCH_COMPLETED"),
2593         EV_ALIAS("branch-mispredicts",  "BRANCH_MISPRED"),
2594         EV_ALIAS(NULL, NULL)
2595 };
2596
2597 static struct pmc_event_alias mips74k_aliases[] = {
2598         EV_ALIAS("instructions",        "INSTR_EXECUTED"),
2599         EV_ALIAS("branches",            "BRANCH_INSNS"),
2600         EV_ALIAS("branch-mispredicts",  "MISPREDICTED_BRANCH_INSNS"),
2601         EV_ALIAS(NULL, NULL)
2602 };
2603
2604 static struct pmc_event_alias octeon_aliases[] = {
2605         EV_ALIAS("instructions",        "RET"),
2606         EV_ALIAS("branches",            "BR"),
2607         EV_ALIAS("branch-mispredicts",  "BRMIS"),
2608         EV_ALIAS(NULL, NULL)
2609 };
2610
2611 #define MIPS_KW_OS              "os"
2612 #define MIPS_KW_USR             "usr"
2613 #define MIPS_KW_ANYTHREAD       "anythread"
2614
2615 static int
2616 mips_allocate_pmc(enum pmc_event pe, char *ctrspec,
2617                   struct pmc_op_pmcallocate *pmc_config)
2618 {
2619         char *p;
2620
2621         (void) pe;
2622
2623         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2624
2625         while ((p = strsep(&ctrspec, ",")) != NULL) {
2626                 if (KWMATCH(p, MIPS_KW_OS))
2627                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2628                 else if (KWMATCH(p, MIPS_KW_USR))
2629                         pmc_config->pm_caps |= PMC_CAP_USER;
2630                 else if (KWMATCH(p, MIPS_KW_ANYTHREAD))
2631                         pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
2632                 else
2633                         return (-1);
2634         }
2635
2636         return (0);
2637 }
2638
2639 #endif /* __mips__ */
2640
2641 #if defined(__powerpc__)
2642
2643 static struct pmc_event_alias ppc7450_aliases[] = {
2644         EV_ALIAS("instructions",        "INSTR_COMPLETED"),
2645         EV_ALIAS("branches",            "BRANCHES_COMPLETED"),
2646         EV_ALIAS("branch-mispredicts",  "MISPREDICTED_BRANCHES"),
2647         EV_ALIAS(NULL, NULL)
2648 };
2649
2650 static struct pmc_event_alias ppc970_aliases[] = {
2651         EV_ALIAS("instructions", "INSTR_COMPLETED"),
2652         EV_ALIAS("cycles",       "CYCLES"),
2653         EV_ALIAS(NULL, NULL)
2654 };
2655
2656 static struct pmc_event_alias e500_aliases[] = {
2657         EV_ALIAS("instructions", "INSTR_COMPLETED"),
2658         EV_ALIAS("cycles",       "CYCLES"),
2659         EV_ALIAS(NULL, NULL)
2660 };
2661
2662 #define POWERPC_KW_OS           "os"
2663 #define POWERPC_KW_USR          "usr"
2664 #define POWERPC_KW_ANYTHREAD    "anythread"
2665
2666 static int
2667 powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec,
2668                      struct pmc_op_pmcallocate *pmc_config)
2669 {
2670         char *p;
2671
2672         (void) pe;
2673
2674         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2675
2676         while ((p = strsep(&ctrspec, ",")) != NULL) {
2677                 if (KWMATCH(p, POWERPC_KW_OS))
2678                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2679                 else if (KWMATCH(p, POWERPC_KW_USR))
2680                         pmc_config->pm_caps |= PMC_CAP_USER;
2681                 else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
2682                         pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
2683                 else
2684                         return (-1);
2685         }
2686
2687         return (0);
2688 }
2689
2690 #endif /* __powerpc__ */
2691
2692
2693 /*
2694  * Match an event name `name' with its canonical form.
2695  *
2696  * Matches are case insensitive; the separator characters space, period,
2697  * underscore and hyphen are treated as equivalent.
2698  *
2699  * Returns 1 for a match, 0 otherwise.
2700  */
2701
2702 static int
2703 pmc_match_event_name(const char *name, const char *canonicalname)
2704 {
2705         int cc, nc;
2706         const unsigned char *c, *n;
2707
2708         c = (const unsigned char *) canonicalname;
2709         n = (const unsigned char *) name;
2710
2711         for (; (nc = *n) && (cc = *c); n++, c++) {
2712
2713                 if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
2714                     (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
2715                         continue;
2716
2717                 if (toupper(nc) == toupper(cc))
2718                         continue;
2719
2720
2721                 return (0);
2722         }
2723
2724         if (*n == '\0' && *c == '\0')
2725                 return (1);
2726
2727         return (0);
2728 }
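/*
 * Illustrative examples of the matching rules above (expected results are
 * annotated; the canonical name is taken from the ARM alias table in this
 * file):
 *
 *      pmc_match_event_name("branch-mispred", "BRANCH_MISPRED");   // 1
 *      pmc_match_event_name("Branch.Mispred", "BRANCH_MISPRED");   // 1
 *      pmc_match_event_name("branchmispred",  "BRANCH_MISPRED");   // 0
 */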
2729
2730 /*
2731  * Match an event name against all the event names supported by a
2732  * PMC class.
2733  *
2734  * Returns an event descriptor pointer on match or NULL otherwise.
2735  */
2736 static const struct pmc_event_descr *
2737 pmc_match_event_class(const char *name,
2738     const struct pmc_class_descr *pcd)
2739 {
2740         size_t n;
2741         const struct pmc_event_descr *ev;
2742
2743         ev = pcd->pm_evc_event_table;
2744         for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
2745                 if (pmc_match_event_name(name, ev->pm_ev_name))
2746                         return (ev);
2747
2748         return (NULL);
2749 }
2750
2751 static int
2752 pmc_mdep_is_compatible_class(enum pmc_class pc)
2753 {
2754         size_t n;
2755
2756         for (n = 0; n < pmc_mdep_class_list_size; n++)
2757                 if (pmc_mdep_class_list[n] == pc)
2758                         return (1);
2759         return (0);
2760 }
2761
2762 /*
2763  * API entry points
2764  */
2765
2766 int
2767 pmc_allocate(const char *ctrspec, enum pmc_mode mode,
2768     uint32_t flags, int cpu, pmc_id_t *pmcid)
2769 {
2770         size_t n;
2771         int retval;
2772         char *r, *spec_copy;
2773         const char *ctrname;
2774         const struct pmc_event_descr *ev;
2775         const struct pmc_event_alias *alias;
2776         struct pmc_op_pmcallocate pmc_config;
2777         const struct pmc_class_descr *pcd;
2778
2779         spec_copy = NULL;
2780         retval    = -1;
2781
2782         if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
2783             mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
2784                 errno = EINVAL;
2785                 goto out;
2786         }
2787         bzero(&pmc_config, sizeof(pmc_config));
2788         pmc_config.pm_cpu   = cpu;
2789         pmc_config.pm_mode  = mode;
2790         pmc_config.pm_flags = flags;
2791         if (PMC_IS_SAMPLING_MODE(mode))
2792                 pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
2793         /*
2794          * Can we pull this straight from the pmu table?
2795          */
2796         r = spec_copy = strdup(ctrspec);
2797         ctrname = strsep(&r, ",");
2798         if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0) {
2799                 if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0) {
2800                         goto out;
2801                 }
2802                 retval = 0;
2803                 *pmcid = pmc_config.pm_pmcid;
2804                 goto out;
2805         } else {
2806                 free(spec_copy);
2807                 spec_copy = NULL;
2808         }
2809
2810         /* replace an event alias with the canonical event specifier */
2811         if (pmc_mdep_event_aliases)
2812                 for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
2813                         if (!strcasecmp(ctrspec, alias->pm_alias)) {
2814                                 spec_copy = strdup(alias->pm_spec);
2815                                 break;
2816                         }
2817
2818         if (spec_copy == NULL)
2819                 spec_copy = strdup(ctrspec);
2820
2821         r = spec_copy;
2822         ctrname = strsep(&r, ",");
2823
2824         /*
2825          * If an explicit class prefix was given by the user, restrict the
2826          * search for the event to the specified PMC class.
2827          */
2828         ev = NULL;
2829         for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
2830                 pcd = pmc_class_table[n];
2831                 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
2832                     strncasecmp(ctrname, pcd->pm_evc_name,
2833                                 pcd->pm_evc_name_size) == 0) {
2834                         if ((ev = pmc_match_event_class(ctrname +
2835                             pcd->pm_evc_name_size, pcd)) == NULL) {
2836                                 errno = EINVAL;
2837                                 goto out;
2838                         }
2839                         break;
2840                 }
2841         }
2842
2843         /*
2844          * Otherwise, search for this event in all compatible PMC
2845          * classes.
2846          */
2847         for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
2848                 pcd = pmc_class_table[n];
2849                 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
2850                         ev = pmc_match_event_class(ctrname, pcd);
2851         }
2852
2853         if (ev == NULL) {
2854                 errno = EINVAL;
2855                 goto out;
2856         }
2857
2858         pmc_config.pm_ev    = ev->pm_ev_code;
2859         pmc_config.pm_class = pcd->pm_evc_class;
2860
2861         if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
2862                 errno = EINVAL;
2863                 goto out;
2864         }
2865
2866         if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
2867                 goto out;
2868
2869         *pmcid = pmc_config.pm_pmcid;
2870
2871         retval = 0;
2872
2873  out:
2874         if (spec_copy)
2875                 free(spec_copy);
2876
2877         return (retval);
2878 }
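/*
 * Usage sketch (illustrative, error handling abbreviated): allocate a
 * thread-scope counting PMC for the current process and read it.
 * pmc_init() is assumed to have been called already.
 *
 *      pmc_id_t pmcid;
 *      pmc_value_t value;
 *
 *      if (pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY,
 *          &pmcid) < 0)
 *              err(EX_OSERR, "pmc_allocate");
 *      if (pmc_attach(pmcid, 0) < 0 || pmc_start(pmcid) < 0)
 *              err(EX_OSERR, "pmc_attach/pmc_start");
 *      // ... run the code being measured ...
 *      if (pmc_stop(pmcid) < 0 || pmc_read(pmcid, &value) < 0)
 *              err(EX_OSERR, "pmc_read");
 *      pmc_release(pmcid);
 */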
2879
2880 int
2881 pmc_attach(pmc_id_t pmc, pid_t pid)
2882 {
2883         struct pmc_op_pmcattach pmc_attach_args;
2884
2885         pmc_attach_args.pm_pmc = pmc;
2886         pmc_attach_args.pm_pid = pid;
2887
2888         return (PMC_CALL(PMCATTACH, &pmc_attach_args));
2889 }
2890
2891 int
2892 pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
2893 {
2894         unsigned int i;
2895         enum pmc_class cl;
2896
2897         cl = PMC_ID_TO_CLASS(pmcid);
2898         for (i = 0; i < cpu_info.pm_nclass; i++)
2899                 if (cpu_info.pm_classes[i].pm_class == cl) {
2900                         *caps = cpu_info.pm_classes[i].pm_caps;
2901                         return (0);
2902                 }
2903         errno = EINVAL;
2904         return (-1);
2905 }
2906
2907 int
2908 pmc_configure_logfile(int fd)
2909 {
2910         struct pmc_op_configurelog cla;
2911
2912         cla.pm_logfd = fd;
2913         if (PMC_CALL(CONFIGURELOG, &cla) < 0)
2914                 return (-1);
2915         return (0);
2916 }
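/*
 * Usage sketch (illustrative): sampling-mode PMCs log through hwpmc, so a
 * log file descriptor has to be handed to the kernel before sampling is
 * started.  The file name below is an arbitrary example.
 *
 *      int fd;
 *
 *      fd = open("samples.pmclog", O_WRONLY | O_CREAT | O_TRUNC, 0600);
 *      if (fd < 0 || pmc_configure_logfile(fd) < 0)
 *              err(EX_OSERR, "cannot configure hwpmc logging");
 */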
2917
2918 int
2919 pmc_cpuinfo(const struct pmc_cpuinfo **pci)
2920 {
2921         if (pmc_syscall == -1) {
2922                 errno = ENXIO;
2923                 return (-1);
2924         }
2925
2926         *pci = &cpu_info;
2927         return (0);
2928 }
2929
2930 int
2931 pmc_detach(pmc_id_t pmc, pid_t pid)
2932 {
2933         struct pmc_op_pmcattach pmc_detach_args;
2934
2935         pmc_detach_args.pm_pmc = pmc;
2936         pmc_detach_args.pm_pid = pid;
2937         return (PMC_CALL(PMCDETACH, &pmc_detach_args));
2938 }
2939
2940 int
2941 pmc_disable(int cpu, int pmc)
2942 {
2943         struct pmc_op_pmcadmin ssa;
2944
2945         ssa.pm_cpu = cpu;
2946         ssa.pm_pmc = pmc;
2947         ssa.pm_state = PMC_STATE_DISABLED;
2948         return (PMC_CALL(PMCADMIN, &ssa));
2949 }
2950
2951 int
2952 pmc_enable(int cpu, int pmc)
2953 {
2954         struct pmc_op_pmcadmin ssa;
2955
2956         ssa.pm_cpu = cpu;
2957         ssa.pm_pmc = pmc;
2958         ssa.pm_state = PMC_STATE_FREE;
2959         return (PMC_CALL(PMCADMIN, &ssa));
2960 }
2961
2962 /*
2963  * Return a list of events known to a given PMC class.  'cl' is the
2964  * PMC class identifier, 'eventnames' is the returned list of 'const
2965  * char *' pointers pointing to the names of the events. 'nevents' is
2966  * the number of event name pointers returned.
2967  *
2968  * The space for 'eventnames' is allocated using malloc(3).  The caller
2969  * is responsible for freeing this space when done.
2970  */
2971 int
2972 pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
2973     int *nevents)
2974 {
2975         int count;
2976         const char **names;
2977         const struct pmc_event_descr *ev;
2978
2979         switch (cl)
2980         {
2981         case PMC_CLASS_IAF:
2982                 ev = iaf_event_table;
2983                 count = PMC_EVENT_TABLE_SIZE(iaf);
2984                 break;
2985         case PMC_CLASS_IAP:
2986                 /*
2987                  * Return the most appropriate set of event name
2988                  * spellings for the current CPU.
2989                  */
2990                 switch (cpu_info.pm_cputype) {
2991                 default:
2992                 case PMC_CPU_INTEL_ATOM:
2993                         ev = atom_event_table;
2994                         count = PMC_EVENT_TABLE_SIZE(atom);
2995                         break;
2996                 case PMC_CPU_INTEL_ATOM_SILVERMONT:
2997                         ev = atom_silvermont_event_table;
2998                         count = PMC_EVENT_TABLE_SIZE(atom_silvermont);
2999                         break;
3000                 case PMC_CPU_INTEL_CORE:
3001                         ev = core_event_table;
3002                         count = PMC_EVENT_TABLE_SIZE(core);
3003                         break;
3004                 case PMC_CPU_INTEL_CORE2:
3005                 case PMC_CPU_INTEL_CORE2EXTREME:
3006                         ev = core2_event_table;
3007                         count = PMC_EVENT_TABLE_SIZE(core2);
3008                         break;
3009                 case PMC_CPU_INTEL_COREI7:
3010                         ev = corei7_event_table;
3011                         count = PMC_EVENT_TABLE_SIZE(corei7);
3012                         break;
3013                 case PMC_CPU_INTEL_NEHALEM_EX:
3014                         ev = nehalem_ex_event_table;
3015                         count = PMC_EVENT_TABLE_SIZE(nehalem_ex);
3016                         break;
3017                 case PMC_CPU_INTEL_HASWELL:
3018                         ev = haswell_event_table;
3019                         count = PMC_EVENT_TABLE_SIZE(haswell);
3020                         break;
3021                 case PMC_CPU_INTEL_HASWELL_XEON:
3022                         ev = haswell_xeon_event_table;
3023                         count = PMC_EVENT_TABLE_SIZE(haswell_xeon);
3024                         break;
3025                 case PMC_CPU_INTEL_BROADWELL:
3026                         ev = broadwell_event_table;
3027                         count = PMC_EVENT_TABLE_SIZE(broadwell);
3028                         break;
3029                 case PMC_CPU_INTEL_BROADWELL_XEON:
3030                         ev = broadwell_xeon_event_table;
3031                         count = PMC_EVENT_TABLE_SIZE(broadwell_xeon);
3032                         break;
3033                 case PMC_CPU_INTEL_SKYLAKE:
3034                         ev = skylake_event_table;
3035                         count = PMC_EVENT_TABLE_SIZE(skylake);
3036                         break;
3037                 case PMC_CPU_INTEL_SKYLAKE_XEON:
3038                         ev = skylake_xeon_event_table;
3039                         count = PMC_EVENT_TABLE_SIZE(skylake_xeon);
3040                         break;
3041                 case PMC_CPU_INTEL_IVYBRIDGE:
3042                         ev = ivybridge_event_table;
3043                         count = PMC_EVENT_TABLE_SIZE(ivybridge);
3044                         break;
3045                 case PMC_CPU_INTEL_IVYBRIDGE_XEON:
3046                         ev = ivybridge_xeon_event_table;
3047                         count = PMC_EVENT_TABLE_SIZE(ivybridge_xeon);
3048                         break;
3049                 case PMC_CPU_INTEL_SANDYBRIDGE:
3050                         ev = sandybridge_event_table;
3051                         count = PMC_EVENT_TABLE_SIZE(sandybridge);
3052                         break;
3053                 case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
3054                         ev = sandybridge_xeon_event_table;
3055                         count = PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
3056                         break;
3057                 case PMC_CPU_INTEL_WESTMERE:
3058                         ev = westmere_event_table;
3059                         count = PMC_EVENT_TABLE_SIZE(westmere);
3060                         break;
3061                 case PMC_CPU_INTEL_WESTMERE_EX:
3062                         ev = westmere_ex_event_table;
3063                         count = PMC_EVENT_TABLE_SIZE(westmere_ex);
3064                         break;
3065                 }
3066                 break;
3067         case PMC_CLASS_UCF:
3068                 ev = ucf_event_table;
3069                 count = PMC_EVENT_TABLE_SIZE(ucf);
3070                 break;
3071         case PMC_CLASS_UCP:
3072                 /*
3073                  * Return the most appropriate set of event name
3074                  * spellings for the current CPU.
3075                  */
3076                 switch (cpu_info.pm_cputype) {
3077                 default:
3078                 case PMC_CPU_INTEL_COREI7:
3079                         ev = corei7uc_event_table;
3080                         count = PMC_EVENT_TABLE_SIZE(corei7uc);
3081                         break;
3082                 case PMC_CPU_INTEL_HASWELL:
3083                         ev = haswelluc_event_table;
3084                         count = PMC_EVENT_TABLE_SIZE(haswelluc);
3085                         break;
3086                 case PMC_CPU_INTEL_BROADWELL:
3087                         ev = broadwelluc_event_table;
3088                         count = PMC_EVENT_TABLE_SIZE(broadwelluc);
3089                         break;
3090                 case PMC_CPU_INTEL_SANDYBRIDGE:
3091                         ev = sandybridgeuc_event_table;
3092                         count = PMC_EVENT_TABLE_SIZE(sandybridgeuc);
3093                         break;
3094                 case PMC_CPU_INTEL_WESTMERE:
3095                         ev = westmereuc_event_table;
3096                         count = PMC_EVENT_TABLE_SIZE(westmereuc);
3097                         break;
3098                 }
3099                 break;
3100         case PMC_CLASS_TSC:
3101                 ev = tsc_event_table;
3102                 count = PMC_EVENT_TABLE_SIZE(tsc);
3103                 break;
3104         case PMC_CLASS_K7:
3105                 ev = k7_event_table;
3106                 count = PMC_EVENT_TABLE_SIZE(k7);
3107                 break;
3108         case PMC_CLASS_K8:
3109                 ev = k8_event_table;
3110                 count = PMC_EVENT_TABLE_SIZE(k8);
3111                 break;
3112         case PMC_CLASS_P4:
3113                 ev = p4_event_table;
3114                 count = PMC_EVENT_TABLE_SIZE(p4);
3115                 break;
3116         case PMC_CLASS_P5:
3117                 ev = p5_event_table;
3118                 count = PMC_EVENT_TABLE_SIZE(p5);
3119                 break;
3120         case PMC_CLASS_P6:
3121                 ev = p6_event_table;
3122                 count = PMC_EVENT_TABLE_SIZE(p6);
3123                 break;
3124         case PMC_CLASS_XSCALE:
3125                 ev = xscale_event_table;
3126                 count = PMC_EVENT_TABLE_SIZE(xscale);
3127                 break;
3128         case PMC_CLASS_ARMV7:
3129                 switch (cpu_info.pm_cputype) {
3130                 default:
3131                 case PMC_CPU_ARMV7_CORTEX_A8:
3132                         ev = cortex_a8_event_table;
3133                         count = PMC_EVENT_TABLE_SIZE(cortex_a8);
3134                         break;
3135                 case PMC_CPU_ARMV7_CORTEX_A9:
3136                         ev = cortex_a9_event_table;
3137                         count = PMC_EVENT_TABLE_SIZE(cortex_a9);
3138                         break;
3139                 }
3140                 break;
3141         case PMC_CLASS_ARMV8:
3142                 switch (cpu_info.pm_cputype) {
3143                 default:
3144                 case PMC_CPU_ARMV8_CORTEX_A53:
3145                         ev = cortex_a53_event_table;
3146                         count = PMC_EVENT_TABLE_SIZE(cortex_a53);
3147                         break;
3148                 case PMC_CPU_ARMV8_CORTEX_A57:
3149                         ev = cortex_a57_event_table;
3150                         count = PMC_EVENT_TABLE_SIZE(cortex_a57);
3151                         break;
3152                 }
3153                 break;
3154         case PMC_CLASS_MIPS24K:
3155                 ev = mips24k_event_table;
3156                 count = PMC_EVENT_TABLE_SIZE(mips24k);
3157                 break;
3158         case PMC_CLASS_MIPS74K:
3159                 ev = mips74k_event_table;
3160                 count = PMC_EVENT_TABLE_SIZE(mips74k);
3161                 break;
3162         case PMC_CLASS_OCTEON:
3163                 ev = octeon_event_table;
3164                 count = PMC_EVENT_TABLE_SIZE(octeon);
3165                 break;
3166         case PMC_CLASS_PPC7450:
3167                 ev = ppc7450_event_table;
3168                 count = PMC_EVENT_TABLE_SIZE(ppc7450);
3169                 break;
3170         case PMC_CLASS_PPC970:
3171                 ev = ppc970_event_table;
3172                 count = PMC_EVENT_TABLE_SIZE(ppc970);
3173                 break;
3174         case PMC_CLASS_E500:
3175                 ev = e500_event_table;
3176                 count = PMC_EVENT_TABLE_SIZE(e500);
3177                 break;
3178         case PMC_CLASS_SOFT:
3179                 ev = soft_event_table;
3180                 count = soft_event_info.pm_nevent;
3181                 break;
3182         default:
3183                 errno = EINVAL;
3184                 return (-1);
3185         }
3186
3187         if ((names = malloc(count * sizeof(const char *))) == NULL)
3188                 return (-1);
3189
3190         *eventnames = names;
3191         *nevents = count;
3192
3193         for (; count--; ev++, names++)
3194                 *names = ev->pm_ev_name;
3195
3196         return (0);
3197 }
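/*
 * Example (illustrative sketch only, not part of the library): enumerate
 * the event names known for the software PMC class and release the
 * malloc(3)'d array afterwards, as the contract above requires.  Error
 * handling with err(3) is abbreviated.
 *
 *	const char **names;
 *	int i, nevents;
 *
 *	if (pmc_init() < 0)
 *		err(1, "pmc_init");
 *	if (pmc_event_names_of_class(PMC_CLASS_SOFT, &names, &nevents) < 0)
 *		err(1, "pmc_event_names_of_class");
 *	for (i = 0; i < nevents; i++)
 *		printf("%s\n", names[i]);
 *	free(names);
 */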
3198
3199 int
3200 pmc_flush_logfile(void)
3201 {
3202         return (PMC_CALL(FLUSHLOG, 0));
3203 }
3204
3205 int
3206 pmc_close_logfile(void)
3207 {
3208         return (PMC_CALL(CLOSELOG, 0));
3209 }
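/*
 * Example (illustrative sketch only, not part of the library): direct the
 * driver's log records to a file, then flush and detach the log when the
 * measurement is over.  The file name and error handling are placeholders.
 *
 *	int fd;
 *
 *	if ((fd = open("samples.pmclog", O_WRONLY | O_CREAT | O_TRUNC,
 *	    0600)) < 0)
 *		err(1, "open");
 *	if (pmc_configure_logfile(fd) < 0)
 *		err(1, "pmc_configure_logfile");
 *	... sampling PMCs write log records through the kernel ...
 *	(void) pmc_flush_logfile();
 *	(void) pmc_close_logfile();
 */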
3210
3211 int
3212 pmc_get_driver_stats(struct pmc_driverstats *ds)
3213 {
3214         struct pmc_op_getdriverstats gms;
3215
3216         if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
3217                 return (-1);
3218
3219         /* copy out fields in the current userland<->library interface */
3220         ds->pm_intr_ignored    = gms.pm_intr_ignored;
3221         ds->pm_intr_processed  = gms.pm_intr_processed;
3222         ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
3223         ds->pm_syscalls        = gms.pm_syscalls;
3224         ds->pm_syscall_errors  = gms.pm_syscall_errors;
3225         ds->pm_buffer_requests = gms.pm_buffer_requests;
3226         ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
3227         ds->pm_log_sweeps      = gms.pm_log_sweeps;
3228         return (0);
3229 }
3230
3231 int
3232 pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
3233 {
3234         struct pmc_op_getmsr gm;
3235
3236         gm.pm_pmcid = pmc;
3237         if (PMC_CALL(PMCGETMSR, &gm) < 0)
3238                 return (-1);
3239         *msr = gm.pm_msr;
3240         return (0);
3241 }
3242
3243 int
3244 pmc_init(void)
3245 {
3246         int error, pmc_mod_id;
3247         unsigned int n;
3248         uint32_t abi_version;
3249         struct module_stat pmc_modstat;
3250         struct pmc_op_getcpuinfo op_cpu_info;
3251 #if defined(__amd64__) || defined(__i386__)
3252         int cpu_has_iaf_counters;
3253         unsigned int t;
3254 #endif
3255
3256         if (pmc_syscall != -1) /* already inited */
3257                 return (0);
3258
3259         /* retrieve the system call number from the KLD */
3260         if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
3261                 return (-1);
3262
3263         pmc_modstat.version = sizeof(struct module_stat);
3264         if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
3265                 return (-1);
3266
3267         pmc_syscall = pmc_modstat.data.intval;
3268
3269         /* check the kernel module's ABI against our compiled-in version */
3270         abi_version = PMC_VERSION;
3271         if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
3272                 return (pmc_syscall = -1);
3273
3274         /* ignore patch & minor numbers for the comparison */
3275         if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
3276                 errno  = EPROGMISMATCH;
3277                 return (pmc_syscall = -1);
3278         }
3279
3280         if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
3281                 return (pmc_syscall = -1);
3282
3283         cpu_info.pm_cputype = op_cpu_info.pm_cputype;
3284         cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
3285         cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
3286         cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
3287         for (n = 0; n < cpu_info.pm_nclass; n++)
3288                 memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
3289                     sizeof(cpu_info.pm_classes[n]));
3290
3291         pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
3292             sizeof(struct pmc_class_descr *));
3293
3294         if (pmc_class_table == NULL)
3295                 return (pmc_syscall = -1);
3296
3297         for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
3298                 pmc_class_table[n] = NULL;
3299
3300         /*
3301          * Get soft events list.
3302          */
3303         soft_event_info.pm_class = PMC_CLASS_SOFT;
3304         if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
3305                 return (pmc_syscall = -1);
3306
3307         /* Map soft events to static list. */
3308         for (n = 0; n < soft_event_info.pm_nevent; n++) {
3309                 soft_event_table[n].pm_ev_name =
3310                     soft_event_info.pm_events[n].pm_ev_name;
3311                 soft_event_table[n].pm_ev_code =
3312                     soft_event_info.pm_events[n].pm_ev_code;
3313         }
3314         soft_class_table_descr.pm_evc_event_table_size =
3315             soft_event_info.pm_nevent;
3316         soft_class_table_descr.pm_evc_event_table =
3317             soft_event_table;
3318
3319         /*
3320          * Fill in the class table.
3321          */
3322         n = 0;
3323
3324         /* Fill soft events information. */
3325         pmc_class_table[n++] = &soft_class_table_descr;
3326 #if defined(__amd64__) || defined(__i386__)
3327         if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
3328                 pmc_class_table[n++] = &tsc_class_table_descr;
3329
3330         /*
3331          * Check if this CPU has fixed function counters.
3332          */
3333         cpu_has_iaf_counters = 0;
3334         for (t = 0; t < cpu_info.pm_nclass; t++)
3335                 if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF &&
3336                     cpu_info.pm_classes[t].pm_num > 0)
3337                         cpu_has_iaf_counters = 1;
3338 #endif
3339
3340 #define PMC_MDEP_INIT(C) do {                                   \
3341                 pmc_mdep_event_aliases    = C##_aliases;        \
3342                 pmc_mdep_class_list  = C##_pmc_classes;         \
3343                 pmc_mdep_class_list_size =                      \
3344                     PMC_TABLE_SIZE(C##_pmc_classes);            \
3345         } while (0)
3346
3347 #define PMC_MDEP_INIT_INTEL_V2(C) do {                                  \
3348                 PMC_MDEP_INIT(C);                                       \
3349                 pmc_class_table[n++] = &iaf_class_table_descr;          \
3350                 if (!cpu_has_iaf_counters)                              \
3351                         pmc_mdep_event_aliases =                        \
3352                                 C##_aliases_without_iaf;                \
3353                 pmc_class_table[n] = &C##_class_table_descr;            \
3354         } while (0)
3355
3356         /* Configure the event name parser. */
3357         switch (cpu_info.pm_cputype) {
3358 #if defined(__i386__)
3359         case PMC_CPU_AMD_K7:
3360                 PMC_MDEP_INIT(k7);
3361                 pmc_class_table[n] = &k7_class_table_descr;
3362                 break;
3363         case PMC_CPU_INTEL_P5:
3364                 PMC_MDEP_INIT(p5);
3365                 pmc_class_table[n]  = &p5_class_table_descr;
3366                 break;
3367         case PMC_CPU_INTEL_P6:          /* P6 ... Pentium M CPUs have */
3368         case PMC_CPU_INTEL_PII:         /* similar PMCs. */
3369         case PMC_CPU_INTEL_PIII:
3370         case PMC_CPU_INTEL_PM:
3371                 PMC_MDEP_INIT(p6);
3372                 pmc_class_table[n] = &p6_class_table_descr;
3373                 break;
3374 #endif
3375 #if defined(__amd64__) || defined(__i386__)
3376         case PMC_CPU_AMD_K8:
3377                 PMC_MDEP_INIT(k8);
3378                 pmc_class_table[n] = &k8_class_table_descr;
3379                 break;
3380         case PMC_CPU_INTEL_ATOM:
3381                 PMC_MDEP_INIT_INTEL_V2(atom);
3382                 break;
3383         case PMC_CPU_INTEL_ATOM_SILVERMONT:
3384                 PMC_MDEP_INIT_INTEL_V2(atom_silvermont);
3385                 break;
3386         case PMC_CPU_INTEL_CORE:
3387                 PMC_MDEP_INIT(core);
3388                 pmc_class_table[n] = &core_class_table_descr;
3389                 break;
3390         case PMC_CPU_INTEL_CORE2:
3391         case PMC_CPU_INTEL_CORE2EXTREME:
3392                 PMC_MDEP_INIT_INTEL_V2(core2);
3393                 break;
3394         case PMC_CPU_INTEL_COREI7:
3395                 pmc_class_table[n++] = &ucf_class_table_descr;
3396                 pmc_class_table[n++] = &corei7uc_class_table_descr;
3397                 PMC_MDEP_INIT_INTEL_V2(corei7);
3398                 break;
3399         case PMC_CPU_INTEL_NEHALEM_EX:
3400                 PMC_MDEP_INIT_INTEL_V2(nehalem_ex);
3401                 break;
3402         case PMC_CPU_INTEL_HASWELL:
3403                 pmc_class_table[n++] = &ucf_class_table_descr;
3404                 pmc_class_table[n++] = &haswelluc_class_table_descr;
3405                 PMC_MDEP_INIT_INTEL_V2(haswell);
3406                 break;
3407         case PMC_CPU_INTEL_HASWELL_XEON:
3408                 PMC_MDEP_INIT_INTEL_V2(haswell_xeon);
3409                 break;
3410         case PMC_CPU_INTEL_BROADWELL:
3411                 pmc_class_table[n++] = &ucf_class_table_descr;
3412                 pmc_class_table[n++] = &broadwelluc_class_table_descr;
3413                 PMC_MDEP_INIT_INTEL_V2(broadwell);
3414                 break;
3415         case PMC_CPU_INTEL_BROADWELL_XEON:
3416                 PMC_MDEP_INIT_INTEL_V2(broadwell_xeon);
3417                 break;
3418         case PMC_CPU_INTEL_SKYLAKE:
3419                 PMC_MDEP_INIT_INTEL_V2(skylake);
3420                 break;
3421         case PMC_CPU_INTEL_SKYLAKE_XEON:
3422                 PMC_MDEP_INIT_INTEL_V2(skylake_xeon);
3423                 break;
3424         case PMC_CPU_INTEL_IVYBRIDGE:
3425                 PMC_MDEP_INIT_INTEL_V2(ivybridge);
3426                 break;
3427         case PMC_CPU_INTEL_IVYBRIDGE_XEON:
3428                 PMC_MDEP_INIT_INTEL_V2(ivybridge_xeon);
3429                 break;
3430         case PMC_CPU_INTEL_SANDYBRIDGE:
3431                 pmc_class_table[n++] = &ucf_class_table_descr;
3432                 pmc_class_table[n++] = &sandybridgeuc_class_table_descr;
3433                 PMC_MDEP_INIT_INTEL_V2(sandybridge);
3434                 break;
3435         case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
3436                 PMC_MDEP_INIT_INTEL_V2(sandybridge_xeon);
3437                 break;
3438         case PMC_CPU_INTEL_WESTMERE:
3439                 pmc_class_table[n++] = &ucf_class_table_descr;
3440                 pmc_class_table[n++] = &westmereuc_class_table_descr;
3441                 PMC_MDEP_INIT_INTEL_V2(westmere);
3442                 break;
3443         case PMC_CPU_INTEL_WESTMERE_EX:
3444                 PMC_MDEP_INIT_INTEL_V2(westmere_ex);
3445                 break;
3446         case PMC_CPU_INTEL_PIV:
3447                 PMC_MDEP_INIT(p4);
3448                 pmc_class_table[n] = &p4_class_table_descr;
3449                 break;
3450 #endif
3451         case PMC_CPU_GENERIC:
3452                 PMC_MDEP_INIT(generic);
3453                 break;
3454 #if defined(__arm__)
3455 #if defined(__XSCALE__)
3456         case PMC_CPU_INTEL_XSCALE:
3457                 PMC_MDEP_INIT(xscale);
3458                 pmc_class_table[n] = &xscale_class_table_descr;
3459                 break;
3460 #endif
3461         case PMC_CPU_ARMV7_CORTEX_A8:
3462                 PMC_MDEP_INIT(cortex_a8);
3463                 pmc_class_table[n] = &cortex_a8_class_table_descr;
3464                 break;
3465         case PMC_CPU_ARMV7_CORTEX_A9:
3466                 PMC_MDEP_INIT(cortex_a9);
3467                 pmc_class_table[n] = &cortex_a9_class_table_descr;
3468                 break;
3469 #endif
3470 #if defined(__aarch64__)
3471         case PMC_CPU_ARMV8_CORTEX_A53:
3472                 PMC_MDEP_INIT(cortex_a53);
3473                 pmc_class_table[n] = &cortex_a53_class_table_descr;
3474                 break;
3475         case PMC_CPU_ARMV8_CORTEX_A57:
3476                 PMC_MDEP_INIT(cortex_a57);
3477                 pmc_class_table[n] = &cortex_a57_class_table_descr;
3478                 break;
3479 #endif
3480 #if defined(__mips__)
3481         case PMC_CPU_MIPS_24K:
3482                 PMC_MDEP_INIT(mips24k);
3483                 pmc_class_table[n] = &mips24k_class_table_descr;
3484                 break;
3485         case PMC_CPU_MIPS_74K:
3486                 PMC_MDEP_INIT(mips74k);
3487                 pmc_class_table[n] = &mips74k_class_table_descr;
3488                 break;
3489         case PMC_CPU_MIPS_OCTEON:
3490                 PMC_MDEP_INIT(octeon);
3491                 pmc_class_table[n] = &octeon_class_table_descr;
3492                 break;
3493 #endif /* __mips__ */
3494 #if defined(__powerpc__)
3495         case PMC_CPU_PPC_7450:
3496                 PMC_MDEP_INIT(ppc7450);
3497                 pmc_class_table[n] = &ppc7450_class_table_descr;
3498                 break;
3499         case PMC_CPU_PPC_970:
3500                 PMC_MDEP_INIT(ppc970);
3501                 pmc_class_table[n] = &ppc970_class_table_descr;
3502                 break;
3503         case PMC_CPU_PPC_E500:
3504                 PMC_MDEP_INIT(e500);
3505                 pmc_class_table[n] = &e500_class_table_descr;
3506                 break;
3507 #endif
3508         default:
3509                 /*
3510          * A CPU type that this version of the library knows nothing
3511          * about.  This should not happen, since the ABI version check
3512          * above should have caught such a mismatch.
3513                  */
3514                 errno = ENXIO;
3515                 return (pmc_syscall = -1);
3516         }
3517
3518         return (0);
3519 }
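/*
 * Example (illustrative sketch only, not part of the library): pmc_init()
 * must succeed before any other libpmc call; a typical caller then
 * inspects the CPU information cached above.
 *
 *	const struct pmc_cpuinfo *ci;
 *
 *	if (pmc_init() < 0)
 *		err(1, "hwpmc(4) not available or ABI mismatch");
 *	if (pmc_cpuinfo(&ci) == 0)
 *		printf("%s: %u CPUs, %u PMCs per CPU\n",
 *		    pmc_name_of_cputype(ci->pm_cputype),
 *		    ci->pm_ncpu, ci->pm_npmc);
 */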
3520
3521 const char *
3522 pmc_name_of_capability(enum pmc_caps cap)
3523 {
3524         int i;
3525
3526         /*
3527          * 'cap' should have a single bit set and should be in
3528          * range.
3529          */
3530         if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
3531             cap > PMC_CAP_LAST) {
3532                 errno = EINVAL;
3533                 return (NULL);
3534         }
3535
3536         i = ffs(cap);
3537         return (pmc_capability_names[i - 1]);
3538 }
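/*
 * Example (illustrative sketch only, not part of the library): since the
 * function above expects a single capability bit, a caller decodes the
 * mask returned by pmc_capabilities() one bit at a time.  'pmcid' is
 * assumed to name a previously allocated PMC.
 *
 *	uint32_t caps, bit;
 *
 *	if (pmc_capabilities(pmcid, &caps) == 0)
 *		for (bit = 1; bit != 0; bit <<= 1)
 *			if (caps & bit)
 *				printf("%s ",
 *				    pmc_name_of_capability((enum pmc_caps)bit));
 */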
3539
3540 const char *
3541 pmc_name_of_class(enum pmc_class pc)
3542 {
3543         size_t n;
3544
3545         for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
3546                 if (pc == pmc_class_names[n].pm_class)
3547                         return (pmc_class_names[n].pm_name);
3548
3549         errno = EINVAL;
3550         return (NULL);
3551 }
3552
3553 const char *
3554 pmc_name_of_cputype(enum pmc_cputype cp)
3555 {
3556         size_t n;
3557
3558         for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
3559                 if (cp == pmc_cputype_names[n].pm_cputype)
3560                         return (pmc_cputype_names[n].pm_name);
3561
3562         errno = EINVAL;
3563         return (NULL);
3564 }
3565
3566 const char *
3567 pmc_name_of_disposition(enum pmc_disp pd)
3568 {
3569         if ((int) pd >= PMC_DISP_FIRST &&
3570             pd <= PMC_DISP_LAST)
3571                 return (pmc_disposition_names[pd]);
3572
3573         errno = EINVAL;
3574         return (NULL);
3575 }
3576
3577 const char *
3578 _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
3579 {
3580         const struct pmc_event_descr *ev, *evfence;
3581
3582         ev = evfence = NULL;
3583         if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
3584                 ev = iaf_event_table;
3585                 evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
3586         } else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
3587                 switch (cpu) {
3588                 case PMC_CPU_INTEL_ATOM:
3589                         ev = atom_event_table;
3590                         evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
3591                         break;
3592                 case PMC_CPU_INTEL_ATOM_SILVERMONT:
3593                         ev = atom_silvermont_event_table;
3594                         evfence = atom_silvermont_event_table +
3595                             PMC_EVENT_TABLE_SIZE(atom_silvermont);
3596                         break;
3597                 case PMC_CPU_INTEL_CORE:
3598                         ev = core_event_table;
3599                         evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
3600                         break;
3601                 case PMC_CPU_INTEL_CORE2:
3602                 case PMC_CPU_INTEL_CORE2EXTREME:
3603                         ev = core2_event_table;
3604                         evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
3605                         break;
3606                 case PMC_CPU_INTEL_COREI7:
3607                         ev = corei7_event_table;
3608                         evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7);
3609                         break;
3610                 case PMC_CPU_INTEL_NEHALEM_EX:
3611                         ev = nehalem_ex_event_table;
3612                         evfence = nehalem_ex_event_table +
3613                             PMC_EVENT_TABLE_SIZE(nehalem_ex);
3614                         break;
3615                 case PMC_CPU_INTEL_HASWELL:
3616                         ev = haswell_event_table;
3617                         evfence = haswell_event_table + PMC_EVENT_TABLE_SIZE(haswell);
3618                         break;
3619                 case PMC_CPU_INTEL_HASWELL_XEON:
3620                         ev = haswell_xeon_event_table;
3621                         evfence = haswell_xeon_event_table + PMC_EVENT_TABLE_SIZE(haswell_xeon);
3622                         break;
3623                 case PMC_CPU_INTEL_BROADWELL:
3624                         ev = broadwell_event_table;
3625                         evfence = broadwell_event_table + PMC_EVENT_TABLE_SIZE(broadwell);
3626                         break;
3627                 case PMC_CPU_INTEL_BROADWELL_XEON:
3628                         ev = broadwell_xeon_event_table;
3629                         evfence = broadwell_xeon_event_table + PMC_EVENT_TABLE_SIZE(broadwell_xeon);
3630                         break;
3631                 case PMC_CPU_INTEL_SKYLAKE:
3632                         ev = skylake_event_table;
3633                         evfence = skylake_event_table +
3634                             PMC_EVENT_TABLE_SIZE(skylake);
3635                         break;
3636                 case PMC_CPU_INTEL_SKYLAKE_XEON:
3637                         ev = skylake_xeon_event_table;
3638                         evfence = skylake_xeon_event_table +
3639                             PMC_EVENT_TABLE_SIZE(skylake_xeon);
3640                         break;
3641                 case PMC_CPU_INTEL_IVYBRIDGE:
3642                         ev = ivybridge_event_table;
3643                         evfence = ivybridge_event_table + PMC_EVENT_TABLE_SIZE(ivybridge);
3644                         break;
3645                 case PMC_CPU_INTEL_IVYBRIDGE_XEON:
3646                         ev = ivybridge_xeon_event_table;
3647                         evfence = ivybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(ivybridge_xeon);
3648                         break;
3649                 case PMC_CPU_INTEL_SANDYBRIDGE:
3650                         ev = sandybridge_event_table;
3651                         evfence = sandybridge_event_table + PMC_EVENT_TABLE_SIZE(sandybridge);
3652                         break;
3653                 case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
3654                         ev = sandybridge_xeon_event_table;
3655                         evfence = sandybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
3656                         break;
3657                 case PMC_CPU_INTEL_WESTMERE:
3658                         ev = westmere_event_table;
3659                         evfence = westmere_event_table + PMC_EVENT_TABLE_SIZE(westmere);
3660                         break;
3661                 case PMC_CPU_INTEL_WESTMERE_EX:
3662                         ev = westmere_ex_event_table;
3663                         evfence = westmere_ex_event_table +
3664                             PMC_EVENT_TABLE_SIZE(westmere_ex);
3665                         break;
3666                 default:        /* Unknown CPU type. */
3667                         break;
3668                 }
3669         } else if (pe >= PMC_EV_UCF_FIRST && pe <= PMC_EV_UCF_LAST) {
3670                 ev = ucf_event_table;
3671                 evfence = ucf_event_table + PMC_EVENT_TABLE_SIZE(ucf);
3672         } else if (pe >= PMC_EV_UCP_FIRST && pe <= PMC_EV_UCP_LAST) {
3673                 switch (cpu) {
3674                 case PMC_CPU_INTEL_COREI7:
3675                         ev = corei7uc_event_table;
3676                         evfence = corei7uc_event_table + PMC_EVENT_TABLE_SIZE(corei7uc);
3677                         break;
3678                 case PMC_CPU_INTEL_SANDYBRIDGE:
3679                         ev = sandybridgeuc_event_table;
3680                         evfence = sandybridgeuc_event_table + PMC_EVENT_TABLE_SIZE(sandybridgeuc);
3681                         break;
3682                 case PMC_CPU_INTEL_WESTMERE:
3683                         ev = westmereuc_event_table;
3684                         evfence = westmereuc_event_table + PMC_EVENT_TABLE_SIZE(westmereuc);
3685                         break;
3686                 default:        /* Unknown CPU type. */
3687                         break;
3688                 }
3689         } else if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
3690                 ev = k7_event_table;
3691                 evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
3692         } else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
3693                 ev = k8_event_table;
3694                 evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
3695         } else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
3696                 ev = p4_event_table;
3697                 evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
3698         } else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
3699                 ev = p5_event_table;
3700                 evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
3701         } else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
3702                 ev = p6_event_table;
3703                 evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
3704         } else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
3705                 ev = xscale_event_table;
3706                 evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
3707         } else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
3708                 switch (cpu) {
3709                 case PMC_CPU_ARMV7_CORTEX_A8:
3710                         ev = cortex_a8_event_table;
3711                         evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
3712                         break;
3713                 case PMC_CPU_ARMV7_CORTEX_A9:
3714                         ev = cortex_a9_event_table;
3715                         evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
3716                         break;
3717                 default:        /* Unknown CPU type. */
3718                         break;
3719                 }
3720         } else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
3721                 switch (cpu) {
3722                 case PMC_CPU_ARMV8_CORTEX_A53:
3723                         ev = cortex_a53_event_table;
3724                         evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
3725                         break;
3726                 case PMC_CPU_ARMV8_CORTEX_A57:
3727                         ev = cortex_a57_event_table;
3728                         evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
3729                         break;
3730                 default:        /* Unknown CPU type. */
3731                         break;
3732                 }
3733         } else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
3734                 ev = mips24k_event_table;
3735                 evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
3736         } else if (pe >= PMC_EV_MIPS74K_FIRST && pe <= PMC_EV_MIPS74K_LAST) {
3737                 ev = mips74k_event_table;
3738                 evfence = mips74k_event_table + PMC_EVENT_TABLE_SIZE(mips74k);
3739         } else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
3740                 ev = octeon_event_table;
3741                 evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
3742         } else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
3743                 ev = ppc7450_event_table;
3744                 evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
3745         } else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
3746                 ev = ppc970_event_table;
3747                 evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
3748         } else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
3749                 ev = e500_event_table;
3750                 evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
3751         } else if (pe == PMC_EV_TSC_TSC) {
3752                 ev = tsc_event_table;
3753                 evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
3754         } else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
3755                 ev = soft_event_table;
3756                 evfence = soft_event_table + soft_event_info.pm_nevent;
3757         }
3758
3759         for (; ev != evfence; ev++)
3760                 if (pe == ev->pm_ev_code)
3761                         return (ev->pm_ev_name);
3762
3763         return (NULL);
3764 }
3765
3766 const char *
3767 pmc_name_of_event(enum pmc_event pe)
3768 {
3769         const char *n;
3770
3771         if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
3772                 return (n);
3773
3774         errno = EINVAL;
3775         return (NULL);
3776 }
3777
3778 const char *
3779 pmc_name_of_mode(enum pmc_mode pm)
3780 {
3781         if ((int) pm >= PMC_MODE_FIRST &&
3782             pm <= PMC_MODE_LAST)
3783                 return (pmc_mode_names[pm]);
3784
3785         errno = EINVAL;
3786         return (NULL);
3787 }
3788
3789 const char *
3790 pmc_name_of_state(enum pmc_state ps)
3791 {
3792         if ((int) ps >= PMC_STATE_FIRST &&
3793             ps <= PMC_STATE_LAST)
3794                 return (pmc_state_names[ps]);
3795
3796         errno = EINVAL;
3797         return (NULL);
3798 }
3799
3800 int
3801 pmc_ncpu(void)
3802 {
3803         if (pmc_syscall == -1) {
3804                 errno = ENXIO;
3805                 return (-1);
3806         }
3807
3808         return (cpu_info.pm_ncpu);
3809 }
3810
3811 int
3812 pmc_npmc(int cpu)
3813 {
3814         if (pmc_syscall == -1) {
3815                 errno = ENXIO;
3816                 return (-1);
3817         }
3818
3819         if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
3820                 errno = EINVAL;
3821                 return (-1);
3822         }
3823
3824         return (cpu_info.pm_npmc);
3825 }
3826
3827 int
3828 pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
3829 {
3830         int nbytes, npmc;
3831         struct pmc_op_getpmcinfo *pmci;
3832
3833         if ((npmc = pmc_npmc(cpu)) < 0)
3834                 return (-1);
3835
3836         nbytes = sizeof(struct pmc_op_getpmcinfo) +
3837             npmc * sizeof(struct pmc_info);
3838
3839         if ((pmci = calloc(1, nbytes)) == NULL)
3840                 return (-1);
3841
3842         pmci->pm_cpu  = cpu;
3843
3844         if (PMC_CALL(GETPMCINFO, pmci) < 0) {
3845                 free(pmci);
3846                 return (-1);
3847         }
3848
3849         /* kernel<->library, library<->userland interfaces are identical */
3850         *ppmci = (struct pmc_pmcinfo *) pmci;
3851         return (0);
3852 }
3853
3854 int
3855 pmc_read(pmc_id_t pmc, pmc_value_t *value)
3856 {
3857         struct pmc_op_pmcrw pmc_read_op;
3858
3859         pmc_read_op.pm_pmcid = pmc;
3860         pmc_read_op.pm_flags = PMC_F_OLDVALUE;
3861         pmc_read_op.pm_value = -1;
3862
3863         if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
3864                 return (-1);
3865
3866         *value = pmc_read_op.pm_value;
3867         return (0);
3868 }
3869
3870 int
3871 pmc_release(pmc_id_t pmc)
3872 {
3873         struct pmc_op_simple    pmc_release_args;
3874
3875         pmc_release_args.pm_pmcid = pmc;
3876         return (PMC_CALL(PMCRELEASE, &pmc_release_args));
3877 }
3878
3879 int
3880 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
3881 {
3882         struct pmc_op_pmcrw pmc_rw_op;
3883
3884         pmc_rw_op.pm_pmcid = pmc;
3885         pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
3886         pmc_rw_op.pm_value = newvalue;
3887
3888         if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
3889                 return (-1);
3890
3891         *oldvaluep = pmc_rw_op.pm_value;
3892         return (0);
3893 }
3894
3895 int
3896 pmc_set(pmc_id_t pmc, pmc_value_t value)
3897 {
3898         struct pmc_op_pmcsetcount sc;
3899
3900         sc.pm_pmcid = pmc;
3901         sc.pm_count = value;
3902
3903         if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
3904                 return (-1);
3905         return (0);
3906 }
3907
3908 int
3909 pmc_start(pmc_id_t pmc)
3910 {
3911         struct pmc_op_simple    pmc_start_args;
3912
3913         pmc_start_args.pm_pmcid = pmc;
3914         return (PMC_CALL(PMCSTART, &pmc_start_args));
3915 }
3916
3917 int
3918 pmc_stop(pmc_id_t pmc)
3919 {
3920         struct pmc_op_simple    pmc_stop_args;
3921
3922         pmc_stop_args.pm_pmcid = pmc;
3923         return (PMC_CALL(PMCSTOP, &pmc_stop_args));
3924 }
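/*
 * Example (illustrative sketch only, not part of the library): a minimal
 * counting sequence over an already allocated PMC 'id'.  Allocation with
 * pmc_allocate() and attaching the PMC to a target process with
 * pmc_attach() are omitted, and run_workload() stands in for the code
 * being measured.
 *
 *	pmc_value_t v;
 *
 *	if (pmc_set(id, 0) < 0 || pmc_start(id) < 0)
 *		err(1, "cannot start pmc");
 *	run_workload();
 *	if (pmc_stop(id) < 0 || pmc_read(id, &v) < 0)
 *		err(1, "cannot read pmc");
 *	printf("%ju events counted\n", (uintmax_t)v);
 *	(void) pmc_release(id);
 */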
3925
3926 int
3927 pmc_width(pmc_id_t pmcid, uint32_t *width)
3928 {
3929         unsigned int i;
3930         enum pmc_class cl;
3931
3932         cl = PMC_ID_TO_CLASS(pmcid);
3933         for (i = 0; i < cpu_info.pm_nclass; i++)
3934                 if (cpu_info.pm_classes[i].pm_class == cl) {
3935                         *width = cpu_info.pm_classes[i].pm_width;
3936                         return (0);
3937                 }
3938         errno = EINVAL;
3939         return (-1);
3940 }
3941
3942 int
3943 pmc_write(pmc_id_t pmc, pmc_value_t value)
3944 {
3945         struct pmc_op_pmcrw pmc_write_op;
3946
3947         pmc_write_op.pm_pmcid = pmc;
3948         pmc_write_op.pm_flags = PMC_F_NEWVALUE;
3949         pmc_write_op.pm_value = value;
3950         return (PMC_CALL(PMCRW, &pmc_write_op));
3951 }
3952
3953 int
3954 pmc_writelog(uint32_t userdata)
3955 {
3956         struct pmc_op_writelog wl;
3957
3958         wl.pm_userdata = userdata;
3959         return (PMC_CALL(WRITELOG, &wl));
3960 }