FreeBSD/FreeBSD.git: lib/libpmc/libpmc.c
libpmc: don't leak string in error case either
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2003-2008 Joseph Koshy
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/types.h>
33 #include <sys/param.h>
34 #include <sys/module.h>
35 #include <sys/pmc.h>
36 #include <sys/syscall.h>
37
38 #include <ctype.h>
39 #include <errno.h>
40 #include <fcntl.h>
41 #include <pmc.h>
42 #include <stdio.h>
43 #include <stdlib.h>
44 #include <string.h>
45 #include <strings.h>
46 #include <unistd.h>
47
48 #include "libpmcinternal.h"
49
50 /* Function prototypes */
51 #if defined(__i386__)
52 static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
53     struct pmc_op_pmcallocate *_pmc_config);
54 #endif
55 #if defined(__amd64__) || defined(__i386__)
56 static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
57     struct pmc_op_pmcallocate *_pmc_config);
58 static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
59     struct pmc_op_pmcallocate *_pmc_config);
60 static int ucf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
61     struct pmc_op_pmcallocate *_pmc_config);
62 static int ucp_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
63     struct pmc_op_pmcallocate *_pmc_config);
64 static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
65     struct pmc_op_pmcallocate *_pmc_config);
66 static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
67     struct pmc_op_pmcallocate *_pmc_config);
68 #endif
69 #if defined(__i386__)
70 static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
71     struct pmc_op_pmcallocate *_pmc_config);
72 static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
73     struct pmc_op_pmcallocate *_pmc_config);
74 #endif
75 #if defined(__amd64__) || defined(__i386__)
76 static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
77     struct pmc_op_pmcallocate *_pmc_config);
78 #endif
79 #if defined(__arm__)
80 #if defined(__XSCALE__)
81 static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
82     struct pmc_op_pmcallocate *_pmc_config);
83 #endif
84 static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
85     struct pmc_op_pmcallocate *_pmc_config);
86 #endif
87 #if defined(__aarch64__)
88 static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
89     struct pmc_op_pmcallocate *_pmc_config);
90 #endif
91 #if defined(__mips__)
92 static int mips_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
93     struct pmc_op_pmcallocate *_pmc_config);
94 #endif /* __mips__ */
95 static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
96     struct pmc_op_pmcallocate *_pmc_config);
97
98 #if defined(__powerpc__)
99 static int powerpc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
100     struct pmc_op_pmcallocate *_pmc_config);
101 #endif /* __powerpc__ */
102
103 #define PMC_CALL(cmd, params)                           \
104         syscall(pmc_syscall, PMC_OP_##cmd, (params))
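
/*
 * For example, PMC_CALL(PMCALLOCATE, &pa) expands to
 * syscall(pmc_syscall, PMC_OP_PMCALLOCATE, &pa), where 'pmc_syscall'
 * is the syscall number filled in by pmc_init().
 */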
105
106 /*
107  * Event aliases provide a way for the user to ask for generic events
108  * like "cache-misses" or "instructions-retired".  These aliases are
109  * mapped to the appropriate canonical event descriptions using a
110  * lookup table.
111  */
112 struct pmc_event_alias {
113         const char      *pm_alias;
114         const char      *pm_spec;
115 };
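
/*
 * For example, the alias "branches" maps to "k7-retired-branches" on
 * AMD K7 CPUs and to "iap-br-instr-ret" on Intel Core CPUs; see the
 * per-CPU alias tables below.
 */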
116
117 static const struct pmc_event_alias *pmc_mdep_event_aliases;
118
119 /*
120  * The pmc_event_descr structure maps symbolic names known to the user
121  * to integer codes used by the PMC KLD.
122  */
123 struct pmc_event_descr {
124         const char      *pm_ev_name;
125         enum pmc_event  pm_ev_code;
126 };
127
128 /*
129  * The pmc_class_descr structure maps class name prefixes for
130  * event names to event tables and other PMC class data.
131  */
132 struct pmc_class_descr {
133         const char      *pm_evc_name;
134         size_t          pm_evc_name_size;
135         enum pmc_class  pm_evc_class;
136         const struct pmc_event_descr *pm_evc_event_table;
137         size_t          pm_evc_event_table_size;
138         int             (*pm_evc_allocate_pmc)(enum pmc_event _pe,
139                             char *_ctrspec, struct pmc_op_pmcallocate *_pa);
140 };
141
142 #define PMC_TABLE_SIZE(N)       (sizeof(N)/sizeof(N[0]))
143 #define PMC_EVENT_TABLE_SIZE(N) PMC_TABLE_SIZE(N##_event_table)
144
145 #undef  __PMC_EV
146 #define __PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
147
148 /*
149  * PMC_CLASSDEP_TABLE(NAME, CLASS)
150  *
151  * Define a table mapping event names and aliases to HWPMC event IDs.
152  */
153 #define PMC_CLASSDEP_TABLE(N, C)                                \
154         static const struct pmc_event_descr N##_event_table[] = \
155         {                                                       \
156                 __PMC_EV_##C()                                  \
157         }
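
/*
 * For example, PMC_CLASSDEP_TABLE(k7, K7) defines k7_event_table[],
 * with __PMC_EV_K7() expanding to one { "NAME", PMC_EV_K7_NAME }
 * initializer per K7 event.
 */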
158
159 PMC_CLASSDEP_TABLE(iaf, IAF);
160 PMC_CLASSDEP_TABLE(k7, K7);
161 PMC_CLASSDEP_TABLE(k8, K8);
162 PMC_CLASSDEP_TABLE(p4, P4);
163 PMC_CLASSDEP_TABLE(p5, P5);
164 PMC_CLASSDEP_TABLE(p6, P6);
165 PMC_CLASSDEP_TABLE(xscale, XSCALE);
166 PMC_CLASSDEP_TABLE(armv7, ARMV7);
167 PMC_CLASSDEP_TABLE(armv8, ARMV8);
168 PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
169 PMC_CLASSDEP_TABLE(mips74k, MIPS74K);
170 PMC_CLASSDEP_TABLE(octeon, OCTEON);
171 PMC_CLASSDEP_TABLE(ucf, UCF);
172 PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
173 PMC_CLASSDEP_TABLE(ppc970, PPC970);
174 PMC_CLASSDEP_TABLE(e500, E500);
175
176 static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];
177
178 #undef  __PMC_EV_ALIAS
179 #define __PMC_EV_ALIAS(N,CODE)  { N, PMC_EV_##CODE },
180
181 static const struct pmc_event_descr atom_event_table[] =
182 {
183         __PMC_EV_ALIAS_ATOM()
184 };
185
186 static const struct pmc_event_descr atom_silvermont_event_table[] =
187 {
188         __PMC_EV_ALIAS_ATOM_SILVERMONT()
189 };
190
191 static const struct pmc_event_descr core_event_table[] =
192 {
193         __PMC_EV_ALIAS_CORE()
194 };
195
196
197 static const struct pmc_event_descr core2_event_table[] =
198 {
199         __PMC_EV_ALIAS_CORE2()
200 };
201
202 static const struct pmc_event_descr corei7_event_table[] =
203 {
204         __PMC_EV_ALIAS_COREI7()
205 };
206
207 static const struct pmc_event_descr nehalem_ex_event_table[] =
208 {
209         __PMC_EV_ALIAS_COREI7()
210 };
211
212 static const struct pmc_event_descr haswell_event_table[] =
213 {
214         __PMC_EV_ALIAS_HASWELL()
215 };
216
217 static const struct pmc_event_descr haswell_xeon_event_table[] =
218 {
219         __PMC_EV_ALIAS_HASWELL_XEON()
220 };
221
222 static const struct pmc_event_descr broadwell_event_table[] =
223 {
224         __PMC_EV_ALIAS_BROADWELL()
225 };
226
227 static const struct pmc_event_descr broadwell_xeon_event_table[] =
228 {
229         __PMC_EV_ALIAS_BROADWELL_XEON()
230 };
231
232 static const struct pmc_event_descr skylake_event_table[] =
233 {
234         __PMC_EV_ALIAS_SKYLAKE()
235 };
236
237 static const struct pmc_event_descr skylake_xeon_event_table[] =
238 {
239         __PMC_EV_ALIAS_SKYLAKE_XEON()
240 };
241
242 static const struct pmc_event_descr ivybridge_event_table[] =
243 {
244         __PMC_EV_ALIAS_IVYBRIDGE()
245 };
246
247 static const struct pmc_event_descr ivybridge_xeon_event_table[] = 
248 {
249         __PMC_EV_ALIAS_IVYBRIDGE_XEON()
250 };
251
252 static const struct pmc_event_descr sandybridge_event_table[] = 
253 {
254         __PMC_EV_ALIAS_SANDYBRIDGE()
255 };
256
257 static const struct pmc_event_descr sandybridge_xeon_event_table[] = 
258 {
259         __PMC_EV_ALIAS_SANDYBRIDGE_XEON()
260 };
261
262 static const struct pmc_event_descr westmere_event_table[] =
263 {
264         __PMC_EV_ALIAS_WESTMERE()
265 };
266
267 static const struct pmc_event_descr westmere_ex_event_table[] =
268 {
269         __PMC_EV_ALIAS_WESTMERE()
270 };
271
272 static const struct pmc_event_descr corei7uc_event_table[] =
273 {
274         __PMC_EV_ALIAS_COREI7UC()
275 };
276
277 static const struct pmc_event_descr haswelluc_event_table[] =
278 {
279         __PMC_EV_ALIAS_HASWELLUC()
280 };
281
282 static const struct pmc_event_descr broadwelluc_event_table[] =
283 {
284         __PMC_EV_ALIAS_BROADWELLUC()
285 };
286
287 static const struct pmc_event_descr sandybridgeuc_event_table[] =
288 {
289         __PMC_EV_ALIAS_SANDYBRIDGEUC()
290 };
291
292 static const struct pmc_event_descr westmereuc_event_table[] =
293 {
294         __PMC_EV_ALIAS_WESTMEREUC()
295 };
296
297 static const struct pmc_event_descr cortex_a8_event_table[] = 
298 {
299         __PMC_EV_ALIAS_ARMV7_CORTEX_A8()
300 };
301
302 static const struct pmc_event_descr cortex_a9_event_table[] = 
303 {
304         __PMC_EV_ALIAS_ARMV7_CORTEX_A9()
305 };
306
307 static const struct pmc_event_descr cortex_a53_event_table[] = 
308 {
309         __PMC_EV_ALIAS_ARMV8_CORTEX_A53()
310 };
311
312 static const struct pmc_event_descr cortex_a57_event_table[] = 
313 {
314         __PMC_EV_ALIAS_ARMV8_CORTEX_A57()
315 };
316
317 /*
318  * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
319  *
320  * Map a CPU to the PMC classes it supports.
321  */
322 #define PMC_MDEP_TABLE(N,C,...)                         \
323         static const enum pmc_class N##_pmc_classes[] = {       \
324                 PMC_CLASS_##C, __VA_ARGS__                      \
325         }
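
/*
 * For example, PMC_MDEP_TABLE(k7, K7, PMC_CLASS_SOFT, PMC_CLASS_TSC)
 * defines k7_pmc_classes[] as { PMC_CLASS_K7, PMC_CLASS_SOFT,
 * PMC_CLASS_TSC }.
 */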
326
327 PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
328 PMC_MDEP_TABLE(atom_silvermont, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
329 PMC_MDEP_TABLE(core, IAP, PMC_CLASS_SOFT, PMC_CLASS_TSC);
330 PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
331 PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
332 PMC_MDEP_TABLE(nehalem_ex, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
333 PMC_MDEP_TABLE(haswell, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
334 PMC_MDEP_TABLE(haswell_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
335 PMC_MDEP_TABLE(broadwell, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
336 PMC_MDEP_TABLE(broadwell_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
337 PMC_MDEP_TABLE(skylake, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
338 PMC_MDEP_TABLE(skylake_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
339 PMC_MDEP_TABLE(ivybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
340 PMC_MDEP_TABLE(ivybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
341 PMC_MDEP_TABLE(sandybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
342 PMC_MDEP_TABLE(sandybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
343 PMC_MDEP_TABLE(westmere, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
344 PMC_MDEP_TABLE(westmere_ex, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
345 PMC_MDEP_TABLE(k7, K7, PMC_CLASS_SOFT, PMC_CLASS_TSC);
346 PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC);
347 PMC_MDEP_TABLE(p4, P4, PMC_CLASS_SOFT, PMC_CLASS_TSC);
348 PMC_MDEP_TABLE(p5, P5, PMC_CLASS_SOFT, PMC_CLASS_TSC);
349 PMC_MDEP_TABLE(p6, P6, PMC_CLASS_SOFT, PMC_CLASS_TSC);
350 PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
351 PMC_MDEP_TABLE(cortex_a8, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
352 PMC_MDEP_TABLE(cortex_a9, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
353 PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
354 PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
355 PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
356 PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K);
357 PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
358 PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450, PMC_CLASS_TSC);
359 PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970, PMC_CLASS_TSC);
360 PMC_MDEP_TABLE(e500, E500, PMC_CLASS_SOFT, PMC_CLASS_E500, PMC_CLASS_TSC);
361 PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT);
362
363 static const struct pmc_event_descr tsc_event_table[] =
364 {
365         __PMC_EV_TSC()
366 };
367
368 #undef  PMC_CLASS_TABLE_DESC
369 #define PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)    \
370 static const struct pmc_class_descr NAME##_class_table_descr =  \
371         {                                                       \
372                 .pm_evc_name  = #CLASS "-",                     \
373                 .pm_evc_name_size = sizeof(#CLASS "-") - 1,     \
374                 .pm_evc_class = PMC_CLASS_##CLASS ,             \
375                 .pm_evc_event_table = EVENTS##_event_table ,    \
376                 .pm_evc_event_table_size =                      \
377                         PMC_EVENT_TABLE_SIZE(EVENTS),           \
378                 .pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc \
379         }
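
/*
 * For example, PMC_CLASS_TABLE_DESC(k7, K7, k7, k7) defines
 * k7_class_table_descr, which ties the "K7-" event name prefix to
 * k7_event_table[] and k7_allocate_pmc().
 */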
380
381 #if     defined(__i386__) || defined(__amd64__)
382 PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
383 PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
384 PMC_CLASS_TABLE_DESC(atom_silvermont, IAP, atom_silvermont, iap);
385 PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
386 PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
387 PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap);
388 PMC_CLASS_TABLE_DESC(nehalem_ex, IAP, nehalem_ex, iap);
389 PMC_CLASS_TABLE_DESC(haswell, IAP, haswell, iap);
390 PMC_CLASS_TABLE_DESC(haswell_xeon, IAP, haswell_xeon, iap);
391 PMC_CLASS_TABLE_DESC(broadwell, IAP, broadwell, iap);
392 PMC_CLASS_TABLE_DESC(broadwell_xeon, IAP, broadwell_xeon, iap);
393 PMC_CLASS_TABLE_DESC(skylake, IAP, skylake, iap);
394 PMC_CLASS_TABLE_DESC(skylake_xeon, IAP, skylake_xeon, iap);
395 PMC_CLASS_TABLE_DESC(ivybridge, IAP, ivybridge, iap);
396 PMC_CLASS_TABLE_DESC(ivybridge_xeon, IAP, ivybridge_xeon, iap);
397 PMC_CLASS_TABLE_DESC(sandybridge, IAP, sandybridge, iap);
398 PMC_CLASS_TABLE_DESC(sandybridge_xeon, IAP, sandybridge_xeon, iap);
399 PMC_CLASS_TABLE_DESC(westmere, IAP, westmere, iap);
400 PMC_CLASS_TABLE_DESC(westmere_ex, IAP, westmere_ex, iap);
401 PMC_CLASS_TABLE_DESC(ucf, UCF, ucf, ucf);
402 PMC_CLASS_TABLE_DESC(corei7uc, UCP, corei7uc, ucp);
403 PMC_CLASS_TABLE_DESC(haswelluc, UCP, haswelluc, ucp);
404 PMC_CLASS_TABLE_DESC(broadwelluc, UCP, broadwelluc, ucp);
405 PMC_CLASS_TABLE_DESC(sandybridgeuc, UCP, sandybridgeuc, ucp);
406 PMC_CLASS_TABLE_DESC(westmereuc, UCP, westmereuc, ucp);
407 #endif
408 #if     defined(__i386__)
409 PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
410 #endif
411 #if     defined(__i386__) || defined(__amd64__)
412 PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
413 PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
414 #endif
415 #if     defined(__i386__)
416 PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
417 PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
418 #endif
419 #if     defined(__i386__) || defined(__amd64__)
420 PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
421 #endif
422 #if     defined(__arm__)
423 #if     defined(__XSCALE__)
424 PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
425 #endif
426 PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
427 PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
428 #endif
429 #if     defined(__aarch64__)
430 PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
431 PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
432 #endif
433 #if defined(__mips__)
434 PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
435 PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips);
436 PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
437 #endif /* __mips__ */
438 #if defined(__powerpc__)
439 PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
440 PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
441 PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
442 #endif
443
444 static struct pmc_class_descr soft_class_table_descr =
445 {
446         .pm_evc_name  = "SOFT-",
447         .pm_evc_name_size = sizeof("SOFT-") - 1,
448         .pm_evc_class = PMC_CLASS_SOFT,
449         .pm_evc_event_table = NULL,
450         .pm_evc_event_table_size = 0,
451         .pm_evc_allocate_pmc = soft_allocate_pmc
452 };
453
454 #undef  PMC_CLASS_TABLE_DESC
455
456 static const struct pmc_class_descr **pmc_class_table;
457 #define PMC_CLASS_TABLE_SIZE    cpu_info.pm_nclass
458
459 static const enum pmc_class *pmc_mdep_class_list;
460 static size_t pmc_mdep_class_list_size;
461
462 /*
463  * Mapping tables, translating enumeration values to human-readable
464  * strings.
465  */
466
467 static const char * pmc_capability_names[] = {
468 #undef  __PMC_CAP
469 #define __PMC_CAP(N,V,D)        #N ,
470         __PMC_CAPS()
471 };
472
473 struct pmc_class_map {
474         enum pmc_class  pm_class;
475         const char      *pm_name;
476 };
477
478 static const struct pmc_class_map pmc_class_names[] = {
479 #undef  __PMC_CLASS
480 #define __PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
481         __PMC_CLASSES()
482 };
483
484 struct pmc_cputype_map {
485         enum pmc_cputype pm_cputype;
486         const char      *pm_name;
487 };
488
489 static const struct pmc_cputype_map pmc_cputype_names[] = {
490 #undef  __PMC_CPU
491 #define __PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
492         __PMC_CPUS()
493 };
494
495 static const char * pmc_disposition_names[] = {
496 #undef  __PMC_DISP
497 #define __PMC_DISP(D)   #D ,
498         __PMC_DISPOSITIONS()
499 };
500
501 static const char * pmc_mode_names[] = {
502 #undef  __PMC_MODE
503 #define __PMC_MODE(M,N) #M ,
504         __PMC_MODES()
505 };
506
507 static const char * pmc_state_names[] = {
508 #undef  __PMC_STATE
509 #define __PMC_STATE(S) #S ,
510         __PMC_STATES()
511 };
512
513 /*
514  * Filled in by pmc_init().
515  */
516 static int pmc_syscall = -1;
517 static struct pmc_cpuinfo cpu_info;
518 static struct pmc_op_getdyneventinfo soft_event_info;
519
520 /* Named bit masks used as event qualifiers. */
521 struct pmc_masks {
522         const char      *pm_name;
523         const uint64_t  pm_value;
524 };
525 #define PMCMASK(N,V)    { .pm_name = #N, .pm_value = (V) }
526 #define NULLMASK        { .pm_name = NULL }
527
528 #if defined(__amd64__) || defined(__i386__)
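/*
 * Parse a "<keyword>=<mask>+<mask>+..." qualifier against the mask
 * table 'pmask'.  The value of each named mask is OR-ed into *evmask;
 * the number of masks seen is returned, or -1 on a parse error.
 */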
529 static int
530 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
531 {
532         const struct pmc_masks *pm;
533         char *q, *r;
534         int c;
535
536         if (pmask == NULL)      /* no mask keywords */
537                 return (-1);
538         q = strchr(p, '=');     /* skip '=' */
539         if (*++q == '\0')       /* no more data */
540                 return (-1);
541         c = 0;                  /* count of mask keywords seen */
542         while ((r = strsep(&q, "+")) != NULL) {
543                 for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
544                     pm++)
545                         ;
546                 if (pm->pm_name == NULL) /* not found */
547                         return (-1);
548                 *evmask |= pm->pm_value;
549                 c++;
550         }
551         return (c);
552 }
553 #endif
554
555 #define KWMATCH(p,kw)           (strcasecmp((p), (kw)) == 0)
556 #define KWPREFIXMATCH(p,kw)     (strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
557 #define EV_ALIAS(N,S)           { .pm_alias = N, .pm_spec = S }
558
559 #if defined(__i386__)
560
561 /*
562  * AMD K7 (Athlon) CPUs.
563  */
564
565 static struct pmc_event_alias k7_aliases[] = {
566         EV_ALIAS("branches",            "k7-retired-branches"),
567         EV_ALIAS("branch-mispredicts",  "k7-retired-branches-mispredicted"),
568         EV_ALIAS("cycles",              "tsc"),
569         EV_ALIAS("dc-misses",           "k7-dc-misses"),
570         EV_ALIAS("ic-misses",           "k7-ic-misses"),
571         EV_ALIAS("instructions",        "k7-retired-instructions"),
572         EV_ALIAS("interrupts",          "k7-hardware-interrupts"),
573         EV_ALIAS(NULL, NULL)
574 };
575
576 #define K7_KW_COUNT     "count"
577 #define K7_KW_EDGE      "edge"
578 #define K7_KW_INV       "inv"
579 #define K7_KW_OS        "os"
580 #define K7_KW_UNITMASK  "unitmask"
581 #define K7_KW_USR       "usr"
582
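/*
 * Parse the comma-separated qualifiers of a K7 event specification.
 * The recognized keywords are "count=", "edge", "inv", "os",
 * "unitmask=" (a combination of the letters m, o, e, s and i, only
 * accepted for the DC refill and writeback events) and "usr".
 */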
583 static int
584 k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
585     struct pmc_op_pmcallocate *pmc_config)
586 {
587         char            *e, *p, *q;
588         int             c, has_unitmask;
589         uint32_t        count, unitmask;
590
591         pmc_config->pm_md.pm_amd.pm_amd_config = 0;
592         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
593
594         if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
595             pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
596             pe == PMC_EV_K7_DC_WRITEBACKS) {
597                 has_unitmask = 1;
598                 unitmask = AMD_PMC_UNITMASK_MOESI;
599         } else
600                 unitmask = has_unitmask = 0;
601
602         while ((p = strsep(&ctrspec, ",")) != NULL) {
603                 if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
604                         q = strchr(p, '=');
605                         if (*++q == '\0') /* skip '=' */
606                                 return (-1);
607
608                         count = strtol(q, &e, 0);
609                         if (e == q || *e != '\0')
610                                 return (-1);
611
612                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
613                         pmc_config->pm_md.pm_amd.pm_amd_config |=
614                             AMD_PMC_TO_COUNTER(count);
615
616                 } else if (KWMATCH(p, K7_KW_EDGE)) {
617                         pmc_config->pm_caps |= PMC_CAP_EDGE;
618                 } else if (KWMATCH(p, K7_KW_INV)) {
619                         pmc_config->pm_caps |= PMC_CAP_INVERT;
620                 } else if (KWMATCH(p, K7_KW_OS)) {
621                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
622                 } else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
623                         if (has_unitmask == 0)
624                                 return (-1);
625                         unitmask = 0;
626                         q = strchr(p, '=');
627                         if (*++q == '\0') /* skip '=' */
628                                 return (-1);
629
630                         while ((c = tolower(*q++)) != 0)
631                                 if (c == 'm')
632                                         unitmask |= AMD_PMC_UNITMASK_M;
633                                 else if (c == 'o')
634                                         unitmask |= AMD_PMC_UNITMASK_O;
635                                 else if (c == 'e')
636                                         unitmask |= AMD_PMC_UNITMASK_E;
637                                 else if (c == 's')
638                                         unitmask |= AMD_PMC_UNITMASK_S;
639                                 else if (c == 'i')
640                                         unitmask |= AMD_PMC_UNITMASK_I;
641                                 else if (c == '+')
642                                         continue;
643                                 else
644                                         return (-1);
645
646                         if (unitmask == 0)
647                                 return (-1);
648
649                 } else if (KWMATCH(p, K7_KW_USR)) {
650                         pmc_config->pm_caps |= PMC_CAP_USER;
651                 } else
652                         return (-1);
653         }
654
655         if (has_unitmask) {
656                 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
657                 pmc_config->pm_md.pm_amd.pm_amd_config |=
658                     AMD_PMC_TO_UNITMASK(unitmask);
659         }
660
661         return (0);
662
663 }
664
665 #endif
666
667 #if defined(__amd64__) || defined(__i386__)
668
669 /*
670  * Intel Core (Family 6, Model E) PMCs.
671  */
672
673 static struct pmc_event_alias core_aliases[] = {
674         EV_ALIAS("branches",            "iap-br-instr-ret"),
675         EV_ALIAS("branch-mispredicts",  "iap-br-mispred-ret"),
676         EV_ALIAS("cycles",              "tsc-tsc"),
677         EV_ALIAS("ic-misses",           "iap-icache-misses"),
678         EV_ALIAS("instructions",        "iap-instr-ret"),
679         EV_ALIAS("interrupts",          "iap-core-hw-int-rx"),
680         EV_ALIAS("unhalted-cycles",     "iap-unhalted-core-cycles"),
681         EV_ALIAS(NULL, NULL)
682 };
683
684 /*
685  * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H),
686  * and Atom (Family 6, Model 1CH) PMCs.
687  *
688  * We map aliases to events on the fixed-function counters if these
689  * are present.  Note that not all CPUs in this family contain fixed-function
690  * counters.
691  */
692
693 static struct pmc_event_alias core2_aliases[] = {
694         EV_ALIAS("branches",            "iap-br-inst-retired.any"),
695         EV_ALIAS("branch-mispredicts",  "iap-br-inst-retired.mispred"),
696         EV_ALIAS("cycles",              "tsc-tsc"),
697         EV_ALIAS("ic-misses",           "iap-l1i-misses"),
698         EV_ALIAS("instructions",        "iaf-instr-retired.any"),
699         EV_ALIAS("interrupts",          "iap-hw-int-rcv"),
700         EV_ALIAS("unhalted-cycles",     "iaf-cpu-clk-unhalted.core"),
701         EV_ALIAS(NULL, NULL)
702 };
703
704 static struct pmc_event_alias core2_aliases_without_iaf[] = {
705         EV_ALIAS("branches",            "iap-br-inst-retired.any"),
706         EV_ALIAS("branch-mispredicts",  "iap-br-inst-retired.mispred"),
707         EV_ALIAS("cycles",              "tsc-tsc"),
708         EV_ALIAS("ic-misses",           "iap-l1i-misses"),
709         EV_ALIAS("instructions",        "iap-inst-retired.any_p"),
710         EV_ALIAS("interrupts",          "iap-hw-int-rcv"),
711         EV_ALIAS("unhalted-cycles",     "iap-cpu-clk-unhalted.core_p"),
712         EV_ALIAS(NULL, NULL)
713 };
714
715 #define atom_aliases                    core2_aliases
716 #define atom_aliases_without_iaf        core2_aliases_without_iaf
717 #define atom_silvermont_aliases         core2_aliases
718 #define atom_silvermont_aliases_without_iaf     core2_aliases_without_iaf
719 #define corei7_aliases                  core2_aliases
720 #define corei7_aliases_without_iaf      core2_aliases_without_iaf
721 #define nehalem_ex_aliases              core2_aliases
722 #define nehalem_ex_aliases_without_iaf  core2_aliases_without_iaf
723 #define haswell_aliases                 core2_aliases
724 #define haswell_aliases_without_iaf     core2_aliases_without_iaf
725 #define haswell_xeon_aliases                    core2_aliases
726 #define haswell_xeon_aliases_without_iaf        core2_aliases_without_iaf
727 #define broadwell_aliases                       core2_aliases
728 #define broadwell_aliases_without_iaf   core2_aliases_without_iaf
729 #define broadwell_xeon_aliases                  core2_aliases
730 #define broadwell_xeon_aliases_without_iaf      core2_aliases_without_iaf
731 #define skylake_aliases                 core2_aliases
732 #define skylake_aliases_without_iaf     core2_aliases_without_iaf
733 #define skylake_xeon_aliases            core2_aliases
734 #define skylake_xeon_aliases_without_iaf        core2_aliases_without_iaf
735 #define ivybridge_aliases               core2_aliases
736 #define ivybridge_aliases_without_iaf   core2_aliases_without_iaf
737 #define ivybridge_xeon_aliases          core2_aliases
738 #define ivybridge_xeon_aliases_without_iaf      core2_aliases_without_iaf
739 #define sandybridge_aliases             core2_aliases
740 #define sandybridge_aliases_without_iaf core2_aliases_without_iaf
741 #define sandybridge_xeon_aliases        core2_aliases
742 #define sandybridge_xeon_aliases_without_iaf    core2_aliases_without_iaf
743 #define westmere_aliases                core2_aliases
744 #define westmere_aliases_without_iaf    core2_aliases_without_iaf
745 #define westmere_ex_aliases             core2_aliases
746 #define westmere_ex_aliases_without_iaf core2_aliases_without_iaf
747
748 #define IAF_KW_OS               "os"
749 #define IAF_KW_USR              "usr"
750 #define IAF_KW_ANYTHREAD        "anythread"
751
752 /*
753  * Parse an event specifier for Intel fixed function counters.
754  */
755 static int
756 iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
757     struct pmc_op_pmcallocate *pmc_config)
758 {
759         char *p;
760
761         (void) pe;
762
763         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
764         pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
765
766         while ((p = strsep(&ctrspec, ",")) != NULL) {
767                 if (KWMATCH(p, IAF_KW_OS))
768                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
769                 else if (KWMATCH(p, IAF_KW_USR))
770                         pmc_config->pm_caps |= PMC_CAP_USER;
771                 else if (KWMATCH(p, IAF_KW_ANYTHREAD))
772                         pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
773                 else
774                         return (-1);
775         }
776
777         return (0);
778 }
779
780 /*
781  * Core/Core2 support.
782  */
783
784 #define IAP_KW_AGENT            "agent"
785 #define IAP_KW_ANYTHREAD        "anythread"
786 #define IAP_KW_CACHESTATE       "cachestate"
787 #define IAP_KW_CMASK            "cmask"
788 #define IAP_KW_CORE             "core"
789 #define IAP_KW_EDGE             "edge"
790 #define IAP_KW_INV              "inv"
791 #define IAP_KW_OS               "os"
792 #define IAP_KW_PREFETCH         "prefetch"
793 #define IAP_KW_SNOOPRESPONSE    "snoopresponse"
794 #define IAP_KW_SNOOPTYPE        "snooptype"
795 #define IAP_KW_TRANSITION       "trans"
796 #define IAP_KW_USR              "usr"
797 #define IAP_KW_RSP              "rsp"
798
799 static struct pmc_masks iap_core_mask[] = {
800         PMCMASK(all,    (0x3 << 14)),
801         PMCMASK(this,   (0x1 << 14)),
802         NULLMASK
803 };
804
805 static struct pmc_masks iap_agent_mask[] = {
806         PMCMASK(this,   0),
807         PMCMASK(any,    (0x1 << 13)),
808         NULLMASK
809 };
810
811 static struct pmc_masks iap_prefetch_mask[] = {
812         PMCMASK(both,           (0x3 << 12)),
813         PMCMASK(only,           (0x1 << 12)),
814         PMCMASK(exclude,        0),
815         NULLMASK
816 };
817
818 static struct pmc_masks iap_cachestate_mask[] = {
819         PMCMASK(i,              (1 <<  8)),
820         PMCMASK(s,              (1 <<  9)),
821         PMCMASK(e,              (1 << 10)),
822         PMCMASK(m,              (1 << 11)),
823         NULLMASK
824 };
825
826 static struct pmc_masks iap_snoopresponse_mask[] = {
827         PMCMASK(clean,          (1 << 8)),
828         PMCMASK(hit,            (1 << 9)),
829         PMCMASK(hitm,           (1 << 11)),
830         NULLMASK
831 };
832
833 static struct pmc_masks iap_snooptype_mask[] = {
834         PMCMASK(cmp2s,          (1 << 8)),
835         PMCMASK(cmp2i,          (1 << 9)),
836         NULLMASK
837 };
838
839 static struct pmc_masks iap_transition_mask[] = {
840         PMCMASK(any,            0x00),
841         PMCMASK(frequency,      0x10),
842         NULLMASK
843 };
844
845 static struct pmc_masks iap_rsp_mask_i7_wm[] = {
846         PMCMASK(DMND_DATA_RD,           (1 <<  0)),
847         PMCMASK(DMND_RFO,               (1 <<  1)),
848         PMCMASK(DMND_IFETCH,            (1 <<  2)),
849         PMCMASK(WB,                     (1 <<  3)),
850         PMCMASK(PF_DATA_RD,             (1 <<  4)),
851         PMCMASK(PF_RFO,                 (1 <<  5)),
852         PMCMASK(PF_IFETCH,              (1 <<  6)),
853         PMCMASK(OTHER,                  (1 <<  7)),
854         PMCMASK(UNCORE_HIT,             (1 <<  8)),
855         PMCMASK(OTHER_CORE_HIT_SNP,     (1 <<  9)),
856         PMCMASK(OTHER_CORE_HITM,        (1 << 10)),
857         PMCMASK(REMOTE_CACHE_FWD,       (1 << 12)),
858         PMCMASK(REMOTE_DRAM,            (1 << 13)),
859         PMCMASK(LOCAL_DRAM,             (1 << 14)),
860         PMCMASK(NON_DRAM,               (1 << 15)),
861         NULLMASK
862 };
863
864 static struct pmc_masks iap_rsp_mask_sb_sbx_ib[] = {
865         PMCMASK(REQ_DMND_DATA_RD,       (1ULL <<  0)),
866         PMCMASK(REQ_DMND_RFO,           (1ULL <<  1)),
867         PMCMASK(REQ_DMND_IFETCH,        (1ULL <<  2)),
868         PMCMASK(REQ_WB,                 (1ULL <<  3)),
869         PMCMASK(REQ_PF_DATA_RD,         (1ULL <<  4)),
870         PMCMASK(REQ_PF_RFO,             (1ULL <<  5)),
871         PMCMASK(REQ_PF_IFETCH,          (1ULL <<  6)),
872         PMCMASK(REQ_PF_LLC_DATA_RD,     (1ULL <<  7)),
873         PMCMASK(REQ_PF_LLC_RFO,         (1ULL <<  8)),
874         PMCMASK(REQ_PF_LLC_IFETCH,      (1ULL <<  9)),
875         PMCMASK(REQ_BUS_LOCKS,          (1ULL << 10)),
876         PMCMASK(REQ_STRM_ST,            (1ULL << 11)),
877         PMCMASK(REQ_OTHER,              (1ULL << 15)),
878         PMCMASK(RES_ANY,                (1ULL << 16)),
879         PMCMASK(RES_SUPPLIER_SUPP,      (1ULL << 17)),
880         PMCMASK(RES_SUPPLIER_LLC_HITM,  (1ULL << 18)),
881         PMCMASK(RES_SUPPLIER_LLC_HITE,  (1ULL << 19)),
882         PMCMASK(RES_SUPPLIER_LLC_HITS,  (1ULL << 20)),
883         PMCMASK(RES_SUPPLIER_LLC_HITF,  (1ULL << 21)),
884         PMCMASK(RES_SUPPLIER_LOCAL,     (1ULL << 22)),
885         PMCMASK(RES_SNOOP_SNP_NONE,     (1ULL << 31)),
886         PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
887         PMCMASK(RES_SNOOP_SNP_MISS,     (1ULL << 33)),
888         PMCMASK(RES_SNOOP_HIT_NO_FWD,   (1ULL << 34)),
889         PMCMASK(RES_SNOOP_HIT_FWD,      (1ULL << 35)),
890         PMCMASK(RES_SNOOP_HITM,         (1ULL << 36)),
891         PMCMASK(RES_NON_DRAM,           (1ULL << 37)),
892         NULLMASK
893 };
894
895 /* Broadwell is defined to use the same mask as Haswell */
896 static struct pmc_masks iap_rsp_mask_haswell[] = {
897         PMCMASK(REQ_DMND_DATA_RD,       (1ULL <<  0)),
898         PMCMASK(REQ_DMND_RFO,           (1ULL <<  1)),
899         PMCMASK(REQ_DMND_IFETCH,        (1ULL <<  2)),
900         PMCMASK(REQ_PF_DATA_RD,         (1ULL <<  4)),
901         PMCMASK(REQ_PF_RFO,             (1ULL <<  5)),
902         PMCMASK(REQ_PF_IFETCH,          (1ULL <<  6)),
903         PMCMASK(REQ_OTHER,              (1ULL << 15)),
904         PMCMASK(RES_ANY,                (1ULL << 16)),
905         PMCMASK(RES_SUPPLIER_SUPP,      (1ULL << 17)),
906         PMCMASK(RES_SUPPLIER_LLC_HITM,  (1ULL << 18)),
907         PMCMASK(RES_SUPPLIER_LLC_HITE,  (1ULL << 19)),
908         PMCMASK(RES_SUPPLIER_LLC_HITS,  (1ULL << 20)),
909         PMCMASK(RES_SUPPLIER_LLC_HITF,  (1ULL << 21)),
910         PMCMASK(RES_SUPPLIER_LOCAL,     (1ULL << 22)),
911         /*
912          * For processor type 06_45H, bit 22 is L4_HIT_LOCAL_L4,
913          * and bits 23, 24 and 25 are also defined.
914          */
915         PMCMASK(RES_SNOOP_SNP_NONE,     (1ULL << 31)),
916         PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
917         PMCMASK(RES_SNOOP_SNP_MISS,     (1ULL << 33)),
918         PMCMASK(RES_SNOOP_HIT_NO_FWD,   (1ULL << 34)),
919         PMCMASK(RES_SNOOP_HIT_FWD,      (1ULL << 35)),
920         PMCMASK(RES_SNOOP_HITM,         (1ULL << 36)),
921         PMCMASK(RES_NON_DRAM,           (1ULL << 37)),
922         NULLMASK
923 };
924
925 static struct pmc_masks iap_rsp_mask_skylake[] = {
926         PMCMASK(REQ_DMND_DATA_RD,       (1ULL <<  0)),
927         PMCMASK(REQ_DMND_RFO,           (1ULL <<  1)),
928         PMCMASK(REQ_DMND_IFETCH,        (1ULL <<  2)),
929         PMCMASK(REQ_PF_DATA_RD,         (1ULL <<  7)),
930         PMCMASK(REQ_PF_RFO,             (1ULL <<  8)),
931         PMCMASK(REQ_STRM_ST,            (1ULL << 11)),
932         PMCMASK(REQ_OTHER,              (1ULL << 15)),
933         PMCMASK(RES_ANY,                (1ULL << 16)),
934         PMCMASK(RES_SUPPLIER_SUPP,      (1ULL << 17)),
935         PMCMASK(RES_SUPPLIER_LLC_HITM,  (1ULL << 18)),
936         PMCMASK(RES_SUPPLIER_LLC_HITE,  (1ULL << 19)),
937         PMCMASK(RES_SUPPLIER_LLC_HITS,  (1ULL << 20)),
938         PMCMASK(RES_SUPPLIER_L4_HIT,    (1ULL << 22)),
939         PMCMASK(RES_SUPPLIER_DRAM,      (1ULL << 26)),
940         PMCMASK(RES_SUPPLIER_SPL_HIT,   (1ULL << 30)),
941         PMCMASK(RES_SNOOP_SNP_NONE,     (1ULL << 31)),
942         PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
943         PMCMASK(RES_SNOOP_SNP_MISS,     (1ULL << 33)),
944         PMCMASK(RES_SNOOP_HIT_NO_FWD,   (1ULL << 34)),
945         PMCMASK(RES_SNOOP_HIT_FWD,      (1ULL << 35)),
946         PMCMASK(RES_SNOOP_HITM,         (1ULL << 36)),
947         PMCMASK(RES_NON_DRAM,           (1ULL << 37)),
948         NULLMASK
949 };
950
951
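/*
 * Parse the comma-separated qualifiers of an IAP (programmable
 * counter) event specification.  Which qualifiers are accepted
 * ("rsp=", "snoopresponse=", "trans=", ...) depends on the CPU type
 * recorded in cpu_info by pmc_init().
 */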
952 static int
953 iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
954     struct pmc_op_pmcallocate *pmc_config)
955 {
956         char *e, *p, *q;
957         uint64_t cachestate, evmask, rsp;
958         int count, n;
959
960         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
961             PMC_CAP_QUALIFIER);
962         pmc_config->pm_md.pm_iap.pm_iap_config = 0;
963
964         cachestate = evmask = rsp = 0;
965
966         /* Parse additional modifiers if present */
967         while ((p = strsep(&ctrspec, ",")) != NULL) {
968
969                 n = 0;
970                 if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
971                         q = strchr(p, '=');
972                         if (*++q == '\0') /* skip '=' */
973                                 return (-1);
974                         count = strtol(q, &e, 0);
975                         if (e == q || *e != '\0')
976                                 return (-1);
977                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
978                         pmc_config->pm_md.pm_iap.pm_iap_config |=
979                             IAP_CMASK(count);
980                 } else if (KWMATCH(p, IAP_KW_EDGE)) {
981                         pmc_config->pm_caps |= PMC_CAP_EDGE;
982                 } else if (KWMATCH(p, IAP_KW_INV)) {
983                         pmc_config->pm_caps |= PMC_CAP_INVERT;
984                 } else if (KWMATCH(p, IAP_KW_OS)) {
985                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
986                 } else if (KWMATCH(p, IAP_KW_USR)) {
987                         pmc_config->pm_caps |= PMC_CAP_USER;
988                 } else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
989                         pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
990                 } else if (KWPREFIXMATCH(p, IAP_KW_CORE "=")) {
991                         n = pmc_parse_mask(iap_core_mask, p, &evmask);
992                         if (n != 1)
993                                 return (-1);
994                 } else if (KWPREFIXMATCH(p, IAP_KW_AGENT "=")) {
995                         n = pmc_parse_mask(iap_agent_mask, p, &evmask);
996                         if (n != 1)
997                                 return (-1);
998                 } else if (KWPREFIXMATCH(p, IAP_KW_PREFETCH "=")) {
999                         n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
1000                         if (n != 1)
1001                                 return (-1);
1002                 } else if (KWPREFIXMATCH(p, IAP_KW_CACHESTATE "=")) {
1003                         n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
1004                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
1005                     KWPREFIXMATCH(p, IAP_KW_TRANSITION "=")) {
1006                         n = pmc_parse_mask(iap_transition_mask, p, &evmask);
1007                         if (n != 1)
1008                                 return (-1);
1009                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
1010                     cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM_SILVERMONT ||
1011                     cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
1012                     cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME) {
1013                         if (KWPREFIXMATCH(p, IAP_KW_SNOOPRESPONSE "=")) {
1014                                 n = pmc_parse_mask(iap_snoopresponse_mask, p,
1015                                     &evmask);
1016                         } else if (KWPREFIXMATCH(p, IAP_KW_SNOOPTYPE "=")) {
1017                                 n = pmc_parse_mask(iap_snooptype_mask, p,
1018                                     &evmask);
1019                         } else
1020                                 return (-1);
1021                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7 ||
1022                     cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE ||
1023                     cpu_info.pm_cputype == PMC_CPU_INTEL_NEHALEM_EX ||
1024                     cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE_EX) {
1025                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1026                                 n = pmc_parse_mask(iap_rsp_mask_i7_wm, p, &rsp);
1027                         } else
1028                                 return (-1);
1029                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE ||
1030                     cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE_XEON ||
1031                     cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE ||
1032                     cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE_XEON) {
1033                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1034                                 n = pmc_parse_mask(iap_rsp_mask_sb_sbx_ib, p, &rsp);
1035                         } else
1036                                 return (-1);
1037                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_HASWELL ||
1038                     cpu_info.pm_cputype == PMC_CPU_INTEL_HASWELL_XEON) {
1039                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1040                                 n = pmc_parse_mask(iap_rsp_mask_haswell, p, &rsp);
1041                         } else
1042                                 return (-1);
1043                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_BROADWELL ||
1044                     cpu_info.pm_cputype == PMC_CPU_INTEL_BROADWELL_XEON) {
1045                         /* Broadwell is defined to use the same mask as Haswell. */
1046                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1047                                 n = pmc_parse_mask(iap_rsp_mask_haswell, p, &rsp);
1048                         } else
1049                                 return (-1);
1050
1051                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SKYLAKE ||
1052                     cpu_info.pm_cputype == PMC_CPU_INTEL_SKYLAKE_XEON) {
1053                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1054                                 n = pmc_parse_mask(iap_rsp_mask_skylake, p, &rsp);
1055                         } else
1056                                 return (-1);
1057
1058                 } else
1059                         return (-1);
1060
1061                 if (n < 0)      /* Parsing failed. */
1062                         return (-1);
1063         }
1064
1065         pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;
1066
1067         /*
1068          * If the event requires a 'cachestate' qualifier but one was
1069          * not specified by the user, use a sensible default.
1070          */
1071         switch (pe) {
1072         case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
1073         case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
1074         case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
1075         case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
1076         case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
1077         case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
1078         case PMC_EV_IAP_EVENT_32H: /* Core */
1079         case PMC_EV_IAP_EVENT_40H: /* Core */
1080         case PMC_EV_IAP_EVENT_41H: /* Core */
1081         case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
1082                 if (cachestate == 0)
1083                         cachestate = (0xF << 8);
1084                 break;
1085         case PMC_EV_IAP_EVENT_77H: /* Atom */
1086                 /* IAP_EVENT_77H only accepts a cachestate qualifier on
1087                  * the Atom processor.
1088                  */
1089                 if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM && cachestate == 0)
1090                         cachestate = (0xF << 8);
1091                 break;
1092         default:
1093                 break;
1094         }
1095
1096         pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
1097         pmc_config->pm_md.pm_iap.pm_iap_rsp = rsp;
1098
1099         return (0);
1100 }
1101
1102 /*
1103  * Intel Uncore.
1104  */
1105
1106 static int
1107 ucf_allocate_pmc(enum pmc_event pe, char *ctrspec,
1108     struct pmc_op_pmcallocate *pmc_config)
1109 {
1110         (void) pe;
1111         (void) ctrspec;
1112
1113         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1114         pmc_config->pm_md.pm_ucf.pm_ucf_flags = 0;
1115
1116         return (0);
1117 }
1118
1119 #define UCP_KW_CMASK            "cmask"
1120 #define UCP_KW_EDGE             "edge"
1121 #define UCP_KW_INV              "inv"
1122
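/*
 * Parse the qualifiers of an uncore programmable (UCP) event
 * specification: "cmask=", "edge" and "inv".
 */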
1123 static int
1124 ucp_allocate_pmc(enum pmc_event pe, char *ctrspec,
1125     struct pmc_op_pmcallocate *pmc_config)
1126 {
1127         char *e, *p, *q;
1128         int count, n;
1129
1130         (void) pe;
1131
1132         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
1133             PMC_CAP_QUALIFIER);
1134         pmc_config->pm_md.pm_ucp.pm_ucp_config = 0;
1135
1136         /* Parse additional modifiers if present */
1137         while ((p = strsep(&ctrspec, ",")) != NULL) {
1138
1139                 n = 0;
1140                 if (KWPREFIXMATCH(p, UCP_KW_CMASK "=")) {
1141                         q = strchr(p, '=');
1142                         if (*++q == '\0') /* skip '=' */
1143                                 return (-1);
1144                         count = strtol(q, &e, 0);
1145                         if (e == q || *e != '\0')
1146                                 return (-1);
1147                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1148                         pmc_config->pm_md.pm_ucp.pm_ucp_config |=
1149                             UCP_CMASK(count);
1150                 } else if (KWMATCH(p, UCP_KW_EDGE)) {
1151                         pmc_config->pm_caps |= PMC_CAP_EDGE;
1152                 } else if (KWMATCH(p, UCP_KW_INV)) {
1153                         pmc_config->pm_caps |= PMC_CAP_INVERT;
1154                 } else
1155                         return (-1);
1156
1157                 if (n < 0)      /* Parsing failed. */
1158                         return (-1);
1159         }
1160
1161         return (0);
1162 }
1163
1164 /*
1165  * AMD K8 PMCs.
1166  *
1167  * These are very similar to AMD K7 PMCs, but support more kinds of
1168  * events.
1169  */
1170
1171 static struct pmc_event_alias k8_aliases[] = {
1172         EV_ALIAS("branches",            "k8-fr-retired-taken-branches"),
1173         EV_ALIAS("branch-mispredicts",
1174             "k8-fr-retired-taken-branches-mispredicted"),
1175         EV_ALIAS("cycles",              "tsc"),
1176         EV_ALIAS("dc-misses",           "k8-dc-miss"),
1177         EV_ALIAS("ic-misses",           "k8-ic-miss"),
1178         EV_ALIAS("instructions",        "k8-fr-retired-x86-instructions"),
1179         EV_ALIAS("interrupts",          "k8-fr-taken-hardware-interrupts"),
1180         EV_ALIAS("unhalted-cycles",     "k8-bu-cpu-clk-unhalted"),
1181         EV_ALIAS(NULL, NULL)
1182 };
1183
1184 #define __K8MASK(N,V) PMCMASK(N,(1 << (V)))
1185
1186 /*
1187  * Parsing tables
1188  */
1189
1190 /* fp dispatched fpu ops */
1191 static const struct pmc_masks k8_mask_fdfo[] = {
1192         __K8MASK(add-pipe-excluding-junk-ops,   0),
1193         __K8MASK(multiply-pipe-excluding-junk-ops,      1),
1194         __K8MASK(store-pipe-excluding-junk-ops, 2),
1195         __K8MASK(add-pipe-junk-ops,             3),
1196         __K8MASK(multiply-pipe-junk-ops,        4),
1197         __K8MASK(store-pipe-junk-ops,           5),
1198         NULLMASK
1199 };
1200
1201 /* ls segment register loads */
1202 static const struct pmc_masks k8_mask_lsrl[] = {
1203         __K8MASK(es,    0),
1204         __K8MASK(cs,    1),
1205         __K8MASK(ss,    2),
1206         __K8MASK(ds,    3),
1207         __K8MASK(fs,    4),
1208         __K8MASK(gs,    5),
1209         __K8MASK(hs,    6),
1210         NULLMASK
1211 };
1212
1213 /* ls locked operation */
1214 static const struct pmc_masks k8_mask_llo[] = {
1215         __K8MASK(locked-instructions,   0),
1216         __K8MASK(cycles-in-request,     1),
1217         __K8MASK(cycles-to-complete,    2),
1218         NULLMASK
1219 };
1220
1221 /* dc refill from {l2,system} and dc copyback */
1222 static const struct pmc_masks k8_mask_dc[] = {
1223         __K8MASK(invalid,       0),
1224         __K8MASK(shared,        1),
1225         __K8MASK(exclusive,     2),
1226         __K8MASK(owner,         3),
1227         __K8MASK(modified,      4),
1228         NULLMASK
1229 };
1230
1231 /* dc one bit ecc error */
1232 static const struct pmc_masks k8_mask_dobee[] = {
1233         __K8MASK(scrubber,      0),
1234         __K8MASK(piggyback,     1),
1235         NULLMASK
1236 };
1237
1238 /* dc dispatched prefetch instructions */
1239 static const struct pmc_masks k8_mask_ddpi[] = {
1240         __K8MASK(load,  0),
1241         __K8MASK(store, 1),
1242         __K8MASK(nta,   2),
1243         NULLMASK
1244 };
1245
1246 /* dc dcache accesses by locks */
1247 static const struct pmc_masks k8_mask_dabl[] = {
1248         __K8MASK(accesses,      0),
1249         __K8MASK(misses,        1),
1250         NULLMASK
1251 };
1252
1253 /* bu internal l2 request */
1254 static const struct pmc_masks k8_mask_bilr[] = {
1255         __K8MASK(ic-fill,       0),
1256         __K8MASK(dc-fill,       1),
1257         __K8MASK(tlb-reload,    2),
1258         __K8MASK(tag-snoop,     3),
1259         __K8MASK(cancelled,     4),
1260         NULLMASK
1261 };
1262
1263 /* bu fill request l2 miss */
1264 static const struct pmc_masks k8_mask_bfrlm[] = {
1265         __K8MASK(ic-fill,       0),
1266         __K8MASK(dc-fill,       1),
1267         __K8MASK(tlb-reload,    2),
1268         NULLMASK
1269 };
1270
1271 /* bu fill into l2 */
1272 static const struct pmc_masks k8_mask_bfil[] = {
1273         __K8MASK(dirty-l2-victim,       0),
1274         __K8MASK(victim-from-l2,        1),
1275         NULLMASK
1276 };
1277
1278 /* fr retired fpu instructions */
1279 static const struct pmc_masks k8_mask_frfi[] = {
1280         __K8MASK(x87,                   0),
1281         __K8MASK(mmx-3dnow,             1),
1282         __K8MASK(packed-sse-sse2,       2),
1283         __K8MASK(scalar-sse-sse2,       3),
1284         NULLMASK
1285 };
1286
1287 /* fr retired fastpath double op instructions */
1288 static const struct pmc_masks k8_mask_frfdoi[] = {
1289         __K8MASK(low-op-pos-0,          0),
1290         __K8MASK(low-op-pos-1,          1),
1291         __K8MASK(low-op-pos-2,          2),
1292         NULLMASK
1293 };
1294
1295 /* fr fpu exceptions */
1296 static const struct pmc_masks k8_mask_ffe[] = {
1297         __K8MASK(x87-reclass-microfaults,       0),
1298         __K8MASK(sse-retype-microfaults,        1),
1299         __K8MASK(sse-reclass-microfaults,       2),
1300         __K8MASK(sse-and-x87-microtraps,        3),
1301         NULLMASK
1302 };
1303
1304 /* nb memory controller page access event */
1305 static const struct pmc_masks k8_mask_nmcpae[] = {
1306         __K8MASK(page-hit,      0),
1307         __K8MASK(page-miss,     1),
1308         __K8MASK(page-conflict, 2),
1309         NULLMASK
1310 };
1311
1312 /* nb memory controller turnaround */
1313 static const struct pmc_masks k8_mask_nmct[] = {
1314         __K8MASK(dimm-turnaround,               0),
1315         __K8MASK(read-to-write-turnaround,      1),
1316         __K8MASK(write-to-read-turnaround,      2),
1317         NULLMASK
1318 };
1319
1320 /* nb memory controller bypass saturation */
1321 static const struct pmc_masks k8_mask_nmcbs[] = {
1322         __K8MASK(memory-controller-hi-pri-bypass,       0),
1323         __K8MASK(memory-controller-lo-pri-bypass,       1),
1324         __K8MASK(dram-controller-interface-bypass,      2),
1325         __K8MASK(dram-controller-queue-bypass,          3),
1326         NULLMASK
1327 };
1328
1329 /* nb sized commands */
1330 static const struct pmc_masks k8_mask_nsc[] = {
1331         __K8MASK(nonpostwrszbyte,       0),
1332         __K8MASK(nonpostwrszdword,      1),
1333         __K8MASK(postwrszbyte,          2),
1334         __K8MASK(postwrszdword,         3),
1335         __K8MASK(rdszbyte,              4),
1336         __K8MASK(rdszdword,             5),
1337         __K8MASK(rdmodwr,               6),
1338         NULLMASK
1339 };
1340
1341 /* nb probe result */
1342 static const struct pmc_masks k8_mask_npr[] = {
1343         __K8MASK(probe-miss,            0),
1344         __K8MASK(probe-hit,             1),
1345         __K8MASK(probe-hit-dirty-no-memory-cancel, 2),
1346         __K8MASK(probe-hit-dirty-with-memory-cancel, 3),
1347         NULLMASK
1348 };
1349
1350 /* nb hypertransport bus bandwidth */
1351 static const struct pmc_masks k8_mask_nhbb[] = {
1352         __K8MASK(command,               0),
1353         __K8MASK(data,                  1),
1354         __K8MASK(buffer-release,        2),
1355         __K8MASK(nop,                   3),
1356         NULLMASK
1357 };
1358
1359 #undef  __K8MASK
1360
1361 #define K8_KW_COUNT     "count"
1362 #define K8_KW_EDGE      "edge"
1363 #define K8_KW_INV       "inv"
1364 #define K8_KW_MASK      "mask"
1365 #define K8_KW_OS        "os"
1366 #define K8_KW_USR       "usr"
1367
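/*
 * Parse the comma-separated qualifiers of a K8 event specification.
 * Besides the common "count=", "edge", "inv", "os" and "usr"
 * keywords, a per-event "mask=" qualifier selects unit-mask bits from
 * the parsing tables above.
 */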
1368 static int
1369 k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
1370     struct pmc_op_pmcallocate *pmc_config)
1371 {
1372         char            *e, *p, *q;
1373         int             n;
1374         uint32_t        count;
1375         uint64_t        evmask;
1376         const struct pmc_masks  *pm, *pmask;
1377
1378         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1379         pmc_config->pm_md.pm_amd.pm_amd_config = 0;
1380
1381         pmask = NULL;
1382         evmask = 0;
1383
1384 #define __K8SETMASK(M) pmask = k8_mask_##M
1385
1386         /* setup parsing tables */
1387         switch (pe) {
1388         case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1389                 __K8SETMASK(fdfo);
1390                 break;
1391         case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
1392                 __K8SETMASK(lsrl);
1393                 break;
1394         case PMC_EV_K8_LS_LOCKED_OPERATION:
1395                 __K8SETMASK(llo);
1396                 break;
1397         case PMC_EV_K8_DC_REFILL_FROM_L2:
1398         case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
1399         case PMC_EV_K8_DC_COPYBACK:
1400                 __K8SETMASK(dc);
1401                 break;
1402         case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
1403                 __K8SETMASK(dobee);
1404                 break;
1405         case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
1406                 __K8SETMASK(ddpi);
1407                 break;
1408         case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1409                 __K8SETMASK(dabl);
1410                 break;
1411         case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
1412                 __K8SETMASK(bilr);
1413                 break;
1414         case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
1415                 __K8SETMASK(bfrlm);
1416                 break;
1417         case PMC_EV_K8_BU_FILL_INTO_L2:
1418                 __K8SETMASK(bfil);
1419                 break;
1420         case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1421                 __K8SETMASK(frfi);
1422                 break;
1423         case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1424                 __K8SETMASK(frfdoi);
1425                 break;
1426         case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1427                 __K8SETMASK(ffe);
1428                 break;
1429         case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
1430                 __K8SETMASK(nmcpae);
1431                 break;
1432         case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
1433                 __K8SETMASK(nmct);
1434                 break;
1435         case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
1436                 __K8SETMASK(nmcbs);
1437                 break;
1438         case PMC_EV_K8_NB_SIZED_COMMANDS:
1439                 __K8SETMASK(nsc);
1440                 break;
1441         case PMC_EV_K8_NB_PROBE_RESULT:
1442                 __K8SETMASK(npr);
1443                 break;
1444         case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
1445         case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
1446         case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
1447                 __K8SETMASK(nhbb);
1448                 break;
1449
1450         default:
1451                 break;          /* no options defined */
1452         }
1453
1454         while ((p = strsep(&ctrspec, ",")) != NULL) {
1455                 if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
1456                         q = strchr(p, '=');
1457                         if (*++q == '\0') /* skip '=' */
1458                                 return (-1);
1459
1460                         count = strtol(q, &e, 0);
1461                         if (e == q || *e != '\0')
1462                                 return (-1);
1463
1464                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1465                         pmc_config->pm_md.pm_amd.pm_amd_config |=
1466                             AMD_PMC_TO_COUNTER(count);
1467
1468                 } else if (KWMATCH(p, K8_KW_EDGE)) {
1469                         pmc_config->pm_caps |= PMC_CAP_EDGE;
1470                 } else if (KWMATCH(p, K8_KW_INV)) {
1471                         pmc_config->pm_caps |= PMC_CAP_INVERT;
1472                 } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
1473                         if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1474                                 return (-1);
1475                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1476                 } else if (KWMATCH(p, K8_KW_OS)) {
1477                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1478                 } else if (KWMATCH(p, K8_KW_USR)) {
1479                         pmc_config->pm_caps |= PMC_CAP_USER;
1480                 } else
1481                         return (-1);
1482         }
1483
1484         /* other post processing */
1485         switch (pe) {
1486         case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1487         case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
1488         case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
1489         case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1490         case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1491         case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1492                 /* XXX only available in rev B and later */
1493                 break;
1494         case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1495                 /* XXX only available in rev C and later */
1496                 break;
1497         case PMC_EV_K8_LS_LOCKED_OPERATION:
1498                 /* XXX CPU Rev A,B evmask is to be zero */
1499                 if (evmask & (evmask - 1)) /* > 1 bit set */
1500                         return (-1);
1501                 if (evmask == 0) {
1502                         evmask = 0x01; /* Rev C and later: #instrs */
1503                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1504                 }
1505                 break;
1506         default:
1507                 if (evmask == 0 && pmask != NULL) {
1508                         for (pm = pmask; pm->pm_name; pm++)
1509                                 evmask |= pm->pm_value;
1510                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1511                 }
1512         }
1513
1514         if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1515                 pmc_config->pm_md.pm_amd.pm_amd_config =
1516                     AMD_PMC_TO_UNITMASK(evmask);
1517
1518         return (0);
1519 }
1520
1521 #endif
1522
1523 #if defined(__amd64__) || defined(__i386__)
1524
1525 /*
1526  * Intel P4 PMCs
1527  */
1528
1529 static struct pmc_event_alias p4_aliases[] = {
1530         EV_ALIAS("branches",            "p4-branch-retired,mask=mmtp+mmtm"),
1531         EV_ALIAS("branch-mispredicts",  "p4-mispred-branch-retired"),
1532         EV_ALIAS("cycles",              "tsc"),
1533         EV_ALIAS("instructions",
1534             "p4-instr-retired,mask=nbogusntag+nbogustag"),
1535         EV_ALIAS("unhalted-cycles",     "p4-global-power-events"),
1536         EV_ALIAS(NULL, NULL)
1537 };
1538
1539 #define P4_KW_ACTIVE    "active"
1540 #define P4_KW_ACTIVE_ANY "any"
1541 #define P4_KW_ACTIVE_BOTH "both"
1542 #define P4_KW_ACTIVE_NONE "none"
1543 #define P4_KW_ACTIVE_SINGLE "single"
1544 #define P4_KW_BUSREQTYPE "busreqtype"
1545 #define P4_KW_CASCADE   "cascade"
1546 #define P4_KW_EDGE      "edge"
1547 #define P4_KW_INV       "complement"
1548 #define P4_KW_OS        "os"
1549 #define P4_KW_MASK      "mask"
1550 #define P4_KW_PRECISE   "precise"
1551 #define P4_KW_TAG       "tag"
1552 #define P4_KW_THRESHOLD "threshold"
1553 #define P4_KW_USR       "usr"
1554
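/*
 * Sketch of a counter specifier handled by p4_allocate_pmc() below
 * (illustrative):
 *
 *	"p4-global-power-events,mask=running,active=any,usr"
 *
 * applies the 'running' qualifier from p4_mask_gpe[], the 'any'
 * thread-activity setting and user-mode-only counting.
 */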
1555 #define __P4MASK(N,V) PMCMASK(N, (1 << (V)))
1556
1557 static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
1558         __P4MASK(dd, 0),
1559         __P4MASK(db, 1),
1560         __P4MASK(di, 2),
1561         __P4MASK(bd, 3),
1562         __P4MASK(bb, 4),
1563         __P4MASK(bi, 5),
1564         __P4MASK(id, 6),
1565         __P4MASK(ib, 7),
1566         NULLMASK
1567 };
1568
1569 static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
1570         __P4MASK(tcmiss, 0),
1571         NULLMASK,
1572 };
1573
1574 static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
1575         __P4MASK(hit, 0),
1576         __P4MASK(miss, 1),
1577         __P4MASK(hit-uc, 2),
1578         NULLMASK
1579 };
1580
1581 static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
1582         __P4MASK(st-rb-full, 2),
1583         __P4MASK(64k-conf, 3),
1584         NULLMASK
1585 };
1586
1587 static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
1588         __P4MASK(lsc, 0),
1589         __P4MASK(ssc, 1),
1590         NULLMASK
1591 };
1592
1593 static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
1594         __P4MASK(split-ld, 1),
1595         NULLMASK
1596 };
1597
1598 static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
1599         __P4MASK(split-st, 1),
1600         NULLMASK
1601 };
1602
1603 static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
1604         __P4MASK(no-sta, 1),
1605         __P4MASK(no-std, 3),
1606         __P4MASK(partial-data, 4),
1607         __P4MASK(unalgn-addr, 5),
1608         NULLMASK
1609 };
1610
1611 static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
1612         __P4MASK(dtmiss, 0),
1613         __P4MASK(itmiss, 1),
1614         NULLMASK
1615 };
1616
1617 static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
1618         __P4MASK(rd-2ndl-hits, 0),
1619         __P4MASK(rd-2ndl-hite, 1),
1620         __P4MASK(rd-2ndl-hitm, 2),
1621         __P4MASK(rd-3rdl-hits, 3),
1622         __P4MASK(rd-3rdl-hite, 4),
1623         __P4MASK(rd-3rdl-hitm, 5),
1624         __P4MASK(rd-2ndl-miss, 8),
1625         __P4MASK(rd-3rdl-miss, 9),
1626         __P4MASK(wr-2ndl-miss, 10),
1627         NULLMASK
1628 };
1629
1630 static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
1631         __P4MASK(all-read, 5),
1632         __P4MASK(all-write, 6),
1633         __P4MASK(mem-uc, 7),
1634         __P4MASK(mem-wc, 8),
1635         __P4MASK(mem-wt, 9),
1636         __P4MASK(mem-wp, 10),
1637         __P4MASK(mem-wb, 11),
1638         __P4MASK(own, 13),
1639         __P4MASK(other, 14),
1640         __P4MASK(prefetch, 15),
1641         NULLMASK
1642 };
1643
1644 static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
1645         __P4MASK(all-read, 5),
1646         __P4MASK(all-write, 6),
1647         __P4MASK(mem-uc, 7),
1648         __P4MASK(mem-wc, 8),
1649         __P4MASK(mem-wt, 9),
1650         __P4MASK(mem-wp, 10),
1651         __P4MASK(mem-wb, 11),
1652         __P4MASK(own, 13),
1653         __P4MASK(other, 14),
1654         __P4MASK(prefetch, 15),
1655         NULLMASK
1656 };
1657
1658 static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
1659         __P4MASK(drdy-drv, 0),
1660         __P4MASK(drdy-own, 1),
1661         __P4MASK(drdy-other, 2),
1662         __P4MASK(dbsy-drv, 3),
1663         __P4MASK(dbsy-own, 4),
1664         __P4MASK(dbsy-other, 5),
1665         NULLMASK
1666 };
1667
1668 static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
1669         __P4MASK(req-type0, 0),
1670         __P4MASK(req-type1, 1),
1671         __P4MASK(req-len0, 2),
1672         __P4MASK(req-len1, 3),
1673         __P4MASK(req-io-type, 5),
1674         __P4MASK(req-lock-type, 6),
1675         __P4MASK(req-cache-type, 7),
1676         __P4MASK(req-split-type, 8),
1677         __P4MASK(req-dem-type, 9),
1678         __P4MASK(req-ord-type, 10),
1679         __P4MASK(mem-type0, 11),
1680         __P4MASK(mem-type1, 12),
1681         __P4MASK(mem-type2, 13),
1682         NULLMASK
1683 };
1684
1685 static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
1686         __P4MASK(all, 15),
1687         NULLMASK
1688 };
1689
1690 static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
1691         __P4MASK(all, 15),
1692         NULLMASK
1693 };
1694
1695 static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
1696         __P4MASK(all, 15),
1697         NULLMASK
1698 };
1699
1700 static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
1701         __P4MASK(all, 15),
1702         NULLMASK
1703 };
1704
1705 static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
1706         __P4MASK(all, 15),
1707         NULLMASK
1708 };
1709
1710 static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
1711         __P4MASK(all, 15),
1712         NULLMASK
1713 };
1714
1715 static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
1716         __P4MASK(all, 15),
1717         NULLMASK
1718 };
1719
1720 static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
1721         __P4MASK(all, 15),
1722         NULLMASK
1723 };
1724
1725 static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
1726         __P4MASK(allp0, 3),
1727         __P4MASK(allp2, 4),
1728         NULLMASK
1729 };
1730
1731 static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
1732         __P4MASK(running, 0),
1733         NULLMASK
1734 };
1735
1736 static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
1737         __P4MASK(cisc, 0),
1738         NULLMASK
1739 };
1740
1741 static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
1742         __P4MASK(from-tc-build, 0),
1743         __P4MASK(from-tc-deliver, 1),
1744         __P4MASK(from-rom, 2),
1745         NULLMASK
1746 };
1747
1748 static const struct pmc_masks p4_mask_rmbt[] = {
1749         /* retired mispred branch type */
1750         __P4MASK(conditional, 1),
1751         __P4MASK(call, 2),
1752         __P4MASK(return, 3),
1753         __P4MASK(indirect, 4),
1754         NULLMASK
1755 };
1756
1757 static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
1758         __P4MASK(conditional, 1),
1759         __P4MASK(call, 2),
1760         __P4MASK(retired, 3),
1761         __P4MASK(indirect, 4),
1762         NULLMASK
1763 };
1764
1765 static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
1766         __P4MASK(sbfull, 5),
1767         NULLMASK
1768 };
1769
1770 static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
1771         __P4MASK(wcb-evicts, 0),
1772         __P4MASK(wcb-full-evict, 1),
1773         NULLMASK
1774 };
1775
1776 static const struct pmc_masks p4_mask_fee[] = { /* front end event */
1777         __P4MASK(nbogus, 0),
1778         __P4MASK(bogus, 1),
1779         NULLMASK
1780 };
1781
1782 static const struct pmc_masks p4_mask_ee[] = { /* execution event */
1783         __P4MASK(nbogus0, 0),
1784         __P4MASK(nbogus1, 1),
1785         __P4MASK(nbogus2, 2),
1786         __P4MASK(nbogus3, 3),
1787         __P4MASK(bogus0, 4),
1788         __P4MASK(bogus1, 5),
1789         __P4MASK(bogus2, 6),
1790         __P4MASK(bogus3, 7),
1791         NULLMASK
1792 };
1793
1794 static const struct pmc_masks p4_mask_re[] = { /* replay event */
1795         __P4MASK(nbogus, 0),
1796         __P4MASK(bogus, 1),
1797         NULLMASK
1798 };
1799
1800 static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
1801         __P4MASK(nbogusntag, 0),
1802         __P4MASK(nbogustag, 1),
1803         __P4MASK(bogusntag, 2),
1804         __P4MASK(bogustag, 3),
1805         NULLMASK
1806 };
1807
1808 static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
1809         __P4MASK(nbogus, 0),
1810         __P4MASK(bogus, 1),
1811         NULLMASK
1812 };
1813
1814 static const struct pmc_masks p4_mask_ut[] = { /* uop type */
1815         __P4MASK(tagloads, 1),
1816         __P4MASK(tagstores, 2),
1817         NULLMASK
1818 };
1819
1820 static const struct pmc_masks p4_mask_br[] = { /* branch retired */
1821         __P4MASK(mmnp, 0),
1822         __P4MASK(mmnm, 1),
1823         __P4MASK(mmtp, 2),
1824         __P4MASK(mmtm, 3),
1825         NULLMASK
1826 };
1827
1828 static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
1829         __P4MASK(nbogus, 0),
1830         NULLMASK
1831 };
1832
1833 static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
1834         __P4MASK(fpsu, 0),
1835         __P4MASK(fpso, 1),
1836         __P4MASK(poao, 2),
1837         __P4MASK(poau, 3),
1838         __P4MASK(prea, 4),
1839         NULLMASK
1840 };
1841
1842 static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
1843         __P4MASK(clear, 0),
1844         __P4MASK(moclear, 2),
1845         __P4MASK(smclear, 3),
1846         NULLMASK
1847 };
1848
1849 /* P4 event parser */
1850 static int
1851 p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1852     struct pmc_op_pmcallocate *pmc_config)
1853 {
1854
1855         char    *e, *p, *q;
1856         int     count, has_tag, has_busreqtype, n;
1857         uint32_t cccractivemask;
1858         uint64_t evmask;
1859         const struct pmc_masks *pm, *pmask;
1860
1861         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1862         pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1863             pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1864
1865         pmask   = NULL;
1866         evmask  = 0;
1867         cccractivemask = 0x3;
1868         has_tag = has_busreqtype = 0;
1869
1870 #define __P4SETMASK(M) do {                             \
1871         pmask = p4_mask_##M;                            \
1872 } while (0)
1873
1874         switch (pe) {
1875         case PMC_EV_P4_TC_DELIVER_MODE:
1876                 __P4SETMASK(tcdm);
1877                 break;
1878         case PMC_EV_P4_BPU_FETCH_REQUEST:
1879                 __P4SETMASK(bfr);
1880                 break;
1881         case PMC_EV_P4_ITLB_REFERENCE:
1882                 __P4SETMASK(ir);
1883                 break;
1884         case PMC_EV_P4_MEMORY_CANCEL:
1885                 __P4SETMASK(memcan);
1886                 break;
1887         case PMC_EV_P4_MEMORY_COMPLETE:
1888                 __P4SETMASK(memcomp);
1889                 break;
1890         case PMC_EV_P4_LOAD_PORT_REPLAY:
1891                 __P4SETMASK(lpr);
1892                 break;
1893         case PMC_EV_P4_STORE_PORT_REPLAY:
1894                 __P4SETMASK(spr);
1895                 break;
1896         case PMC_EV_P4_MOB_LOAD_REPLAY:
1897                 __P4SETMASK(mlr);
1898                 break;
1899         case PMC_EV_P4_PAGE_WALK_TYPE:
1900                 __P4SETMASK(pwt);
1901                 break;
1902         case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1903                 __P4SETMASK(bcr);
1904                 break;
1905         case PMC_EV_P4_IOQ_ALLOCATION:
1906                 __P4SETMASK(ia);
1907                 has_busreqtype = 1;
1908                 break;
1909         case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1910                 __P4SETMASK(iae);
1911                 has_busreqtype = 1;
1912                 break;
1913         case PMC_EV_P4_FSB_DATA_ACTIVITY:
1914                 __P4SETMASK(fda);
1915                 break;
1916         case PMC_EV_P4_BSQ_ALLOCATION:
1917                 __P4SETMASK(ba);
1918                 break;
1919         case PMC_EV_P4_SSE_INPUT_ASSIST:
1920                 __P4SETMASK(sia);
1921                 break;
1922         case PMC_EV_P4_PACKED_SP_UOP:
1923                 __P4SETMASK(psu);
1924                 break;
1925         case PMC_EV_P4_PACKED_DP_UOP:
1926                 __P4SETMASK(pdu);
1927                 break;
1928         case PMC_EV_P4_SCALAR_SP_UOP:
1929                 __P4SETMASK(ssu);
1930                 break;
1931         case PMC_EV_P4_SCALAR_DP_UOP:
1932                 __P4SETMASK(sdu);
1933                 break;
1934         case PMC_EV_P4_64BIT_MMX_UOP:
1935                 __P4SETMASK(64bmu);
1936                 break;
1937         case PMC_EV_P4_128BIT_MMX_UOP:
1938                 __P4SETMASK(128bmu);
1939                 break;
1940         case PMC_EV_P4_X87_FP_UOP:
1941                 __P4SETMASK(xfu);
1942                 break;
1943         case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1944                 __P4SETMASK(xsmu);
1945                 break;
1946         case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1947                 __P4SETMASK(gpe);
1948                 break;
1949         case PMC_EV_P4_TC_MS_XFER:
1950                 __P4SETMASK(tmx);
1951                 break;
1952         case PMC_EV_P4_UOP_QUEUE_WRITES:
1953                 __P4SETMASK(uqw);
1954                 break;
1955         case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1956                 __P4SETMASK(rmbt);
1957                 break;
1958         case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1959                 __P4SETMASK(rbt);
1960                 break;
1961         case PMC_EV_P4_RESOURCE_STALL:
1962                 __P4SETMASK(rs);
1963                 break;
1964         case PMC_EV_P4_WC_BUFFER:
1965                 __P4SETMASK(wb);
1966                 break;
1967         case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1968         case PMC_EV_P4_B2B_CYCLES:
1969         case PMC_EV_P4_BNR:
1970         case PMC_EV_P4_SNOOP:
1971         case PMC_EV_P4_RESPONSE:
1972                 break;
1973         case PMC_EV_P4_FRONT_END_EVENT:
1974                 __P4SETMASK(fee);
1975                 break;
1976         case PMC_EV_P4_EXECUTION_EVENT:
1977                 __P4SETMASK(ee);
1978                 break;
1979         case PMC_EV_P4_REPLAY_EVENT:
1980                 __P4SETMASK(re);
1981                 break;
1982         case PMC_EV_P4_INSTR_RETIRED:
1983                 __P4SETMASK(insret);
1984                 break;
1985         case PMC_EV_P4_UOPS_RETIRED:
1986                 __P4SETMASK(ur);
1987                 break;
1988         case PMC_EV_P4_UOP_TYPE:
1989                 __P4SETMASK(ut);
1990                 break;
1991         case PMC_EV_P4_BRANCH_RETIRED:
1992                 __P4SETMASK(br);
1993                 break;
1994         case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1995                 __P4SETMASK(mbr);
1996                 break;
1997         case PMC_EV_P4_X87_ASSIST:
1998                 __P4SETMASK(xa);
1999                 break;
2000         case PMC_EV_P4_MACHINE_CLEAR:
2001                 __P4SETMASK(machclr);
2002                 break;
2003         default:
2004                 return (-1);
2005         }
2006
2007         /* process additional flags */
2008         while ((p = strsep(&ctrspec, ",")) != NULL) {
2009                 if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
2010                         q = strchr(p, '=');
2011                         if (q == NULL || *++q == '\0') /* no '=' value */
2012                                 return (-1);
2013
2014                         if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0)
2015                                 cccractivemask = 0x0;
2016                         else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0)
2017                                 cccractivemask = 0x1;
2018                         else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0)
2019                                 cccractivemask = 0x2;
2020                         else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0)
2021                                 cccractivemask = 0x3;
2022                         else
2023                                 return (-1);
2024
2025                 } else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
2026                         if (has_busreqtype == 0)
2027                                 return (-1);
2028
2029                         q = strchr(p, '=');
2030                         if (q == NULL || *++q == '\0') /* no '=' value */
2031                                 return (-1);
2032
2033                         count = strtol(q, &e, 0);
2034                         if (e == q || *e != '\0')
2035                                 return (-1);
2036                         evmask = (evmask & ~0x1F) | (count & 0x1F);
2037                 } else if (KWMATCH(p, P4_KW_CASCADE))
2038                         pmc_config->pm_caps |= PMC_CAP_CASCADE;
2039                 else if (KWMATCH(p, P4_KW_EDGE))
2040                         pmc_config->pm_caps |= PMC_CAP_EDGE;
2041                 else if (KWMATCH(p, P4_KW_INV))
2042                         pmc_config->pm_caps |= PMC_CAP_INVERT;
2043                 else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
2044                         if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
2045                                 return (-1);
2046                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2047                 } else if (KWMATCH(p, P4_KW_OS))
2048                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2049                 else if (KWMATCH(p, P4_KW_PRECISE))
2050                         pmc_config->pm_caps |= PMC_CAP_PRECISE;
2051                 else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
2052                         if (has_tag == 0)
2053                                 return (-1);
2054
2055                         q = strchr(p, '=');
2056                         if (*++q == '\0') /* skip '=' */
2057                                 return (-1);
2058
2059                         count = strtol(q, &e, 0);
2060                         if (e == q || *e != '\0')
2061                                 return (-1);
2062
2063                         pmc_config->pm_caps |= PMC_CAP_TAGGING;
2064                         pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
2065                             P4_ESCR_TO_TAG_VALUE(count);
2066                 } else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
2067                         q = strchr(p, '=');
2068                         if (*++q == '\0') /* skip '=' */
2069                                 return (-1);
2070
2071                         count = strtol(q, &e, 0);
2072                         if (e == q || *e != '\0')
2073                                 return (-1);
2074
2075                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
2076                         pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
2077                             ~P4_CCCR_THRESHOLD_MASK;
2078                         pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
2079                             P4_CCCR_TO_THRESHOLD(count);
2080                 } else if (KWMATCH(p, P4_KW_USR))
2081                         pmc_config->pm_caps |= PMC_CAP_USER;
2082                 else
2083                         return (-1);
2084         }
2085
2086         /* other post processing */
2087         if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
2088             pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
2089             pe == PMC_EV_P4_BSQ_ALLOCATION)
2090                 pmc_config->pm_caps |= PMC_CAP_EDGE;
2091
2092         /* fill in thread activity mask */
2093         pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
2094             P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
2095
2096         if (evmask)
2097                 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2098
2099         switch (pe) {
2100         case PMC_EV_P4_FSB_DATA_ACTIVITY:
2101                 if ((evmask & 0x06) == 0x06 ||
2102                     (evmask & 0x18) == 0x18)
2103                         return (-1); /* can't have own+other bits together */
2104                 if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
2105                         evmask = 0x1D;
2106                 break;
2107         case PMC_EV_P4_MACHINE_CLEAR:
2108                 /* only one bit is allowed to be set */
2109                 if ((evmask & (evmask - 1)) != 0)
2110                         return (-1);
2111                 if (evmask == 0) {
2112                         evmask = 0x1;   /* 'CLEAR' */
2113                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2114                 }
2115                 break;
2116         default:
2117                 if (evmask == 0 && pmask) {
2118                         for (pm = pmask; pm->pm_name; pm++)
2119                                 evmask |= pm->pm_value;
2120                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2121                 }
2122         }
2123
2124         pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
2125             P4_ESCR_TO_EVENT_MASK(evmask);
2126
2127         return (0);
2128 }
2129
2130 #endif
2131
2132 #if defined(__i386__)
2133
2134 /*
2135  * Pentium style PMCs
2136  */
2137
2138 static struct pmc_event_alias p5_aliases[] = {
2139         EV_ALIAS("branches",            "p5-taken-branches"),
2140         EV_ALIAS("cycles",              "tsc"),
2141         EV_ALIAS("dc-misses",           "p5-data-read-miss-or-write-miss"),
2142         EV_ALIAS("ic-misses",           "p5-code-cache-miss"),
2143         EV_ALIAS("instructions",        "p5-instructions-executed"),
2144         EV_ALIAS("interrupts",          "p5-hardware-interrupts"),
2145         EV_ALIAS("unhalted-cycles",
2146             "p5-number-of-cycles-not-in-halt-state"),
2147         EV_ALIAS(NULL, NULL)
2148 };
2149
2150 static int
2151 p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
2152     struct pmc_op_pmcallocate *pmc_config)
2153 {
2154         (void)pe; (void)ctrspec; (void)pmc_config; return (-1); /* shut up gcc */
2155 }
2156
2157 /*
2158  * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
2159  * and Pentium M CPUs.
2160  */
2161
2162 static struct pmc_event_alias p6_aliases[] = {
2163         EV_ALIAS("branches",            "p6-br-inst-retired"),
2164         EV_ALIAS("branch-mispredicts",  "p6-br-miss-pred-retired"),
2165         EV_ALIAS("cycles",              "tsc"),
2166         EV_ALIAS("dc-misses",           "p6-dcu-lines-in"),
2167         EV_ALIAS("ic-misses",           "p6-ifu-fetch-miss"),
2168         EV_ALIAS("instructions",        "p6-inst-retired"),
2169         EV_ALIAS("interrupts",          "p6-hw-int-rx"),
2170         EV_ALIAS("unhalted-cycles",     "p6-cpu-clk-unhalted"),
2171         EV_ALIAS(NULL, NULL)
2172 };
2173
2174 #define P6_KW_CMASK     "cmask"
2175 #define P6_KW_EDGE      "edge"
2176 #define P6_KW_INV       "inv"
2177 #define P6_KW_OS        "os"
2178 #define P6_KW_UMASK     "umask"
2179 #define P6_KW_USR       "usr"
2180
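/*
 * Sketch of a counter specifier handled by p6_allocate_pmc() below
 * (the event spelling is illustrative):
 *
 *	"p6-l2-ld,umask=m+e,usr"
 *
 * counts user-mode L2 data loads qualified by the 'm' (modified) and
 * 'e' (exclusive) MESI bits from p6_mask_mesi[].
 */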
2181 static struct pmc_masks p6_mask_mesi[] = {
2182         PMCMASK(m,      0x01),
2183         PMCMASK(e,      0x02),
2184         PMCMASK(s,      0x04),
2185         PMCMASK(i,      0x08),
2186         NULLMASK
2187 };
2188
2189 static struct pmc_masks p6_mask_mesihw[] = {
2190         PMCMASK(m,      0x01),
2191         PMCMASK(e,      0x02),
2192         PMCMASK(s,      0x04),
2193         PMCMASK(i,      0x08),
2194         PMCMASK(nonhw,  0x00),
2195         PMCMASK(hw,     0x10),
2196         PMCMASK(both,   0x30),
2197         NULLMASK
2198 };
2199
2200 static struct pmc_masks p6_mask_hw[] = {
2201         PMCMASK(nonhw,  0x00),
2202         PMCMASK(hw,     0x10),
2203         PMCMASK(both,   0x30),
2204         NULLMASK
2205 };
2206
2207 static struct pmc_masks p6_mask_any[] = {
2208         PMCMASK(self,   0x00),
2209         PMCMASK(any,    0x20),
2210         NULLMASK
2211 };
2212
2213 static struct pmc_masks p6_mask_ekp[] = {
2214         PMCMASK(nta,    0x00),
2215         PMCMASK(t1,     0x01),
2216         PMCMASK(t2,     0x02),
2217         PMCMASK(wos,    0x03),
2218         NULLMASK
2219 };
2220
2221 static struct pmc_masks p6_mask_pps[] = {
2222         PMCMASK(packed-and-scalar, 0x00),
2223         PMCMASK(scalar, 0x01),
2224         NULLMASK
2225 };
2226
2227 static struct pmc_masks p6_mask_mite[] = {
2228         PMCMASK(packed-multiply,         0x01),
2229         PMCMASK(packed-shift,           0x02),
2230         PMCMASK(pack,                   0x04),
2231         PMCMASK(unpack,                 0x08),
2232         PMCMASK(packed-logical,         0x10),
2233         PMCMASK(packed-arithmetic,      0x20),
2234         NULLMASK
2235 };
2236
2237 static struct pmc_masks p6_mask_fmt[] = {
2238         PMCMASK(mmxtofp,        0x00),
2239         PMCMASK(fptommx,        0x01),
2240         NULLMASK
2241 };
2242
2243 static struct pmc_masks p6_mask_sr[] = {
2244         PMCMASK(es,     0x01),
2245         PMCMASK(ds,     0x02),
2246         PMCMASK(fs,     0x04),
2247         PMCMASK(gs,     0x08),
2248         NULLMASK
2249 };
2250
2251 static struct pmc_masks p6_mask_eet[] = {
2252         PMCMASK(all,    0x00),
2253         PMCMASK(freq,   0x02),
2254         NULLMASK
2255 };
2256
2257 static struct pmc_masks p6_mask_efur[] = {
2258         PMCMASK(all,    0x00),
2259         PMCMASK(loadop, 0x01),
2260         PMCMASK(stdsta, 0x02),
2261         NULLMASK
2262 };
2263
2264 static struct pmc_masks p6_mask_essir[] = {
2265         PMCMASK(sse-packed-single,      0x00),
2266         PMCMASK(sse-packed-single-scalar-single, 0x01),
2267         PMCMASK(sse2-packed-double,     0x02),
2268         PMCMASK(sse2-scalar-double,     0x03),
2269         NULLMASK
2270 };
2271
2272 static struct pmc_masks p6_mask_esscir[] = {
2273         PMCMASK(sse-packed-single,      0x00),
2274         PMCMASK(sse-scalar-single,      0x01),
2275         PMCMASK(sse2-packed-double,     0x02),
2276         PMCMASK(sse2-scalar-double,     0x03),
2277         NULLMASK
2278 };
2279
2280 /* P6 event parser */
2281 static int
2282 p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
2283     struct pmc_op_pmcallocate *pmc_config)
2284 {
2285         char *e, *p, *q;
2286         uint64_t evmask;
2287         int count, n;
2288         const struct pmc_masks *pm, *pmask;
2289
2290         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2291         pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;
2292
2293         evmask = 0;
2294
2295 #define P6MASKSET(M)    pmask = p6_mask_ ## M
2296
2297         switch(pe) {
2298         case PMC_EV_P6_L2_IFETCH:       P6MASKSET(mesi); break;
2299         case PMC_EV_P6_L2_LD:           P6MASKSET(mesi); break;
2300         case PMC_EV_P6_L2_ST:           P6MASKSET(mesi); break;
2301         case PMC_EV_P6_L2_RQSTS:        P6MASKSET(mesi); break;
2302         case PMC_EV_P6_BUS_DRDY_CLOCKS:
2303         case PMC_EV_P6_BUS_LOCK_CLOCKS:
2304         case PMC_EV_P6_BUS_TRAN_BRD:
2305         case PMC_EV_P6_BUS_TRAN_RFO:
2306         case PMC_EV_P6_BUS_TRANS_WB:
2307         case PMC_EV_P6_BUS_TRAN_IFETCH:
2308         case PMC_EV_P6_BUS_TRAN_INVAL:
2309         case PMC_EV_P6_BUS_TRAN_PWR:
2310         case PMC_EV_P6_BUS_TRANS_P:
2311         case PMC_EV_P6_BUS_TRANS_IO:
2312         case PMC_EV_P6_BUS_TRAN_DEF:
2313         case PMC_EV_P6_BUS_TRAN_BURST:
2314         case PMC_EV_P6_BUS_TRAN_ANY:
2315         case PMC_EV_P6_BUS_TRAN_MEM:
2316                 P6MASKSET(any); break;
2317         case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
2318         case PMC_EV_P6_EMON_KNI_PREF_MISS:
2319                 P6MASKSET(ekp); break;
2320         case PMC_EV_P6_EMON_KNI_INST_RETIRED:
2321         case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
2322                 P6MASKSET(pps); break;
2323         case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
2324                 P6MASKSET(mite); break;
2325         case PMC_EV_P6_FP_MMX_TRANS:
2326                 P6MASKSET(fmt); break;
2327         case PMC_EV_P6_SEG_RENAME_STALLS:
2328         case PMC_EV_P6_SEG_REG_RENAMES:
2329                 P6MASKSET(sr);  break;
2330         case PMC_EV_P6_EMON_EST_TRANS:
2331                 P6MASKSET(eet); break;
2332         case PMC_EV_P6_EMON_FUSED_UOPS_RET:
2333                 P6MASKSET(efur); break;
2334         case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
2335                 P6MASKSET(essir); break;
2336         case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
2337                 P6MASKSET(esscir); break;
2338         default:
2339                 pmask = NULL;
2340                 break;
2341         }
2342
2343         /* Pentium M PMCs have a few events with different semantics */
2344         if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
2345                 if (pe == PMC_EV_P6_L2_LD ||
2346                     pe == PMC_EV_P6_L2_LINES_IN ||
2347                     pe == PMC_EV_P6_L2_LINES_OUT)
2348                         P6MASKSET(mesihw);
2349                 else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
2350                         P6MASKSET(hw);
2351         }
2352
2353         /* Parse additional modifiers if present */
2354         while ((p = strsep(&ctrspec, ",")) != NULL) {
2355                 if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
2356                         q = strchr(p, '=');
2357                         if (*++q == '\0') /* skip '=' */
2358                                 return (-1);
2359                         count = strtol(q, &e, 0);
2360                         if (e == q || *e != '\0')
2361                                 return (-1);
2362                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
2363                         pmc_config->pm_md.pm_ppro.pm_ppro_config |=
2364                             P6_EVSEL_TO_CMASK(count);
2365                 } else if (KWMATCH(p, P6_KW_EDGE)) {
2366                         pmc_config->pm_caps |= PMC_CAP_EDGE;
2367                 } else if (KWMATCH(p, P6_KW_INV)) {
2368                         pmc_config->pm_caps |= PMC_CAP_INVERT;
2369                 } else if (KWMATCH(p, P6_KW_OS)) {
2370                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2371                 } else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
2372                         evmask = 0;
2373                         if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
2374                                 return (-1);
2375                         if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
2376                              pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
2377                              pe == PMC_EV_P6_BUS_TRAN_BRD ||
2378                              pe == PMC_EV_P6_BUS_TRAN_RFO ||
2379                              pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
2380                              pe == PMC_EV_P6_BUS_TRAN_INVAL ||
2381                              pe == PMC_EV_P6_BUS_TRAN_PWR ||
2382                              pe == PMC_EV_P6_BUS_TRAN_DEF ||
2383                              pe == PMC_EV_P6_BUS_TRAN_BURST ||
2384                              pe == PMC_EV_P6_BUS_TRAN_ANY ||
2385                              pe == PMC_EV_P6_BUS_TRAN_MEM ||
2386                              pe == PMC_EV_P6_BUS_TRANS_IO ||
2387                              pe == PMC_EV_P6_BUS_TRANS_P ||
2388                              pe == PMC_EV_P6_BUS_TRANS_WB ||
2389                              pe == PMC_EV_P6_EMON_EST_TRANS ||
2390                              pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
2391                              pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
2392                              pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
2393                              pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
2394                              pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
2395                              pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
2396                              pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
2397                              pe == PMC_EV_P6_FP_MMX_TRANS)
2398                             && (n > 1)) /* Only one mask keyword is allowed. */
2399                                 return (-1);
2400                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2401                 } else if (KWMATCH(p, P6_KW_USR)) {
2402                         pmc_config->pm_caps |= PMC_CAP_USER;
2403                 } else
2404                         return (-1);
2405         }
2406
2407         /* post processing */
2408         switch (pe) {
2409
2410                 /*
2411                  * The following events default to an evmask of 0
2412                  */
2413
2414                 /* default => 'self' */
2415         case PMC_EV_P6_BUS_DRDY_CLOCKS:
2416         case PMC_EV_P6_BUS_LOCK_CLOCKS:
2417         case PMC_EV_P6_BUS_TRAN_BRD:
2418         case PMC_EV_P6_BUS_TRAN_RFO:
2419         case PMC_EV_P6_BUS_TRANS_WB:
2420         case PMC_EV_P6_BUS_TRAN_IFETCH:
2421         case PMC_EV_P6_BUS_TRAN_INVAL:
2422         case PMC_EV_P6_BUS_TRAN_PWR:
2423         case PMC_EV_P6_BUS_TRANS_P:
2424         case PMC_EV_P6_BUS_TRANS_IO:
2425         case PMC_EV_P6_BUS_TRAN_DEF:
2426         case PMC_EV_P6_BUS_TRAN_BURST:
2427         case PMC_EV_P6_BUS_TRAN_ANY:
2428         case PMC_EV_P6_BUS_TRAN_MEM:
2429
2430                 /* default => 'nta' */
2431         case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
2432         case PMC_EV_P6_EMON_KNI_PREF_MISS:
2433
2434                 /* default => 'packed and scalar' */
2435         case PMC_EV_P6_EMON_KNI_INST_RETIRED:
2436         case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
2437
2438                 /* default => 'mmx to fp transitions' */
2439         case PMC_EV_P6_FP_MMX_TRANS:
2440
2441                 /* default => 'SSE Packed Single' */
2442         case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
2443         case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
2444
2445                 /* default => 'all fused micro-ops' */
2446         case PMC_EV_P6_EMON_FUSED_UOPS_RET:
2447
2448                 /* default => 'all transitions' */
2449         case PMC_EV_P6_EMON_EST_TRANS:
2450                 break;
2451
2452         case PMC_EV_P6_MMX_UOPS_EXEC:
2453                 evmask = 0x0F;          /* only value allowed */
2454                 break;
2455
2456         default:
2457                 /*
2458                  * For all other events, set the default event mask
2459                  * to a logical OR of all the allowed event mask bits.
2460                  */
2461                 if (evmask == 0 && pmask) {
2462                         for (pm = pmask; pm->pm_name; pm++)
2463                                 evmask |= pm->pm_value;
2464                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2465                 }
2466
2467                 break;
2468         }
2469
2470         if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
2471                 pmc_config->pm_md.pm_ppro.pm_ppro_config |=
2472                     P6_EVSEL_TO_UMASK(evmask);
2473
2474         return (0);
2475 }
2476
2477 #endif
2478
2479 #if     defined(__i386__) || defined(__amd64__)
2480 static int
2481 tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
2482     struct pmc_op_pmcallocate *pmc_config)
2483 {
2484         if (pe != PMC_EV_TSC_TSC)
2485                 return (-1);
2486
2487         /* TSC events must be unqualified. */
2488         if (ctrspec && *ctrspec != '\0')
2489                 return (-1);
2490
2491         pmc_config->pm_md.pm_amd.pm_amd_config = 0;
2492         pmc_config->pm_caps |= PMC_CAP_READ;
2493
2494         return (0);
2495 }
2496 #endif
2497
2498 static struct pmc_event_alias generic_aliases[] = {
2499         EV_ALIAS("instructions",                "SOFT-CLOCK.HARD"),
2500         EV_ALIAS(NULL, NULL)
2501 };
2502
2503 static int
2504 soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
2505     struct pmc_op_pmcallocate *pmc_config)
2506 {
2507         (void)ctrspec;
2508         (void)pmc_config;
2509
2510         if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
2511                 return (-1);
2512
2513         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2514         return (0);
2515 }
2516
2517 #if     defined(__arm__)
2518 #if     defined(__XSCALE__)
2519
2520 static struct pmc_event_alias xscale_aliases[] = {
2521         EV_ALIAS("branches",            "BRANCH_RETIRED"),
2522         EV_ALIAS("branch-mispredicts",  "BRANCH_MISPRED"),
2523         EV_ALIAS("dc-misses",           "DC_MISS"),
2524         EV_ALIAS("ic-misses",           "IC_MISS"),
2525         EV_ALIAS("instructions",        "INSTR_RETIRED"),
2526         EV_ALIAS(NULL, NULL)
2527 };
2528 static int
2529 xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2530     struct pmc_op_pmcallocate *pmc_config __unused)
2531 {
2532         switch (pe) {
2533         default:
2534                 break;
2535         }
2536
2537         return (0);
2538 }
2539 #endif
2540
2541 static struct pmc_event_alias cortex_a8_aliases[] = {
2542         EV_ALIAS("dc-misses",           "L1_DCACHE_REFILL"),
2543         EV_ALIAS("ic-misses",           "L1_ICACHE_REFILL"),
2544         EV_ALIAS("instructions",        "INSTR_EXECUTED"),
2545         EV_ALIAS(NULL, NULL)
2546 };
2547
2548 static struct pmc_event_alias cortex_a9_aliases[] = {
2549         EV_ALIAS("dc-misses",           "L1_DCACHE_REFILL"),
2550         EV_ALIAS("ic-misses",           "L1_ICACHE_REFILL"),
2551         EV_ALIAS("instructions",        "INSTR_EXECUTED"),
2552         EV_ALIAS(NULL, NULL)
2553 };
2554
2555 static int
2556 armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2557     struct pmc_op_pmcallocate *pmc_config __unused)
2558 {
2559         switch (pe) {
2560         default:
2561                 break;
2562         }
2563
2564         return (0);
2565 }
2566 #endif
2567
2568 #if     defined(__aarch64__)
2569 static struct pmc_event_alias cortex_a53_aliases[] = {
2570         EV_ALIAS(NULL, NULL)
2571 };
2572 static struct pmc_event_alias cortex_a57_aliases[] = {
2573         EV_ALIAS(NULL, NULL)
2574 };
2575 static int
2576 arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2577     struct pmc_op_pmcallocate *pmc_config __unused)
2578 {
2579         switch (pe) {
2580         default:
2581                 break;
2582         }
2583
2584         return (0);
2585 }
2586 #endif
2587
2588 #if defined(__mips__)
2589
2590 static struct pmc_event_alias mips24k_aliases[] = {
2591         EV_ALIAS("instructions",        "INSTR_EXECUTED"),
2592         EV_ALIAS("branches",            "BRANCH_COMPLETED"),
2593         EV_ALIAS("branch-mispredicts",  "BRANCH_MISPRED"),
2594         EV_ALIAS(NULL, NULL)
2595 };
2596
2597 static struct pmc_event_alias mips74k_aliases[] = {
2598         EV_ALIAS("instructions",        "INSTR_EXECUTED"),
2599         EV_ALIAS("branches",            "BRANCH_INSNS"),
2600         EV_ALIAS("branch-mispredicts",  "MISPREDICTED_BRANCH_INSNS"),
2601         EV_ALIAS(NULL, NULL)
2602 };
2603
2604 static struct pmc_event_alias octeon_aliases[] = {
2605         EV_ALIAS("instructions",        "RET"),
2606         EV_ALIAS("branches",            "BR"),
2607         EV_ALIAS("branch-mispredicts",  "BRMIS"),
2608         EV_ALIAS(NULL, NULL)
2609 };
2610
2611 #define MIPS_KW_OS              "os"
2612 #define MIPS_KW_USR             "usr"
2613 #define MIPS_KW_ANYTHREAD       "anythread"
2614
2615 static int
2616 mips_allocate_pmc(enum pmc_event pe, char *ctrspec,
2617                   struct pmc_op_pmcallocate *pmc_config)
2618 {
2619         char *p;
2620
2621         (void) pe;
2622
2623         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2624
2625         while ((p = strsep(&ctrspec, ",")) != NULL) {
2626                 if (KWMATCH(p, MIPS_KW_OS))
2627                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2628                 else if (KWMATCH(p, MIPS_KW_USR))
2629                         pmc_config->pm_caps |= PMC_CAP_USER;
2630                 else if (KWMATCH(p, MIPS_KW_ANYTHREAD))
2631                         pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
2632                 else
2633                         return (-1);
2634         }
2635
2636         return (0);
2637 }
2638
2639 #endif /* __mips__ */
2640
2641 #if defined(__powerpc__)
2642
2643 static struct pmc_event_alias ppc7450_aliases[] = {
2644         EV_ALIAS("instructions",        "INSTR_COMPLETED"),
2645         EV_ALIAS("branches",            "BRANCHES_COMPLETED"),
2646         EV_ALIAS("branch-mispredicts",  "MISPREDICTED_BRANCHES"),
2647         EV_ALIAS(NULL, NULL)
2648 };
2649
2650 static struct pmc_event_alias ppc970_aliases[] = {
2651         EV_ALIAS("instructions", "INSTR_COMPLETED"),
2652         EV_ALIAS("cycles",       "CYCLES"),
2653         EV_ALIAS(NULL, NULL)
2654 };
2655
2656 static struct pmc_event_alias e500_aliases[] = {
2657         EV_ALIAS("instructions", "INSTR_COMPLETED"),
2658         EV_ALIAS("cycles",       "CYCLES"),
2659         EV_ALIAS(NULL, NULL)
2660 };
2661
2662 #define POWERPC_KW_OS           "os"
2663 #define POWERPC_KW_USR          "usr"
2664 #define POWERPC_KW_ANYTHREAD    "anythread"
2665
2666 static int
2667 powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec,
2668                      struct pmc_op_pmcallocate *pmc_config)
2669 {
2670         char *p;
2671
2672         (void) pe;
2673
2674         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2675
2676         while ((p = strsep(&ctrspec, ",")) != NULL) {
2677                 if (KWMATCH(p, POWERPC_KW_OS))
2678                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2679                 else if (KWMATCH(p, POWERPC_KW_USR))
2680                         pmc_config->pm_caps |= PMC_CAP_USER;
2681                 else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
2682                         pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
2683                 else
2684                         return (-1);
2685         }
2686
2687         return (0);
2688 }
2689
2690 #endif /* __powerpc__ */
2691
2692
2693 /*
2694  * Match an event name `name' with its canonical form.
2695  *
2696  * Matches are case-insensitive.  Space, period, underscore and hyphen
2697  * characters are considered equivalent to each other.
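 *
 * For example (illustrative spellings), "dc-refill-from.l2",
 * "DC_REFILL_FROM_L2" and "dc refill from l2" are all treated as
 * equivalent.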
2698  *
2699  * Returns 1 for a match, 0 otherwise.
2700  */
2701
2702 static int
2703 pmc_match_event_name(const char *name, const char *canonicalname)
2704 {
2705         int cc, nc;
2706         const unsigned char *c, *n;
2707
2708         c = (const unsigned char *) canonicalname;
2709         n = (const unsigned char *) name;
2710
2711         for (; (nc = *n) && (cc = *c); n++, c++) {
2712
2713                 if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
2714                     (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
2715                         continue;
2716
2717                 if (toupper(nc) == toupper(cc))
2718                         continue;
2719
2720
2721                 return (0);
2722         }
2723
2724         if (*n == '\0' && *c == '\0')
2725                 return (1);
2726
2727         return (0);
2728 }
2729
2730 /*
2731  * Match an event name against all the event names supported by a
2732  * PMC class.
2733  *
2734  * Returns an event descriptor pointer on match or NULL otherwise.
2735  */
2736 static const struct pmc_event_descr *
2737 pmc_match_event_class(const char *name,
2738     const struct pmc_class_descr *pcd)
2739 {
2740         size_t n;
2741         const struct pmc_event_descr *ev;
2742
2743         ev = pcd->pm_evc_event_table;
2744         for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
2745                 if (pmc_match_event_name(name, ev->pm_ev_name))
2746                         return (ev);
2747
2748         return (NULL);
2749 }
2750
2751 static int
2752 pmc_mdep_is_compatible_class(enum pmc_class pc)
2753 {
2754         size_t n;
2755
2756         for (n = 0; n < pmc_mdep_class_list_size; n++)
2757                 if (pmc_mdep_class_list[n] == pc)
2758                         return (1);
2759         return (0);
2760 }
2761
2762 /*
2763  * API entry points
2764  */
2765
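/*
 * Usage sketch for pmc_allocate() (illustrative; "instructions" is an
 * event alias defined in the tables above, and PMC_CPU_ANY is the usual
 * CPU argument for process-scope PMCs):
 *
 *	pmc_id_t pmcid;
 *
 *	if (pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY,
 *	    &pmcid) < 0)
 *		err(EX_OSERR, "cannot allocate a counting PMC");
 */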
2766 int
2767 pmc_allocate(const char *ctrspec, enum pmc_mode mode,
2768     uint32_t flags, int cpu, pmc_id_t *pmcid)
2769 {
2770         size_t n;
2771         int retval;
2772         char *r, *spec_copy;
2773         const char *ctrname;
2774         const struct pmc_event_descr *ev;
2775         const struct pmc_event_alias *alias;
2776         struct pmc_op_pmcallocate pmc_config;
2777         const struct pmc_class_descr *pcd;
2778
2779         spec_copy = NULL;
2780         retval    = -1;
2781
2782         if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
2783             mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
2784                 return (EINVAL);
2785         }
2786         bzero(&pmc_config, sizeof(pmc_config));
2787         pmc_config.pm_cpu   = cpu;
2788         pmc_config.pm_mode  = mode;
2789         pmc_config.pm_flags = flags;
2790         if (PMC_IS_SAMPLING_MODE(mode))
2791                 pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
2792         /*
2793          * Can we pull this straight from the pmu table?
2794          */
2795         r = spec_copy = strdup(ctrspec);
2796         ctrname = strsep(&r, ",");
2797         if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0) {
2798                 if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0) {
2799                         retval = errno;
2800                         goto out;
2801                 }
2802                 *pmcid = pmc_config.pm_pmcid;
2803                 goto out;
2804         } else {
2805                 free(spec_copy);
2806                 spec_copy = NULL;
2807         }
2808
2809         /* replace an event alias with the canonical event specifier */
2810         if (pmc_mdep_event_aliases)
2811                 for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
2812                         if (!strcasecmp(ctrspec, alias->pm_alias)) {
2813                                 spec_copy = strdup(alias->pm_spec);
2814                                 break;
2815                         }
2816
2817         if (spec_copy == NULL)
2818                 spec_copy = strdup(ctrspec);
2819
2820         r = spec_copy;
2821         ctrname = strsep(&r, ",");
2822
2823         /*
2824          * If an explicit class prefix was given by the user, restrict the
2825          * search for the event to the specified PMC class.
2826          */
2827         ev = NULL;
2828         for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
2829                 pcd = pmc_class_table[n];
2830                 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
2831                     strncasecmp(ctrname, pcd->pm_evc_name,
2832                                 pcd->pm_evc_name_size) == 0) {
2833                         if ((ev = pmc_match_event_class(ctrname +
2834                             pcd->pm_evc_name_size, pcd)) == NULL) {
2835                                 errno = EINVAL;
2836                                 goto out;
2837                         }
2838                         break;
2839                 }
2840         }
2841
2842         /*
2843          * Otherwise, search for this event in all compatible PMC
2844          * classes.
2845          */
2846         for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
2847                 pcd = pmc_class_table[n];
2848                 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
2849                         ev = pmc_match_event_class(ctrname, pcd);
2850         }
2851
2852         if (ev == NULL) {
2853                 errno = EINVAL;
2854                 goto out;
2855         }
2856
2857         pmc_config.pm_ev    = ev->pm_ev_code;
2858         pmc_config.pm_class = pcd->pm_evc_class;
2859
2860         if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
2861                 errno = EINVAL;
2862                 goto out;
2863         }
2864
2865         if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
2866                 goto out;
2867
2868         *pmcid = pmc_config.pm_pmcid;
2869
2870         retval = 0;
2871
2872  out:
2873         if (spec_copy)
2874                 free(spec_copy);
2875
2876         return (retval);
2877 }
2878
2879 int
2880 pmc_attach(pmc_id_t pmc, pid_t pid)
2881 {
2882         struct pmc_op_pmcattach pmc_attach_args;
2883
2884         pmc_attach_args.pm_pmc = pmc;
2885         pmc_attach_args.pm_pid = pid;
2886
2887         return (PMC_CALL(PMCATTACH, &pmc_attach_args));
2888 }
2889
2890 int
2891 pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
2892 {
2893         unsigned int i;
2894         enum pmc_class cl;
2895
2896         cl = PMC_ID_TO_CLASS(pmcid);
2897         for (i = 0; i < cpu_info.pm_nclass; i++)
2898                 if (cpu_info.pm_classes[i].pm_class == cl) {
2899                         *caps = cpu_info.pm_classes[i].pm_caps;
2900                         return (0);
2901                 }
2902         errno = EINVAL;
2903         return (-1);
2904 }
2905
2906 int
2907 pmc_configure_logfile(int fd)
2908 {
2909         struct pmc_op_configurelog cla;
2910
2911         cla.pm_logfd = fd;
2912         if (PMC_CALL(CONFIGURELOG, &cla) < 0)
2913                 return (-1);
2914         return (0);
2915 }
2916
2917 int
2918 pmc_cpuinfo(const struct pmc_cpuinfo **pci)
2919 {
2920         if (pmc_syscall == -1) {
2921                 errno = ENXIO;
2922                 return (-1);
2923         }
2924
2925         *pci = &cpu_info;
2926         return (0);
2927 }
2928
2929 int
2930 pmc_detach(pmc_id_t pmc, pid_t pid)
2931 {
2932         struct pmc_op_pmcattach pmc_detach_args;
2933
2934         pmc_detach_args.pm_pmc = pmc;
2935         pmc_detach_args.pm_pid = pid;
2936         return (PMC_CALL(PMCDETACH, &pmc_detach_args));
2937 }
2938
2939 int
2940 pmc_disable(int cpu, int pmc)
2941 {
2942         struct pmc_op_pmcadmin ssa;
2943
2944         ssa.pm_cpu = cpu;
2945         ssa.pm_pmc = pmc;
2946         ssa.pm_state = PMC_STATE_DISABLED;
2947         return (PMC_CALL(PMCADMIN, &ssa));
2948 }
2949
2950 int
2951 pmc_enable(int cpu, int pmc)
2952 {
2953         struct pmc_op_pmcadmin ssa;
2954
2955         ssa.pm_cpu = cpu;
2956         ssa.pm_pmc = pmc;
2957         ssa.pm_state = PMC_STATE_FREE;
2958         return (PMC_CALL(PMCADMIN, &ssa));
2959 }
2960
2961 /*
2962  * Return a list of events known to a given PMC class.  'cl' is the
2963  * PMC class identifier, 'eventnames' is the returned list of 'const
2964  * char *' pointers pointing to the names of the events. 'nevents' is
2965  * the number of event name pointers returned.
2966  *
2967  * The space for 'eventnames' is allocated using malloc(3).  The caller
2968  * is responsible for freeing this space when done.
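 *
 * A usage sketch (illustrative; error handling elided):
 *
 *	const char **names;
 *	int i, nevents;
 *
 *	if (pmc_event_names_of_class(PMC_CLASS_IAP, &names, &nevents) == 0) {
 *		for (i = 0; i < nevents; i++)
 *			printf("%s\n", names[i]);
 *		free(names);
 *	}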
2969  */
2970 int
2971 pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
2972     int *nevents)
2973 {
2974         int count;
2975         const char **names;
2976         const struct pmc_event_descr *ev;
2977
2978         switch (cl)
2979         {
2980         case PMC_CLASS_IAF:
2981                 ev = iaf_event_table;
2982                 count = PMC_EVENT_TABLE_SIZE(iaf);
2983                 break;
2984         case PMC_CLASS_IAP:
2985                 /*
2986                  * Return the most appropriate set of event name
2987                  * spellings for the current CPU.
2988                  */
2989                 switch (cpu_info.pm_cputype) {
2990                 default:
2991                 case PMC_CPU_INTEL_ATOM:
2992                         ev = atom_event_table;
2993                         count = PMC_EVENT_TABLE_SIZE(atom);
2994                         break;
2995                 case PMC_CPU_INTEL_ATOM_SILVERMONT:
2996                         ev = atom_silvermont_event_table;
2997                         count = PMC_EVENT_TABLE_SIZE(atom_silvermont);
2998                         break;
2999                 case PMC_CPU_INTEL_CORE:
3000                         ev = core_event_table;
3001                         count = PMC_EVENT_TABLE_SIZE(core);
3002                         break;
3003                 case PMC_CPU_INTEL_CORE2:
3004                 case PMC_CPU_INTEL_CORE2EXTREME:
3005                         ev = core2_event_table;
3006                         count = PMC_EVENT_TABLE_SIZE(core2);
3007                         break;
3008                 case PMC_CPU_INTEL_COREI7:
3009                         ev = corei7_event_table;
3010                         count = PMC_EVENT_TABLE_SIZE(corei7);
3011                         break;
3012                 case PMC_CPU_INTEL_NEHALEM_EX:
3013                         ev = nehalem_ex_event_table;
3014                         count = PMC_EVENT_TABLE_SIZE(nehalem_ex);
3015                         break;
3016                 case PMC_CPU_INTEL_HASWELL:
3017                         ev = haswell_event_table;
3018                         count = PMC_EVENT_TABLE_SIZE(haswell);
3019                         break;
3020                 case PMC_CPU_INTEL_HASWELL_XEON:
3021                         ev = haswell_xeon_event_table;
3022                         count = PMC_EVENT_TABLE_SIZE(haswell_xeon);
3023                         break;
3024                 case PMC_CPU_INTEL_BROADWELL:
3025                         ev = broadwell_event_table;
3026                         count = PMC_EVENT_TABLE_SIZE(broadwell);
3027                         break;
3028                 case PMC_CPU_INTEL_BROADWELL_XEON:
3029                         ev = broadwell_xeon_event_table;
3030                         count = PMC_EVENT_TABLE_SIZE(broadwell_xeon);
3031                         break;
3032                 case PMC_CPU_INTEL_SKYLAKE:
3033                         ev = skylake_event_table;
3034                         count = PMC_EVENT_TABLE_SIZE(skylake);
3035                         break;
3036                 case PMC_CPU_INTEL_SKYLAKE_XEON:
3037                         ev = skylake_xeon_event_table;
3038                         count = PMC_EVENT_TABLE_SIZE(skylake_xeon);
3039                         break;
3040                 case PMC_CPU_INTEL_IVYBRIDGE:
3041                         ev = ivybridge_event_table;
3042                         count = PMC_EVENT_TABLE_SIZE(ivybridge);
3043                         break;
3044                 case PMC_CPU_INTEL_IVYBRIDGE_XEON:
3045                         ev = ivybridge_xeon_event_table;
3046                         count = PMC_EVENT_TABLE_SIZE(ivybridge_xeon);
3047                         break;
3048                 case PMC_CPU_INTEL_SANDYBRIDGE:
3049                         ev = sandybridge_event_table;
3050                         count = PMC_EVENT_TABLE_SIZE(sandybridge);
3051                         break;
3052                 case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
3053                         ev = sandybridge_xeon_event_table;
3054                         count = PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
3055                         break;
3056                 case PMC_CPU_INTEL_WESTMERE:
3057                         ev = westmere_event_table;
3058                         count = PMC_EVENT_TABLE_SIZE(westmere);
3059                         break;
3060                 case PMC_CPU_INTEL_WESTMERE_EX:
3061                         ev = westmere_ex_event_table;
3062                         count = PMC_EVENT_TABLE_SIZE(westmere_ex);
3063                         break;
3064                 }
3065                 break;
3066         case PMC_CLASS_UCF:
3067                 ev = ucf_event_table;
3068                 count = PMC_EVENT_TABLE_SIZE(ucf);
3069                 break;
3070         case PMC_CLASS_UCP:
3071                 /*
3072                  * Return the most appropriate set of event name
3073                  * spellings for the current CPU.
3074                  */
3075                 switch (cpu_info.pm_cputype) {
3076                 default:
3077                 case PMC_CPU_INTEL_COREI7:
3078                         ev = corei7uc_event_table;
3079                         count = PMC_EVENT_TABLE_SIZE(corei7uc);
3080                         break;
3081                 case PMC_CPU_INTEL_HASWELL:
3082                         ev = haswelluc_event_table;
3083                         count = PMC_EVENT_TABLE_SIZE(haswelluc);
3084                         break;
3085                 case PMC_CPU_INTEL_BROADWELL:
3086                         ev = broadwelluc_event_table;
3087                         count = PMC_EVENT_TABLE_SIZE(broadwelluc);
3088                         break;
3089                 case PMC_CPU_INTEL_SANDYBRIDGE:
3090                         ev = sandybridgeuc_event_table;
3091                         count = PMC_EVENT_TABLE_SIZE(sandybridgeuc);
3092                         break;
3093                 case PMC_CPU_INTEL_WESTMERE:
3094                         ev = westmereuc_event_table;
3095                         count = PMC_EVENT_TABLE_SIZE(westmereuc);
3096                         break;
3097                 }
3098                 break;
3099         case PMC_CLASS_TSC:
3100                 ev = tsc_event_table;
3101                 count = PMC_EVENT_TABLE_SIZE(tsc);
3102                 break;
3103         case PMC_CLASS_K7:
3104                 ev = k7_event_table;
3105                 count = PMC_EVENT_TABLE_SIZE(k7);
3106                 break;
3107         case PMC_CLASS_K8:
3108                 ev = k8_event_table;
3109                 count = PMC_EVENT_TABLE_SIZE(k8);
3110                 break;
3111         case PMC_CLASS_P4:
3112                 ev = p4_event_table;
3113                 count = PMC_EVENT_TABLE_SIZE(p4);
3114                 break;
3115         case PMC_CLASS_P5:
3116                 ev = p5_event_table;
3117                 count = PMC_EVENT_TABLE_SIZE(p5);
3118                 break;
3119         case PMC_CLASS_P6:
3120                 ev = p6_event_table;
3121                 count = PMC_EVENT_TABLE_SIZE(p6);
3122                 break;
3123         case PMC_CLASS_XSCALE:
3124                 ev = xscale_event_table;
3125                 count = PMC_EVENT_TABLE_SIZE(xscale);
3126                 break;
3127         case PMC_CLASS_ARMV7:
3128                 switch (cpu_info.pm_cputype) {
3129                 default:
3130                 case PMC_CPU_ARMV7_CORTEX_A8:
3131                         ev = cortex_a8_event_table;
3132                         count = PMC_EVENT_TABLE_SIZE(cortex_a8);
3133                         break;
3134                 case PMC_CPU_ARMV7_CORTEX_A9:
3135                         ev = cortex_a9_event_table;
3136                         count = PMC_EVENT_TABLE_SIZE(cortex_a9);
3137                         break;
3138                 }
3139                 break;
3140         case PMC_CLASS_ARMV8:
3141                 switch (cpu_info.pm_cputype) {
3142                 default:
3143                 case PMC_CPU_ARMV8_CORTEX_A53:
3144                         ev = cortex_a53_event_table;
3145                         count = PMC_EVENT_TABLE_SIZE(cortex_a53);
3146                         break;
3147                 case PMC_CPU_ARMV8_CORTEX_A57:
3148                         ev = cortex_a57_event_table;
3149                         count = PMC_EVENT_TABLE_SIZE(cortex_a57);
3150                         break;
3151                 }
3152                 break;
3153         case PMC_CLASS_MIPS24K:
3154                 ev = mips24k_event_table;
3155                 count = PMC_EVENT_TABLE_SIZE(mips24k);
3156                 break;
3157         case PMC_CLASS_MIPS74K:
3158                 ev = mips74k_event_table;
3159                 count = PMC_EVENT_TABLE_SIZE(mips74k);
3160                 break;
3161         case PMC_CLASS_OCTEON:
3162                 ev = octeon_event_table;
3163                 count = PMC_EVENT_TABLE_SIZE(octeon);
3164                 break;
3165         case PMC_CLASS_PPC7450:
3166                 ev = ppc7450_event_table;
3167                 count = PMC_EVENT_TABLE_SIZE(ppc7450);
3168                 break;
3169         case PMC_CLASS_PPC970:
3170                 ev = ppc970_event_table;
3171                 count = PMC_EVENT_TABLE_SIZE(ppc970);
3172                 break;
3173         case PMC_CLASS_E500:
3174                 ev = e500_event_table;
3175                 count = PMC_EVENT_TABLE_SIZE(e500);
3176                 break;
3177         case PMC_CLASS_SOFT:
3178                 ev = soft_event_table;
3179                 count = soft_event_info.pm_nevent;
3180                 break;
3181         default:
3182                 errno = EINVAL;
3183                 return (-1);
3184         }
3185
3186         if ((names = malloc(count * sizeof(const char *))) == NULL)
3187                 return (-1);
3188
3189         *eventnames = names;
3190         *nevents = count;
3191
3192         for (; count--; ev++, names++)
3193                 *names = ev->pm_ev_name;
3194
3195         return (0);
3196 }
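
/*
 * Editor's sketch (not part of the library source): minimal use of
 * pmc_event_names_of_class() as described above.  It assumes pmc_init()
 * has already succeeded and that <stdio.h> and <stdlib.h> are included;
 * only the array itself is freed, the name strings remain owned by the
 * library.
 *
 *	const char **names;
 *	int i, nevents;
 *
 *	if (pmc_event_names_of_class(PMC_CLASS_SOFT, &names, &nevents) == 0) {
 *		for (i = 0; i < nevents; i++)
 *			printf("%s\n", names[i]);
 *		free(names);
 *	}
 */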
3197
3198 int
3199 pmc_flush_logfile(void)
3200 {
3201         return (PMC_CALL(FLUSHLOG, 0));
3202 }
3203
3204 int
3205 pmc_close_logfile(void)
3206 {
3207         return (PMC_CALL(CLOSELOG, 0));
3208 }
3209
3210 int
3211 pmc_get_driver_stats(struct pmc_driverstats *ds)
3212 {
3213         struct pmc_op_getdriverstats gms;
3214
3215         if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
3216                 return (-1);
3217
3218         /* copy out fields in the current userland<->library interface */
3219         ds->pm_intr_ignored    = gms.pm_intr_ignored;
3220         ds->pm_intr_processed  = gms.pm_intr_processed;
3221         ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
3222         ds->pm_syscalls        = gms.pm_syscalls;
3223         ds->pm_syscall_errors  = gms.pm_syscall_errors;
3224         ds->pm_buffer_requests = gms.pm_buffer_requests;
3225         ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
3226         ds->pm_log_sweeps      = gms.pm_log_sweeps;
3227         return (0);
3228 }
3229
3230 int
3231 pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
3232 {
3233         struct pmc_op_getmsr gm;
3234
3235         gm.pm_pmcid = pmc;
3236         if (PMC_CALL(PMCGETMSR, &gm) < 0)
3237                 return (-1);
3238         *msr = gm.pm_msr;
3239         return (0);
3240 }
3241
3242 int
3243 pmc_init(void)
3244 {
3245         int error, pmc_mod_id;
3246         unsigned int n;
3247         uint32_t abi_version;
3248         struct module_stat pmc_modstat;
3249         struct pmc_op_getcpuinfo op_cpu_info;
3250 #if defined(__amd64__) || defined(__i386__)
3251         int cpu_has_iaf_counters;
3252         unsigned int t;
3253 #endif
3254
3255         if (pmc_syscall != -1) /* already initialized */
3256                 return (0);
3257
3258         /* retrieve the system call number from the KLD */
3259         if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
3260                 return (-1);
3261
3262         pmc_modstat.version = sizeof(struct module_stat);
3263         if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
3264                 return (-1);
3265
3266         pmc_syscall = pmc_modstat.data.intval;
3267
3268         /* check the kernel module's ABI against our compiled-in version */
3269         abi_version = PMC_VERSION;
3270         if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
3271                 return (pmc_syscall = -1);
3272
3273         /* compare only the major number; ignore the minor & patch numbers */
3274         if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
3275                 errno  = EPROGMISMATCH;
3276                 return (pmc_syscall = -1);
3277         }
3278
3279         if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
3280                 return (pmc_syscall = -1);
3281
3282         cpu_info.pm_cputype = op_cpu_info.pm_cputype;
3283         cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
3284         cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
3285         cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
3286         for (n = 0; n < cpu_info.pm_nclass; n++)
3287                 memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
3288                     sizeof(cpu_info.pm_classes[n]));
3289
3290         pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
3291             sizeof(struct pmc_class_descr *));
3292
3293         if (pmc_class_table == NULL)
3294                 return (-1);
3295
3296         for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
3297                 pmc_class_table[n] = NULL;
3298
3299         /*
3300          * Get the list of software (SOFT class) events from the kernel.
3301          */
3302         soft_event_info.pm_class = PMC_CLASS_SOFT;
3303         if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
3304                 return (pmc_syscall = -1);
3305
3306         /* Map soft events to static list. */
3307         for (n = 0; n < soft_event_info.pm_nevent; n++) {
3308                 soft_event_table[n].pm_ev_name =
3309                     soft_event_info.pm_events[n].pm_ev_name;
3310                 soft_event_table[n].pm_ev_code =
3311                     soft_event_info.pm_events[n].pm_ev_code;
3312         }
3313         soft_class_table_descr.pm_evc_event_table_size =
3314             soft_event_info.pm_nevent;
3315         soft_class_table_descr.pm_evc_event_table =
3316             soft_event_table;
3317
3318         /*
3319          * Fill in the class table.
3320          */
3321         n = 0;
3322
3323         /* Fill in the soft event information. */
3324         pmc_class_table[n++] = &soft_class_table_descr;
3325 #if defined(__amd64__) || defined(__i386__)
3326         if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
3327                 pmc_class_table[n++] = &tsc_class_table_descr;
3328
3329         /*
3330          * Check if this CPU has fixed function counters.
3331          */
3332         cpu_has_iaf_counters = 0;
3333         for (t = 0; t < cpu_info.pm_nclass; t++)
3334                 if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF &&
3335                     cpu_info.pm_classes[t].pm_num > 0)
3336                         cpu_has_iaf_counters = 1;
3337 #endif
3338
3339 #define PMC_MDEP_INIT(C) do {                                   \
3340                 pmc_mdep_event_aliases    = C##_aliases;        \
3341                 pmc_mdep_class_list  = C##_pmc_classes;         \
3342                 pmc_mdep_class_list_size =                      \
3343                     PMC_TABLE_SIZE(C##_pmc_classes);            \
3344         } while (0)
3345
3346 #define PMC_MDEP_INIT_INTEL_V2(C) do {                                  \
3347                 PMC_MDEP_INIT(C);                                       \
3348                 pmc_class_table[n++] = &iaf_class_table_descr;          \
3349                 if (!cpu_has_iaf_counters)                              \
3350                         pmc_mdep_event_aliases =                        \
3351                                 C##_aliases_without_iaf;                \
3352                 pmc_class_table[n] = &C##_class_table_descr;            \
3353         } while (0)
3354
3355         /* Configure the event name parser. */
3356         switch (cpu_info.pm_cputype) {
3357 #if defined(__i386__)
3358         case PMC_CPU_AMD_K7:
3359                 PMC_MDEP_INIT(k7);
3360                 pmc_class_table[n] = &k7_class_table_descr;
3361                 break;
3362         case PMC_CPU_INTEL_P5:
3363                 PMC_MDEP_INIT(p5);
3364                 pmc_class_table[n]  = &p5_class_table_descr;
3365                 break;
3366         case PMC_CPU_INTEL_P6:          /* P6 ... Pentium M CPUs have */
3367         case PMC_CPU_INTEL_PII:         /* similar PMCs. */
3368         case PMC_CPU_INTEL_PIII:
3369         case PMC_CPU_INTEL_PM:
3370                 PMC_MDEP_INIT(p6);
3371                 pmc_class_table[n] = &p6_class_table_descr;
3372                 break;
3373 #endif
3374 #if defined(__amd64__) || defined(__i386__)
3375         case PMC_CPU_AMD_K8:
3376                 PMC_MDEP_INIT(k8);
3377                 pmc_class_table[n] = &k8_class_table_descr;
3378                 break;
3379         case PMC_CPU_INTEL_ATOM:
3380                 PMC_MDEP_INIT_INTEL_V2(atom);
3381                 break;
3382         case PMC_CPU_INTEL_ATOM_SILVERMONT:
3383                 PMC_MDEP_INIT_INTEL_V2(atom_silvermont);
3384                 break;
3385         case PMC_CPU_INTEL_CORE:
3386                 PMC_MDEP_INIT(core);
3387                 pmc_class_table[n] = &core_class_table_descr;
3388                 break;
3389         case PMC_CPU_INTEL_CORE2:
3390         case PMC_CPU_INTEL_CORE2EXTREME:
3391                 PMC_MDEP_INIT_INTEL_V2(core2);
3392                 break;
3393         case PMC_CPU_INTEL_COREI7:
3394                 pmc_class_table[n++] = &ucf_class_table_descr;
3395                 pmc_class_table[n++] = &corei7uc_class_table_descr;
3396                 PMC_MDEP_INIT_INTEL_V2(corei7);
3397                 break;
3398         case PMC_CPU_INTEL_NEHALEM_EX:
3399                 PMC_MDEP_INIT_INTEL_V2(nehalem_ex);
3400                 break;
3401         case PMC_CPU_INTEL_HASWELL:
3402                 pmc_class_table[n++] = &ucf_class_table_descr;
3403                 pmc_class_table[n++] = &haswelluc_class_table_descr;
3404                 PMC_MDEP_INIT_INTEL_V2(haswell);
3405                 break;
3406         case PMC_CPU_INTEL_HASWELL_XEON:
3407                 PMC_MDEP_INIT_INTEL_V2(haswell_xeon);
3408                 break;
3409         case PMC_CPU_INTEL_BROADWELL:
3410                 pmc_class_table[n++] = &ucf_class_table_descr;
3411                 pmc_class_table[n++] = &broadwelluc_class_table_descr;
3412                 PMC_MDEP_INIT_INTEL_V2(broadwell);
3413                 break;
3414         case PMC_CPU_INTEL_BROADWELL_XEON:
3415                 PMC_MDEP_INIT_INTEL_V2(broadwell_xeon);
3416                 break;
3417         case PMC_CPU_INTEL_SKYLAKE:
3418                 PMC_MDEP_INIT_INTEL_V2(skylake);
3419                 break;
3420         case PMC_CPU_INTEL_SKYLAKE_XEON:
3421                 PMC_MDEP_INIT_INTEL_V2(skylake_xeon);
3422                 break;
3423         case PMC_CPU_INTEL_IVYBRIDGE:
3424                 PMC_MDEP_INIT_INTEL_V2(ivybridge);
3425                 break;
3426         case PMC_CPU_INTEL_IVYBRIDGE_XEON:
3427                 PMC_MDEP_INIT_INTEL_V2(ivybridge_xeon);
3428                 break;
3429         case PMC_CPU_INTEL_SANDYBRIDGE:
3430                 pmc_class_table[n++] = &ucf_class_table_descr;
3431                 pmc_class_table[n++] = &sandybridgeuc_class_table_descr;
3432                 PMC_MDEP_INIT_INTEL_V2(sandybridge);
3433                 break;
3434         case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
3435                 PMC_MDEP_INIT_INTEL_V2(sandybridge_xeon);
3436                 break;
3437         case PMC_CPU_INTEL_WESTMERE:
3438                 pmc_class_table[n++] = &ucf_class_table_descr;
3439                 pmc_class_table[n++] = &westmereuc_class_table_descr;
3440                 PMC_MDEP_INIT_INTEL_V2(westmere);
3441                 break;
3442         case PMC_CPU_INTEL_WESTMERE_EX:
3443                 PMC_MDEP_INIT_INTEL_V2(westmere_ex);
3444                 break;
3445         case PMC_CPU_INTEL_PIV:
3446                 PMC_MDEP_INIT(p4);
3447                 pmc_class_table[n] = &p4_class_table_descr;
3448                 break;
3449 #endif
3450         case PMC_CPU_GENERIC:
3451                 PMC_MDEP_INIT(generic);
3452                 break;
3453 #if defined(__arm__)
3454 #if defined(__XSCALE__)
3455         case PMC_CPU_INTEL_XSCALE:
3456                 PMC_MDEP_INIT(xscale);
3457                 pmc_class_table[n] = &xscale_class_table_descr;
3458                 break;
3459 #endif
3460         case PMC_CPU_ARMV7_CORTEX_A8:
3461                 PMC_MDEP_INIT(cortex_a8);
3462                 pmc_class_table[n] = &cortex_a8_class_table_descr;
3463                 break;
3464         case PMC_CPU_ARMV7_CORTEX_A9:
3465                 PMC_MDEP_INIT(cortex_a9);
3466                 pmc_class_table[n] = &cortex_a9_class_table_descr;
3467                 break;
3468 #endif
3469 #if defined(__aarch64__)
3470         case PMC_CPU_ARMV8_CORTEX_A53:
3471                 PMC_MDEP_INIT(cortex_a53);
3472                 pmc_class_table[n] = &cortex_a53_class_table_descr;
3473                 break;
3474         case PMC_CPU_ARMV8_CORTEX_A57:
3475                 PMC_MDEP_INIT(cortex_a57);
3476                 pmc_class_table[n] = &cortex_a57_class_table_descr;
3477                 break;
3478 #endif
3479 #if defined(__mips__)
3480         case PMC_CPU_MIPS_24K:
3481                 PMC_MDEP_INIT(mips24k);
3482                 pmc_class_table[n] = &mips24k_class_table_descr;
3483                 break;
3484         case PMC_CPU_MIPS_74K:
3485                 PMC_MDEP_INIT(mips74k);
3486                 pmc_class_table[n] = &mips74k_class_table_descr;
3487                 break;
3488         case PMC_CPU_MIPS_OCTEON:
3489                 PMC_MDEP_INIT(octeon);
3490                 pmc_class_table[n] = &octeon_class_table_descr;
3491                 break;
3492 #endif /* __mips__ */
3493 #if defined(__powerpc__)
3494         case PMC_CPU_PPC_7450:
3495                 PMC_MDEP_INIT(ppc7450);
3496                 pmc_class_table[n] = &ppc7450_class_table_descr;
3497                 break;
3498         case PMC_CPU_PPC_970:
3499                 PMC_MDEP_INIT(ppc970);
3500                 pmc_class_table[n] = &ppc970_class_table_descr;
3501                 break;
3502         case PMC_CPU_PPC_E500:
3503                 PMC_MDEP_INIT(e500);
3504                 pmc_class_table[n] = &e500_class_table_descr;
3505                 break;
3506 #endif
3507         default:
3508                 /*
3509                  * Some kind of CPU this version of the library knows nothing
3510                  * A CPU type this version of the library knows nothing
3511                  * about.  This shouldn't happen, since the ABI version
3512                  * check should have caught the mismatch.
3513                 errno = ENXIO;
3514                 return (pmc_syscall = -1);
3515         }
3516
3517         return (0);
3518 }
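
/*
 * Editor's sketch (not part of the library source): a typical caller
 * runs pmc_init() once and then queries the CPU description; this
 * assumes the hwpmc(4) kernel module is loaded and that <err.h> and
 * <stdio.h> are included.
 *
 *	const struct pmc_cpuinfo *ci;
 *
 *	if (pmc_init() < 0)
 *		err(1, "pmc_init");
 *	if (pmc_cpuinfo(&ci) < 0)
 *		err(1, "pmc_cpuinfo");
 *	printf("%s: %u cpus, %u pmcs, %u classes\n",
 *	    pmc_name_of_cputype(ci->pm_cputype), ci->pm_ncpu,
 *	    ci->pm_npmc, ci->pm_nclass);
 */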
3519
3520 const char *
3521 pmc_name_of_capability(enum pmc_caps cap)
3522 {
3523         int i;
3524
3525         /*
3526          * 'cap' should have exactly one bit set and should be in range;
3527          * "cap & (cap - 1)" is non-zero when more than one bit is set.
3528          */
3529         if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
3530             cap > PMC_CAP_LAST) {
3531                 errno = EINVAL;
3532                 return (NULL);
3533         }
3534
3535         i = ffs(cap);
3536         return (pmc_capability_names[i - 1]);
3537 }
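
/*
 * Editor's sketch (not part of the library source): decoding a
 * capability mask returned by pmc_capabilities() into names.  'pmcid'
 * is assumed to identify an allocated PMC and <stdio.h> to be included.
 *
 *	uint32_t b, caps;
 *	const char *name;
 *
 *	if (pmc_capabilities(pmcid, &caps) == 0)
 *		for (b = 1; b != 0; b <<= 1)
 *			if ((caps & b) != 0 &&
 *			    (name = pmc_name_of_capability(b)) != NULL)
 *				printf("%s\n", name);
 */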
3538
3539 const char *
3540 pmc_name_of_class(enum pmc_class pc)
3541 {
3542         size_t n;
3543
3544         for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
3545                 if (pc == pmc_class_names[n].pm_class)
3546                         return (pmc_class_names[n].pm_name);
3547
3548         errno = EINVAL;
3549         return (NULL);
3550 }
3551
3552 const char *
3553 pmc_name_of_cputype(enum pmc_cputype cp)
3554 {
3555         size_t n;
3556
3557         for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
3558                 if (cp == pmc_cputype_names[n].pm_cputype)
3559                         return (pmc_cputype_names[n].pm_name);
3560
3561         errno = EINVAL;
3562         return (NULL);
3563 }
3564
3565 const char *
3566 pmc_name_of_disposition(enum pmc_disp pd)
3567 {
3568         if ((int) pd >= PMC_DISP_FIRST &&
3569             pd <= PMC_DISP_LAST)
3570                 return (pmc_disposition_names[pd]);
3571
3572         errno = EINVAL;
3573         return (NULL);
3574 }
3575
3576 const char *
3577 _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
3578 {
3579         const struct pmc_event_descr *ev, *evfence;
3580
3581         ev = evfence = NULL;
3582         if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
3583                 ev = iaf_event_table;
3584                 evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
3585         } else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
3586                 switch (cpu) {
3587                 case PMC_CPU_INTEL_ATOM:
3588                         ev = atom_event_table;
3589                         evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
3590                         break;
3591                 case PMC_CPU_INTEL_ATOM_SILVERMONT:
3592                         ev = atom_silvermont_event_table;
3593                         evfence = atom_silvermont_event_table +
3594                             PMC_EVENT_TABLE_SIZE(atom_silvermont);
3595                         break;
3596                 case PMC_CPU_INTEL_CORE:
3597                         ev = core_event_table;
3598                         evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
3599                         break;
3600                 case PMC_CPU_INTEL_CORE2:
3601                 case PMC_CPU_INTEL_CORE2EXTREME:
3602                         ev = core2_event_table;
3603                         evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
3604                         break;
3605                 case PMC_CPU_INTEL_COREI7:
3606                         ev = corei7_event_table;
3607                         evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7);
3608                         break;
3609                 case PMC_CPU_INTEL_NEHALEM_EX:
3610                         ev = nehalem_ex_event_table;
3611                         evfence = nehalem_ex_event_table +
3612                             PMC_EVENT_TABLE_SIZE(nehalem_ex);
3613                         break;
3614                 case PMC_CPU_INTEL_HASWELL:
3615                         ev = haswell_event_table;
3616                         evfence = haswell_event_table + PMC_EVENT_TABLE_SIZE(haswell);
3617                         break;
3618                 case PMC_CPU_INTEL_HASWELL_XEON:
3619                         ev = haswell_xeon_event_table;
3620                         evfence = haswell_xeon_event_table + PMC_EVENT_TABLE_SIZE(haswell_xeon);
3621                         break;
3622                 case PMC_CPU_INTEL_BROADWELL:
3623                         ev = broadwell_event_table;
3624                         evfence = broadwell_event_table + PMC_EVENT_TABLE_SIZE(broadwell);
3625                         break;
3626                 case PMC_CPU_INTEL_BROADWELL_XEON:
3627                         ev = broadwell_xeon_event_table;
3628                         evfence = broadwell_xeon_event_table + PMC_EVENT_TABLE_SIZE(broadwell_xeon);
3629                         break;
3630                 case PMC_CPU_INTEL_SKYLAKE:
3631                         ev = skylake_event_table;
3632                         evfence = skylake_event_table +
3633                             PMC_EVENT_TABLE_SIZE(skylake);
3634                         break;
3635                 case PMC_CPU_INTEL_SKYLAKE_XEON:
3636                         ev = skylake_xeon_event_table;
3637                         evfence = skylake_xeon_event_table +
3638                             PMC_EVENT_TABLE_SIZE(skylake_xeon);
3639                         break;
3640                 case PMC_CPU_INTEL_IVYBRIDGE:
3641                         ev = ivybridge_event_table;
3642                         evfence = ivybridge_event_table + PMC_EVENT_TABLE_SIZE(ivybridge);
3643                         break;
3644                 case PMC_CPU_INTEL_IVYBRIDGE_XEON:
3645                         ev = ivybridge_xeon_event_table;
3646                         evfence = ivybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(ivybridge_xeon);
3647                         break;
3648                 case PMC_CPU_INTEL_SANDYBRIDGE:
3649                         ev = sandybridge_event_table;
3650                         evfence = sandybridge_event_table + PMC_EVENT_TABLE_SIZE(sandybridge);
3651                         break;
3652                 case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
3653                         ev = sandybridge_xeon_event_table;
3654                         evfence = sandybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
3655                         break;
3656                 case PMC_CPU_INTEL_WESTMERE:
3657                         ev = westmere_event_table;
3658                         evfence = westmere_event_table + PMC_EVENT_TABLE_SIZE(westmere);
3659                         break;
3660                 case PMC_CPU_INTEL_WESTMERE_EX:
3661                         ev = westmere_ex_event_table;
3662                         evfence = westmere_ex_event_table +
3663                             PMC_EVENT_TABLE_SIZE(westmere_ex);
3664                         break;
3665                 default:        /* Unknown CPU type. */
3666                         break;
3667                 }
3668         } else if (pe >= PMC_EV_UCF_FIRST && pe <= PMC_EV_UCF_LAST) {
3669                 ev = ucf_event_table;
3670                 evfence = ucf_event_table + PMC_EVENT_TABLE_SIZE(ucf);
3671         } else if (pe >= PMC_EV_UCP_FIRST && pe <= PMC_EV_UCP_LAST) {
3672                 switch (cpu) {
3673                 case PMC_CPU_INTEL_COREI7:
3674                         ev = corei7uc_event_table;
3675                         evfence = corei7uc_event_table + PMC_EVENT_TABLE_SIZE(corei7uc);
3676                         break;
3677                 case PMC_CPU_INTEL_SANDYBRIDGE:
3678                         ev = sandybridgeuc_event_table;
3679                         evfence = sandybridgeuc_event_table + PMC_EVENT_TABLE_SIZE(sandybridgeuc);
3680                         break;
3681                 case PMC_CPU_INTEL_WESTMERE:
3682                         ev = westmereuc_event_table;
3683                         evfence = westmereuc_event_table + PMC_EVENT_TABLE_SIZE(westmereuc);
3684                         break;
3685                 default:        /* Unknown CPU type. */
3686                         break;
3687                 }
3688         } else if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
3689                 ev = k7_event_table;
3690                 evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
3691         } else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
3692                 ev = k8_event_table;
3693                 evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
3694         } else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
3695                 ev = p4_event_table;
3696                 evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
3697         } else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
3698                 ev = p5_event_table;
3699                 evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
3700         } else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
3701                 ev = p6_event_table;
3702                 evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
3703         } else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
3704                 ev = xscale_event_table;
3705                 evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
3706         } else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
3707                 switch (cpu) {
3708                 case PMC_CPU_ARMV7_CORTEX_A8:
3709                         ev = cortex_a8_event_table;
3710                         evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
3711                         break;
3712                 case PMC_CPU_ARMV7_CORTEX_A9:
3713                         ev = cortex_a9_event_table;
3714                         evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
3715                         break;
3716                 default:        /* Unknown CPU type. */
3717                         break;
3718                 }
3719         } else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
3720                 switch (cpu) {
3721                 case PMC_CPU_ARMV8_CORTEX_A53:
3722                         ev = cortex_a53_event_table;
3723                         evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
3724                         break;
3725                 case PMC_CPU_ARMV8_CORTEX_A57:
3726                         ev = cortex_a57_event_table;
3727                         evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
3728                         break;
3729                 default:        /* Unknown CPU type. */
3730                         break;
3731                 }
3732         } else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
3733                 ev = mips24k_event_table;
3734                 evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
3735         } else if (pe >= PMC_EV_MIPS74K_FIRST && pe <= PMC_EV_MIPS74K_LAST) {
3736                 ev = mips74k_event_table;
3737                 evfence = mips74k_event_table + PMC_EVENT_TABLE_SIZE(mips74k);
3738         } else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
3739                 ev = octeon_event_table;
3740                 evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
3741         } else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
3742                 ev = ppc7450_event_table;
3743                 evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
3744         } else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
3745                 ev = ppc970_event_table;
3746                 evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
3747         } else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
3748                 ev = e500_event_table;
3749                 evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
3750         } else if (pe == PMC_EV_TSC_TSC) {
3751                 ev = tsc_event_table;
3752                 evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
3753         } else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
3754                 ev = soft_event_table;
3755                 evfence = soft_event_table + soft_event_info.pm_nevent;
3756         }
3757
3758         for (; ev != evfence; ev++)
3759                 if (pe == ev->pm_ev_code)
3760                         return (ev->pm_ev_name);
3761
3762         return (NULL);
3763 }
3764
3765 const char *
3766 pmc_name_of_event(enum pmc_event pe)
3767 {
3768         const char *n;
3769
3770         if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
3771                 return (n);
3772
3773         errno = EINVAL;
3774         return (NULL);
3775 }
3776
3777 const char *
3778 pmc_name_of_mode(enum pmc_mode pm)
3779 {
3780         if ((int) pm >= PMC_MODE_FIRST &&
3781             pm <= PMC_MODE_LAST)
3782                 return (pmc_mode_names[pm]);
3783
3784         errno = EINVAL;
3785         return (NULL);
3786 }
3787
3788 const char *
3789 pmc_name_of_state(enum pmc_state ps)
3790 {
3791         if ((int) ps >= PMC_STATE_FIRST &&
3792             ps <= PMC_STATE_LAST)
3793                 return (pmc_state_names[ps]);
3794
3795         errno = EINVAL;
3796         return (NULL);
3797 }
3798
3799 int
3800 pmc_ncpu(void)
3801 {
3802         if (pmc_syscall == -1) {
3803                 errno = ENXIO;
3804                 return (-1);
3805         }
3806
3807         return (cpu_info.pm_ncpu);
3808 }
3809
3810 int
3811 pmc_npmc(int cpu)
3812 {
3813         if (pmc_syscall == -1) {
3814                 errno = ENXIO;
3815                 return (-1);
3816         }
3817
3818         if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
3819                 errno = EINVAL;
3820                 return (-1);
3821         }
3822
3823         return (cpu_info.pm_npmc);
3824 }
3825
3826 int
3827 pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
3828 {
3829         int nbytes, npmc;
3830         struct pmc_op_getpmcinfo *pmci;
3831
3832         if ((npmc = pmc_npmc(cpu)) < 0)
3833                 return (-1);
3834
3835         nbytes = sizeof(struct pmc_op_getpmcinfo) +
3836             npmc * sizeof(struct pmc_info);
3837
3838         if ((pmci = calloc(1, nbytes)) == NULL)
3839                 return (-1);
3840
3841         pmci->pm_cpu  = cpu;
3842
3843         if (PMC_CALL(GETPMCINFO, pmci) < 0) {
3844                 free(pmci);
3845                 return (-1);
3846         }
3847
3848         /* the kernel<->library and library<->userland interfaces are identical */
3849         *ppmci = (struct pmc_pmcinfo *) pmci;
3850         return (0);
3851 }
3852
3853 int
3854 pmc_read(pmc_id_t pmc, pmc_value_t *value)
3855 {
3856         struct pmc_op_pmcrw pmc_read_op;
3857
3858         pmc_read_op.pm_pmcid = pmc;
3859         pmc_read_op.pm_flags = PMC_F_OLDVALUE;
3860         pmc_read_op.pm_value = -1;
3861
3862         if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
3863                 return (-1);
3864
3865         *value = pmc_read_op.pm_value;
3866         return (0);
3867 }
3868
3869 int
3870 pmc_release(pmc_id_t pmc)
3871 {
3872         struct pmc_op_simple    pmc_release_args;
3873
3874         pmc_release_args.pm_pmcid = pmc;
3875         return (PMC_CALL(PMCRELEASE, &pmc_release_args));
3876 }
3877
3878 int
3879 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
3880 {
3881         struct pmc_op_pmcrw pmc_rw_op;
3882
3883         pmc_rw_op.pm_pmcid = pmc;
3884         pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
3885         pmc_rw_op.pm_value = newvalue;
3886
3887         if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
3888                 return (-1);
3889
3890         *oldvaluep = pmc_rw_op.pm_value;
3891         return (0);
3892 }
3893
3894 int
3895 pmc_set(pmc_id_t pmc, pmc_value_t value)
3896 {
3897         struct pmc_op_pmcsetcount sc;
3898
3899         sc.pm_pmcid = pmc;
3900         sc.pm_count = value;
3901
3902         if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
3903                 return (-1);
3904         return (0);
3905 }
3906
3907 int
3908 pmc_start(pmc_id_t pmc)
3909 {
3910         struct pmc_op_simple    pmc_start_args;
3911
3912         pmc_start_args.pm_pmcid = pmc;
3913         return (PMC_CALL(PMCSTART, &pmc_start_args));
3914 }
3915
3916 int
3917 pmc_stop(pmc_id_t pmc)
3918 {
3919         struct pmc_op_simple    pmc_stop_args;
3920
3921         pmc_stop_args.pm_pmcid = pmc;
3922         return (PMC_CALL(PMCSTOP, &pmc_stop_args));
3923 }
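
/*
 * Editor's sketch (not part of the library source): the usual life
 * cycle of a counting-mode PMC once a pmc_id_t has been obtained from
 * pmc_allocate() (and, for process-scope PMCs, attached with
 * pmc_attach()).  'pmcid' is assumed to be such an id, workload() is a
 * stand-in for the code being measured, and <err.h> and <stdio.h> are
 * assumed to be included.
 *
 *	pmc_value_t v;
 *
 *	if (pmc_start(pmcid) < 0)
 *		err(1, "pmc_start");
 *	workload();
 *	if (pmc_read(pmcid, &v) < 0)
 *		err(1, "pmc_read");
 *	if (pmc_stop(pmcid) < 0)
 *		err(1, "pmc_stop");
 *	printf("count: %ju\n", (uintmax_t)v);
 *	pmc_release(pmcid);
 */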
3924
3925 int
3926 pmc_width(pmc_id_t pmcid, uint32_t *width)
3927 {
3928         unsigned int i;
3929         enum pmc_class cl;
3930
3931         cl = PMC_ID_TO_CLASS(pmcid);
3932         for (i = 0; i < cpu_info.pm_nclass; i++)
3933                 if (cpu_info.pm_classes[i].pm_class == cl) {
3934                         *width = cpu_info.pm_classes[i].pm_width;
3935                         return (0);
3936                 }
3937         errno = EINVAL;
3938         return (-1);
3939 }
3940
3941 int
3942 pmc_write(pmc_id_t pmc, pmc_value_t value)
3943 {
3944         struct pmc_op_pmcrw pmc_write_op;
3945
3946         pmc_write_op.pm_pmcid = pmc;
3947         pmc_write_op.pm_flags = PMC_F_NEWVALUE;
3948         pmc_write_op.pm_value = value;
3949         return (PMC_CALL(PMCRW, &pmc_write_op));
3950 }
3951
3952 int
3953 pmc_writelog(uint32_t userdata)
3954 {
3955         struct pmc_op_writelog wl;
3956
3957         wl.pm_userdata = userdata;
3958         return (PMC_CALL(WRITELOG, &wl));
3959 }