1 /*-
2  * Copyright (c) 2003-2008 Joseph Koshy
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/module.h>
33 #include <sys/pmc.h>
34 #include <sys/syscall.h>
35
36 #include <ctype.h>
37 #include <errno.h>
38 #include <fcntl.h>
39 #include <pmc.h>
40 #include <stdio.h>
41 #include <stdlib.h>
42 #include <string.h>
43 #include <strings.h>
44 #include <unistd.h>
45
46 #include "libpmcinternal.h"
47
48 /* Function prototypes */
49 #if defined(__i386__)
50 static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
51     struct pmc_op_pmcallocate *_pmc_config);
52 #endif
53 #if defined(__amd64__) || defined(__i386__)
54 static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
55     struct pmc_op_pmcallocate *_pmc_config);
56 static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
57     struct pmc_op_pmcallocate *_pmc_config);
58 static int ucf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
59     struct pmc_op_pmcallocate *_pmc_config);
60 static int ucp_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
61     struct pmc_op_pmcallocate *_pmc_config);
62 static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
63     struct pmc_op_pmcallocate *_pmc_config);
64 static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
65     struct pmc_op_pmcallocate *_pmc_config);
66 #endif
67 #if defined(__i386__)
68 static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
69     struct pmc_op_pmcallocate *_pmc_config);
70 static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
71     struct pmc_op_pmcallocate *_pmc_config);
72 #endif
73 #if defined(__amd64__) || defined(__i386__)
74 static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
75     struct pmc_op_pmcallocate *_pmc_config);
76 #endif
77 #if defined(__XSCALE__)
78 static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
79     struct pmc_op_pmcallocate *_pmc_config);
80 #endif
81 #if defined(__mips__)
82 static int mips_allocate_pmc(enum pmc_event _pe, char* ctrspec,
83                              struct pmc_op_pmcallocate *_pmc_config);
84 #endif /* __mips__ */
85 static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
86     struct pmc_op_pmcallocate *_pmc_config);
87
88 #if defined(__powerpc__)
89 static int powerpc_allocate_pmc(enum pmc_event _pe, char* ctrspec,
90                              struct pmc_op_pmcallocate *_pmc_config);
91 #endif /* __powerpc__ */
92
93 #define PMC_CALL(cmd, params)                           \
94         syscall(pmc_syscall, PMC_OP_##cmd, (params))
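/*
 * Illustration (assuming a caller-supplied request structure named 'args'):
 * PMC_CALL(PMCALLOCATE, &args) expands to
 *
 *         syscall(pmc_syscall, PMC_OP_PMCALLOCATE, &args)
 *
 * where pmc_syscall is the hwpmc system call number filled in by
 * pmc_init().
 */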
95
96 /*
97  * Event aliases provide a way for the user to ask for generic events
98  * like "cache-misses", or "instructions-retired".  These aliases are
99  * mapped to the appropriate canonical event descriptions using a
100  * lookup table.
101  */
102 struct pmc_event_alias {
103         const char      *pm_alias;
104         const char      *pm_spec;
105 };
106
107 static const struct pmc_event_alias *pmc_mdep_event_aliases;
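/*
 * For example, an entry such as EV_ALIAS("cycles", "tsc") in the per-CPU
 * tables below lets the user write "cycles" instead of the canonical "tsc"
 * event specification.  pmc_mdep_event_aliases is later pointed at the
 * alias table that matches the CPU the library is running on.
 */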
108
109 /*
110  * The pmc_event_descr structure maps symbolic names known to the user
111  * to integer codes used by the PMC KLD.
112  */
113 struct pmc_event_descr {
114         const char      *pm_ev_name;
115         enum pmc_event  pm_ev_code;
116 };
117
118 /*
119  * The pmc_class_descr structure maps class name prefixes for
120  * event names to event tables and other PMC class data.
121  */
122 struct pmc_class_descr {
123         const char      *pm_evc_name;
124         size_t          pm_evc_name_size;
125         enum pmc_class  pm_evc_class;
126         const struct pmc_event_descr *pm_evc_event_table;
127         size_t          pm_evc_event_table_size;
128         int             (*pm_evc_allocate_pmc)(enum pmc_event _pe,
129                             char *_ctrspec, struct pmc_op_pmcallocate *_pa);
130 };
131
132 #define PMC_TABLE_SIZE(N)       (sizeof(N)/sizeof(N[0]))
133 #define PMC_EVENT_TABLE_SIZE(N) PMC_TABLE_SIZE(N##_event_table)
134
135 #undef  __PMC_EV
136 #define __PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
137
138 /*
139  * PMC_CLASSDEP_TABLE(NAME, CLASS)
140  *
141  * Define a table mapping event names and aliases to HWPMC event IDs.
142  */
143 #define PMC_CLASSDEP_TABLE(N, C)                                \
144         static const struct pmc_event_descr N##_event_table[] = \
145         {                                                       \
146                 __PMC_EV_##C()                                  \
147         }
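/*
 * As an illustration, PMC_CLASSDEP_TABLE(k7, K7) below expands to
 *
 *         static const struct pmc_event_descr k7_event_table[] = {
 *                 __PMC_EV_K7()
 *         };
 *
 * where each __PMC_EV(K7, NAME) entry supplied by the kernel's pmc event
 * lists becomes { "NAME", PMC_EV_K7_NAME } via the __PMC_EV definition
 * above.
 */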
148
149 PMC_CLASSDEP_TABLE(iaf, IAF);
150 PMC_CLASSDEP_TABLE(k7, K7);
151 PMC_CLASSDEP_TABLE(k8, K8);
152 PMC_CLASSDEP_TABLE(p4, P4);
153 PMC_CLASSDEP_TABLE(p5, P5);
154 PMC_CLASSDEP_TABLE(p6, P6);
155 PMC_CLASSDEP_TABLE(xscale, XSCALE);
156 PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
157 PMC_CLASSDEP_TABLE(octeon, OCTEON);
158 PMC_CLASSDEP_TABLE(ucf, UCF);
159 PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
160 PMC_CLASSDEP_TABLE(ppc970, PPC970);
161
162 static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];
163
164 #undef  __PMC_EV_ALIAS
165 #define __PMC_EV_ALIAS(N,CODE)  { N, PMC_EV_##CODE },
166
167 static const struct pmc_event_descr atom_event_table[] =
168 {
169         __PMC_EV_ALIAS_ATOM()
170 };
171
172 static const struct pmc_event_descr atom_silvermont_event_table[] =
173 {
174         __PMC_EV_ALIAS_ATOM_SILVERMONT()
175 };
176
177 static const struct pmc_event_descr core_event_table[] =
178 {
179         __PMC_EV_ALIAS_CORE()
180 };
181
182
183 static const struct pmc_event_descr core2_event_table[] =
184 {
185         __PMC_EV_ALIAS_CORE2()
186 };
187
188 static const struct pmc_event_descr corei7_event_table[] =
189 {
190         __PMC_EV_ALIAS_COREI7()
191 };
192
193 static const struct pmc_event_descr nehalem_ex_event_table[] =
194 {
195         __PMC_EV_ALIAS_COREI7()
196 };
197
198 static const struct pmc_event_descr haswell_event_table[] =
199 {
200         __PMC_EV_ALIAS_HASWELL()
201 };
202
203 static const struct pmc_event_descr haswell_xeon_event_table[] =
204 {
205         __PMC_EV_ALIAS_HASWELL_XEON()
206 };
207
208 static const struct pmc_event_descr broadwell_event_table[] =
209 {
210         __PMC_EV_ALIAS_BROADWELL()
211 };
212
213 static const struct pmc_event_descr broadwell_xeon_event_table[] =
214 {
215         __PMC_EV_ALIAS_BROADWELL_XEON()
216 };
217
218 static const struct pmc_event_descr skylake_event_table[] =
219 {
220         __PMC_EV_ALIAS_SKYLAKE()
221 };
222
223 static const struct pmc_event_descr ivybridge_event_table[] =
224 {
225         __PMC_EV_ALIAS_IVYBRIDGE()
226 };
227
228 static const struct pmc_event_descr ivybridge_xeon_event_table[] = 
229 {
230         __PMC_EV_ALIAS_IVYBRIDGE_XEON()
231 };
232
233 static const struct pmc_event_descr sandybridge_event_table[] = 
234 {
235         __PMC_EV_ALIAS_SANDYBRIDGE()
236 };
237
238 static const struct pmc_event_descr sandybridge_xeon_event_table[] = 
239 {
240         __PMC_EV_ALIAS_SANDYBRIDGE_XEON()
241 };
242
243 static const struct pmc_event_descr westmere_event_table[] =
244 {
245         __PMC_EV_ALIAS_WESTMERE()
246 };
247
248 static const struct pmc_event_descr westmere_ex_event_table[] =
249 {
250         __PMC_EV_ALIAS_WESTMERE()
251 };
252
253 static const struct pmc_event_descr corei7uc_event_table[] =
254 {
255         __PMC_EV_ALIAS_COREI7UC()
256 };
257
258 static const struct pmc_event_descr haswelluc_event_table[] =
259 {
260         __PMC_EV_ALIAS_HASWELLUC()
261 };
262
263 static const struct pmc_event_descr broadwelluc_event_table[] =
264 {
265         __PMC_EV_ALIAS_BROADWELLUC()
266 };
267
268 static const struct pmc_event_descr sandybridgeuc_event_table[] =
269 {
270         __PMC_EV_ALIAS_SANDYBRIDGEUC()
271 };
272
273 static const struct pmc_event_descr westmereuc_event_table[] =
274 {
275         __PMC_EV_ALIAS_WESTMEREUC()
276 };
277
278 /*
279  * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
280  *
281  * Map a CPU to the PMC classes it supports.
282  */
283 #define PMC_MDEP_TABLE(N,C,...)                         \
284         static const enum pmc_class N##_pmc_classes[] = {       \
285                 PMC_CLASS_##C, __VA_ARGS__                      \
286         }
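/*
 * For example, PMC_MDEP_TABLE(k7, K7, PMC_CLASS_SOFT, PMC_CLASS_TSC)
 * below expands to
 *
 *         static const enum pmc_class k7_pmc_classes[] = {
 *                 PMC_CLASS_K7, PMC_CLASS_SOFT, PMC_CLASS_TSC
 *         };
 *
 * i.e. a K7 system exposes its hardware counters plus the software and
 * TSC classes.
 */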
287
288 PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
289 PMC_MDEP_TABLE(atom_silvermont, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
290 PMC_MDEP_TABLE(core, IAP, PMC_CLASS_SOFT, PMC_CLASS_TSC);
291 PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
292 PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
293 PMC_MDEP_TABLE(nehalem_ex, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
294 PMC_MDEP_TABLE(haswell, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
295 PMC_MDEP_TABLE(haswell_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
296 PMC_MDEP_TABLE(broadwell, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
297 PMC_MDEP_TABLE(broadwell_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
298 PMC_MDEP_TABLE(skylake, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
299 PMC_MDEP_TABLE(ivybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
300 PMC_MDEP_TABLE(ivybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
301 PMC_MDEP_TABLE(sandybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
302 PMC_MDEP_TABLE(sandybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
303 PMC_MDEP_TABLE(westmere, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
304 PMC_MDEP_TABLE(westmere_ex, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
305 PMC_MDEP_TABLE(k7, K7, PMC_CLASS_SOFT, PMC_CLASS_TSC);
306 PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC);
307 PMC_MDEP_TABLE(p4, P4, PMC_CLASS_SOFT, PMC_CLASS_TSC);
308 PMC_MDEP_TABLE(p5, P5, PMC_CLASS_SOFT, PMC_CLASS_TSC);
309 PMC_MDEP_TABLE(p6, P6, PMC_CLASS_SOFT, PMC_CLASS_TSC);
310 PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
311 PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
312 PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
313 PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450);
314 PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970);
315 PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT);
316
317 static const struct pmc_event_descr tsc_event_table[] =
318 {
319         __PMC_EV_TSC()
320 };
321
322 #undef  PMC_CLASS_TABLE_DESC
323 #define PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)    \
324 static const struct pmc_class_descr NAME##_class_table_descr =  \
325         {                                                       \
326                 .pm_evc_name  = #CLASS "-",                     \
327                 .pm_evc_name_size = sizeof(#CLASS "-") - 1,     \
328                 .pm_evc_class = PMC_CLASS_##CLASS ,             \
329                 .pm_evc_event_table = EVENTS##_event_table ,    \
330                 .pm_evc_event_table_size =                      \
331                         PMC_EVENT_TABLE_SIZE(EVENTS),           \
332                 .pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc \
333         }
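/*
 * As an illustration, PMC_CLASS_TABLE_DESC(k7, K7, k7, k7) below produces
 * a descriptor equivalent to
 *
 *         static const struct pmc_class_descr k7_class_table_descr = {
 *                 .pm_evc_name = "K7-",
 *                 .pm_evc_name_size = sizeof("K7-") - 1,
 *                 .pm_evc_class = PMC_CLASS_K7,
 *                 .pm_evc_event_table = k7_event_table,
 *                 .pm_evc_event_table_size = PMC_EVENT_TABLE_SIZE(k7),
 *                 .pm_evc_allocate_pmc = k7_allocate_pmc
 *         };
 *
 * so event specifications carrying the "K7-" class prefix are resolved
 * against k7_event_table and parsed by k7_allocate_pmc().
 */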
334
335 #if     defined(__i386__) || defined(__amd64__)
336 PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
337 PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
338 PMC_CLASS_TABLE_DESC(atom_silvermont, IAP, atom_silvermont, iap);
339 PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
340 PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
341 PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap);
342 PMC_CLASS_TABLE_DESC(nehalem_ex, IAP, nehalem_ex, iap);
343 PMC_CLASS_TABLE_DESC(haswell, IAP, haswell, iap);
344 PMC_CLASS_TABLE_DESC(haswell_xeon, IAP, haswell_xeon, iap);
345 PMC_CLASS_TABLE_DESC(broadwell, IAP, broadwell, iap);
346 PMC_CLASS_TABLE_DESC(broadwell_xeon, IAP, broadwell_xeon, iap);
347 PMC_CLASS_TABLE_DESC(skylake, IAP, skylake, iap);
348 PMC_CLASS_TABLE_DESC(ivybridge, IAP, ivybridge, iap);
349 PMC_CLASS_TABLE_DESC(ivybridge_xeon, IAP, ivybridge_xeon, iap);
350 PMC_CLASS_TABLE_DESC(sandybridge, IAP, sandybridge, iap);
351 PMC_CLASS_TABLE_DESC(sandybridge_xeon, IAP, sandybridge_xeon, iap);
352 PMC_CLASS_TABLE_DESC(westmere, IAP, westmere, iap);
353 PMC_CLASS_TABLE_DESC(westmere_ex, IAP, westmere_ex, iap);
354 PMC_CLASS_TABLE_DESC(ucf, UCF, ucf, ucf);
355 PMC_CLASS_TABLE_DESC(corei7uc, UCP, corei7uc, ucp);
356 PMC_CLASS_TABLE_DESC(haswelluc, UCP, haswelluc, ucp);
357 PMC_CLASS_TABLE_DESC(broadwelluc, UCP, broadwelluc, ucp);
358 PMC_CLASS_TABLE_DESC(sandybridgeuc, UCP, sandybridgeuc, ucp);
359 PMC_CLASS_TABLE_DESC(westmereuc, UCP, westmereuc, ucp);
360 #endif
361 #if     defined(__i386__)
362 PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
363 #endif
364 #if     defined(__i386__) || defined(__amd64__)
365 PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
366 PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
367 #endif
368 #if     defined(__i386__)
369 PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
370 PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
371 #endif
372 #if     defined(__i386__) || defined(__amd64__)
373 PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
374 #endif
375 #if     defined(__XSCALE__)
376 PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
377 #endif
378 #if defined(__mips__)
379 PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
380 PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
381 #endif /* __mips__ */
382 #if defined(__powerpc__)
383 PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
384 PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
385 #endif
386
387 static struct pmc_class_descr soft_class_table_descr =
388 {
389         .pm_evc_name  = "SOFT-",
390         .pm_evc_name_size = sizeof("SOFT-") - 1,
391         .pm_evc_class = PMC_CLASS_SOFT,
392         .pm_evc_event_table = NULL,
393         .pm_evc_event_table_size = 0,
394         .pm_evc_allocate_pmc = soft_allocate_pmc
395 };
396
397 #undef  PMC_CLASS_TABLE_DESC
398
399 static const struct pmc_class_descr **pmc_class_table;
400 #define PMC_CLASS_TABLE_SIZE    cpu_info.pm_nclass
401
402 static const enum pmc_class *pmc_mdep_class_list;
403 static size_t pmc_mdep_class_list_size;
404
405 /*
406  * Mapping tables, mapping enumeration values to human readable
407  * strings.
408  */
409
410 static const char * pmc_capability_names[] = {
411 #undef  __PMC_CAP
412 #define __PMC_CAP(N,V,D)        #N ,
413         __PMC_CAPS()
414 };
415
416 static const char * pmc_class_names[] = {
417 #undef  __PMC_CLASS
418 #define __PMC_CLASS(C)  #C ,
419         __PMC_CLASSES()
420 };
421
422 struct pmc_cputype_map {
423         enum pmc_cputype pm_cputype;
424         const char      *pm_name;
425 };
426
427 static const struct pmc_cputype_map pmc_cputype_names[] = {
428 #undef  __PMC_CPU
429 #define __PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
430         __PMC_CPUS()
431 };
432
433 static const char * pmc_disposition_names[] = {
434 #undef  __PMC_DISP
435 #define __PMC_DISP(D)   #D ,
436         __PMC_DISPOSITIONS()
437 };
438
439 static const char * pmc_mode_names[] = {
440 #undef  __PMC_MODE
441 #define __PMC_MODE(M,N) #M ,
442         __PMC_MODES()
443 };
444
445 static const char * pmc_state_names[] = {
446 #undef  __PMC_STATE
447 #define __PMC_STATE(S) #S ,
448         __PMC_STATES()
449 };
450
451 /*
452  * Filled in by pmc_init().
453  */
454 static int pmc_syscall = -1;
455 static struct pmc_cpuinfo cpu_info;
456 static struct pmc_op_getdyneventinfo soft_event_info;
457
458 /* Event masks for events */
459 struct pmc_masks {
460         const char      *pm_name;
461         const uint64_t  pm_value;
462 };
463 #define PMCMASK(N,V)    { .pm_name = #N, .pm_value = (V) }
464 #define NULLMASK        { .pm_name = NULL }
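/*
 * For example, PMCMASK(i, (1 << 8)) in the tables below expands to
 * { .pm_name = "i", .pm_value = (1 << 8) }, and NULLMASK provides the
 * { .pm_name = NULL } sentinel terminating each mask table.
 */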
465
466 #if defined(__amd64__) || defined(__i386__)
467 static int
468 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
469 {
470         const struct pmc_masks *pm;
471         char *q, *r;
472         int c;
473
474         if (pmask == NULL)      /* no mask keywords */
475                 return (-1);
476         q = strchr(p, '=');     /* skip '=' */
477         if (*++q == '\0')       /* no more data */
478                 return (-1);
479         c = 0;                  /* count of mask keywords seen */
480         while ((r = strsep(&q, "+")) != NULL) {
481                 for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
482                     pm++)
483                         ;
484                 if (pm->pm_name == NULL) /* not found */
485                         return (-1);
486                 *evmask |= pm->pm_value;
487                 c++;
488         }
489         return (c);
490 }
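/*
 * Illustrative use of pmc_parse_mask(): given the iap_cachestate_mask
 * table defined later in this file, a modifier of the form
 * "cachestate=i+m" ORs (1 << 8) and (1 << 11) into *evmask and returns 2.
 * A value that does not appear in the table, or an empty value after the
 * '=', makes the function return -1.
 */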
491 #endif
492
493 #define KWMATCH(p,kw)           (strcasecmp((p), (kw)) == 0)
494 #define KWPREFIXMATCH(p,kw)     (strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
495 #define EV_ALIAS(N,S)           { .pm_alias = N, .pm_spec = S }
496
497 #if defined(__i386__)
498
499 /*
500  * AMD K7 (Athlon) CPUs.
501  */
502
503 static struct pmc_event_alias k7_aliases[] = {
504         EV_ALIAS("branches",            "k7-retired-branches"),
505         EV_ALIAS("branch-mispredicts",  "k7-retired-branches-mispredicted"),
506         EV_ALIAS("cycles",              "tsc"),
507         EV_ALIAS("dc-misses",           "k7-dc-misses"),
508         EV_ALIAS("ic-misses",           "k7-ic-misses"),
509         EV_ALIAS("instructions",        "k7-retired-instructions"),
510         EV_ALIAS("interrupts",          "k7-hardware-interrupts"),
511         EV_ALIAS(NULL, NULL)
512 };
513
514 #define K7_KW_COUNT     "count"
515 #define K7_KW_EDGE      "edge"
516 #define K7_KW_INV       "inv"
517 #define K7_KW_OS        "os"
518 #define K7_KW_UNITMASK  "unitmask"
519 #define K7_KW_USR       "usr"
520
521 static int
522 k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
523     struct pmc_op_pmcallocate *pmc_config)
524 {
525         char            *e, *p, *q;
526         int             c, has_unitmask;
527         uint32_t        count, unitmask;
528
529         pmc_config->pm_md.pm_amd.pm_amd_config = 0;
530         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
531
532         if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
533             pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
534             pe == PMC_EV_K7_DC_WRITEBACKS) {
535                 has_unitmask = 1;
536                 unitmask = AMD_PMC_UNITMASK_MOESI;
537         } else
538                 unitmask = has_unitmask = 0;
539
540         while ((p = strsep(&ctrspec, ",")) != NULL) {
541                 if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
542                         q = strchr(p, '=');
543                         if (*++q == '\0') /* skip '=' */
544                                 return (-1);
545
546                         count = strtol(q, &e, 0);
547                         if (e == q || *e != '\0')
548                                 return (-1);
549
550                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
551                         pmc_config->pm_md.pm_amd.pm_amd_config |=
552                             AMD_PMC_TO_COUNTER(count);
553
554                 } else if (KWMATCH(p, K7_KW_EDGE)) {
555                         pmc_config->pm_caps |= PMC_CAP_EDGE;
556                 } else if (KWMATCH(p, K7_KW_INV)) {
557                         pmc_config->pm_caps |= PMC_CAP_INVERT;
558                 } else if (KWMATCH(p, K7_KW_OS)) {
559                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
560                 } else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
561                         if (has_unitmask == 0)
562                                 return (-1);
563                         unitmask = 0;
564                         q = strchr(p, '=');
565                         if (*++q == '\0') /* skip '=' */
566                                 return (-1);
567
568                         while ((c = tolower(*q++)) != 0)
569                                 if (c == 'm')
570                                         unitmask |= AMD_PMC_UNITMASK_M;
571                                 else if (c == 'o')
572                                         unitmask |= AMD_PMC_UNITMASK_O;
573                                 else if (c == 'e')
574                                         unitmask |= AMD_PMC_UNITMASK_E;
575                                 else if (c == 's')
576                                         unitmask |= AMD_PMC_UNITMASK_S;
577                                 else if (c == 'i')
578                                         unitmask |= AMD_PMC_UNITMASK_I;
579                                 else if (c == '+')
580                                         continue;
581                                 else
582                                         return (-1);
583
584                         if (unitmask == 0)
585                                 return (-1);
586
587                 } else if (KWMATCH(p, K7_KW_USR)) {
588                         pmc_config->pm_caps |= PMC_CAP_USER;
589                 } else
590                         return (-1);
591         }
592
593         if (has_unitmask) {
594                 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
595                 pmc_config->pm_md.pm_amd.pm_amd_config |=
596                     AMD_PMC_TO_UNITMASK(unitmask);
597         }
598
599         return (0);
600
601 }
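/*
 * Illustrative user-level specification for this class (event names as
 * documented in pmc.k7(3)):
 *
 *         "k7-dc-refills-from-l2,unitmask=+m+o+e,os,usr"
 *
 * The allocator above only sees the comma-separated modifiers that follow
 * the event name; here they select counting in both kernel and user mode
 * and restrict the refills counted to the M, O and E MOESI states.
 */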
602
603 #endif
604
605 #if defined(__amd64__) || defined(__i386__)
606
607 /*
608  * Intel Core (Family 6, Model E) PMCs.
609  */
610
611 static struct pmc_event_alias core_aliases[] = {
612         EV_ALIAS("branches",            "iap-br-instr-ret"),
613         EV_ALIAS("branch-mispredicts",  "iap-br-mispred-ret"),
614         EV_ALIAS("cycles",              "tsc-tsc"),
615         EV_ALIAS("ic-misses",           "iap-icache-misses"),
616         EV_ALIAS("instructions",        "iap-instr-ret"),
617         EV_ALIAS("interrupts",          "iap-core-hw-int-rx"),
618         EV_ALIAS("unhalted-cycles",     "iap-unhalted-core-cycles"),
619         EV_ALIAS(NULL, NULL)
620 };
621
622 /*
623  * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
624  * and Atom (Family 6, Model 1CH) PMCs.
625  *
626  * We map aliases to events on the fixed-function counters if these
627  * are present.  Note that not all CPUs in this family contain fixed-function
628  * counters.
629  */
630
631 static struct pmc_event_alias core2_aliases[] = {
632         EV_ALIAS("branches",            "iap-br-inst-retired.any"),
633         EV_ALIAS("branch-mispredicts",  "iap-br-inst-retired.mispred"),
634         EV_ALIAS("cycles",              "tsc-tsc"),
635         EV_ALIAS("ic-misses",           "iap-l1i-misses"),
636         EV_ALIAS("instructions",        "iaf-instr-retired.any"),
637         EV_ALIAS("interrupts",          "iap-hw-int-rcv"),
638         EV_ALIAS("unhalted-cycles",     "iaf-cpu-clk-unhalted.core"),
639         EV_ALIAS(NULL, NULL)
640 };
641
642 static struct pmc_event_alias core2_aliases_without_iaf[] = {
643         EV_ALIAS("branches",            "iap-br-inst-retired.any"),
644         EV_ALIAS("branch-mispredicts",  "iap-br-inst-retired.mispred"),
645         EV_ALIAS("cycles",              "tsc-tsc"),
646         EV_ALIAS("ic-misses",           "iap-l1i-misses"),
647         EV_ALIAS("instructions",        "iap-inst-retired.any_p"),
648         EV_ALIAS("interrupts",          "iap-hw-int-rcv"),
649         EV_ALIAS("unhalted-cycles",     "iap-cpu-clk-unhalted.core_p"),
650         EV_ALIAS(NULL, NULL)
651 };
652
653 #define atom_aliases                    core2_aliases
654 #define atom_aliases_without_iaf        core2_aliases_without_iaf
655 #define atom_silvermont_aliases         core2_aliases
656 #define atom_silvermont_aliases_without_iaf     core2_aliases_without_iaf
657 #define corei7_aliases                  core2_aliases
658 #define corei7_aliases_without_iaf      core2_aliases_without_iaf
659 #define nehalem_ex_aliases              core2_aliases
660 #define nehalem_ex_aliases_without_iaf  core2_aliases_without_iaf
661 #define haswell_aliases                 core2_aliases
662 #define haswell_aliases_without_iaf     core2_aliases_without_iaf
663 #define haswell_xeon_aliases                    core2_aliases
664 #define haswell_xeon_aliases_without_iaf        core2_aliases_without_iaf
665 #define broadwell_aliases                       core2_aliases
666 #define broadwell_aliases_without_iaf   core2_aliases_without_iaf
667 #define broadwell_xeon_aliases                  core2_aliases
668 #define broadwell_xeon_aliases_without_iaf      core2_aliases_without_iaf
669 #define skylake_aliases                 core2_aliases
670 #define skylake_aliases_without_iaf     core2_aliases_without_iaf
671 #define ivybridge_aliases               core2_aliases
672 #define ivybridge_aliases_without_iaf   core2_aliases_without_iaf
673 #define ivybridge_xeon_aliases          core2_aliases
674 #define ivybridge_xeon_aliases_without_iaf      core2_aliases_without_iaf
675 #define sandybridge_aliases             core2_aliases
676 #define sandybridge_aliases_without_iaf core2_aliases_without_iaf
677 #define sandybridge_xeon_aliases        core2_aliases
678 #define sandybridge_xeon_aliases_without_iaf    core2_aliases_without_iaf
679 #define westmere_aliases                core2_aliases
680 #define westmere_aliases_without_iaf    core2_aliases_without_iaf
681 #define westmere_ex_aliases             core2_aliases
682 #define westmere_ex_aliases_without_iaf core2_aliases_without_iaf
683
684 #define IAF_KW_OS               "os"
685 #define IAF_KW_USR              "usr"
686 #define IAF_KW_ANYTHREAD        "anythread"
687
688 /*
689  * Parse an event specifier for Intel fixed function counters.
690  */
691 static int
692 iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
693     struct pmc_op_pmcallocate *pmc_config)
694 {
695         char *p;
696
697         (void) pe;
698
699         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
700         pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
701
702         while ((p = strsep(&ctrspec, ",")) != NULL) {
703                 if (KWMATCH(p, IAF_KW_OS))
704                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
705                 else if (KWMATCH(p, IAF_KW_USR))
706                         pmc_config->pm_caps |= PMC_CAP_USER;
707                 else if (KWMATCH(p, IAF_KW_ANYTHREAD))
708                         pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
709                 else
710                         return (-1);
711         }
712
713         return (0);
714 }
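/*
 * Illustrative IAF specification: for a fixed-function event such as
 * "iaf-instr-retired.any" (see the aliases above), the parser accepts any
 * combination of "os", "usr" and "anythread", e.g.
 *
 *         "iaf-instr-retired.any,usr,anythread"
 */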
715
716 /*
717  * Core/Core2 support.
718  */
719
720 #define IAP_KW_AGENT            "agent"
721 #define IAP_KW_ANYTHREAD        "anythread"
722 #define IAP_KW_CACHESTATE       "cachestate"
723 #define IAP_KW_CMASK            "cmask"
724 #define IAP_KW_CORE             "core"
725 #define IAP_KW_EDGE             "edge"
726 #define IAP_KW_INV              "inv"
727 #define IAP_KW_OS               "os"
728 #define IAP_KW_PREFETCH         "prefetch"
729 #define IAP_KW_SNOOPRESPONSE    "snoopresponse"
730 #define IAP_KW_SNOOPTYPE        "snooptype"
731 #define IAP_KW_TRANSITION       "trans"
732 #define IAP_KW_USR              "usr"
733 #define IAP_KW_RSP              "rsp"
734
735 static struct pmc_masks iap_core_mask[] = {
736         PMCMASK(all,    (0x3 << 14)),
737         PMCMASK(this,   (0x1 << 14)),
738         NULLMASK
739 };
740
741 static struct pmc_masks iap_agent_mask[] = {
742         PMCMASK(this,   0),
743         PMCMASK(any,    (0x1 << 13)),
744         NULLMASK
745 };
746
747 static struct pmc_masks iap_prefetch_mask[] = {
748         PMCMASK(both,           (0x3 << 12)),
749         PMCMASK(only,           (0x1 << 12)),
750         PMCMASK(exclude,        0),
751         NULLMASK
752 };
753
754 static struct pmc_masks iap_cachestate_mask[] = {
755         PMCMASK(i,              (1 <<  8)),
756         PMCMASK(s,              (1 <<  9)),
757         PMCMASK(e,              (1 << 10)),
758         PMCMASK(m,              (1 << 11)),
759         NULLMASK
760 };
761
762 static struct pmc_masks iap_snoopresponse_mask[] = {
763         PMCMASK(clean,          (1 << 8)),
764         PMCMASK(hit,            (1 << 9)),
765         PMCMASK(hitm,           (1 << 11)),
766         NULLMASK
767 };
768
769 static struct pmc_masks iap_snooptype_mask[] = {
770         PMCMASK(cmp2s,          (1 << 8)),
771         PMCMASK(cmp2i,          (1 << 9)),
772         NULLMASK
773 };
774
775 static struct pmc_masks iap_transition_mask[] = {
776         PMCMASK(any,            0x00),
777         PMCMASK(frequency,      0x10),
778         NULLMASK
779 };
780
781 static struct pmc_masks iap_rsp_mask_i7_wm[] = {
782         PMCMASK(DMND_DATA_RD,           (1 <<  0)),
783         PMCMASK(DMND_RFO,               (1 <<  1)),
784         PMCMASK(DMND_IFETCH,            (1 <<  2)),
785         PMCMASK(WB,                     (1 <<  3)),
786         PMCMASK(PF_DATA_RD,             (1 <<  4)),
787         PMCMASK(PF_RFO,                 (1 <<  5)),
788         PMCMASK(PF_IFETCH,              (1 <<  6)),
789         PMCMASK(OTHER,                  (1 <<  7)),
790         PMCMASK(UNCORE_HIT,             (1 <<  8)),
791         PMCMASK(OTHER_CORE_HIT_SNP,     (1 <<  9)),
792         PMCMASK(OTHER_CORE_HITM,        (1 << 10)),
793         PMCMASK(REMOTE_CACHE_FWD,       (1 << 12)),
794         PMCMASK(REMOTE_DRAM,            (1 << 13)),
795         PMCMASK(LOCAL_DRAM,             (1 << 14)),
796         PMCMASK(NON_DRAM,               (1 << 15)),
797         NULLMASK
798 };
799
800 static struct pmc_masks iap_rsp_mask_sb_sbx_ib[] = {
801         PMCMASK(REQ_DMND_DATA_RD,       (1ULL <<  0)),
802         PMCMASK(REQ_DMND_RFO,           (1ULL <<  1)),
803         PMCMASK(REQ_DMND_IFETCH,        (1ULL <<  2)),
804         PMCMASK(REQ_WB,                 (1ULL <<  3)),
805         PMCMASK(REQ_PF_DATA_RD,         (1ULL <<  4)),
806         PMCMASK(REQ_PF_RFO,             (1ULL <<  5)),
807         PMCMASK(REQ_PF_IFETCH,          (1ULL <<  6)),
808         PMCMASK(REQ_PF_LLC_DATA_RD,     (1ULL <<  7)),
809         PMCMASK(REQ_PF_LLC_RFO,         (1ULL <<  8)),
810         PMCMASK(REQ_PF_LLC_IFETCH,      (1ULL <<  9)),
811         PMCMASK(REQ_BUS_LOCKS,          (1ULL << 10)),
812         PMCMASK(REQ_STRM_ST,            (1ULL << 11)),
813         PMCMASK(REQ_OTHER,              (1ULL << 15)),
814         PMCMASK(RES_ANY,                (1ULL << 16)),
815         PMCMASK(RES_SUPPLIER_SUPP,      (1ULL << 17)),
816         PMCMASK(RES_SUPPLIER_LLC_HITM,  (1ULL << 18)),
817         PMCMASK(RES_SUPPLIER_LLC_HITE,  (1ULL << 19)),
818         PMCMASK(RES_SUPPLIER_LLC_HITS,  (1ULL << 20)),
819         PMCMASK(RES_SUPPLIER_LLC_HITF,  (1ULL << 21)),
820         PMCMASK(RES_SUPPLIER_LOCAL,     (1ULL << 22)),
821         PMCMASK(RES_SNOOP_SNP_NONE,     (1ULL << 31)),
822         PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
823         PMCMASK(RES_SNOOP_SNP_MISS,     (1ULL << 33)),
824         PMCMASK(RES_SNOOP_HIT_NO_FWD,   (1ULL << 34)),
825         PMCMASK(RES_SNOOP_HIT_FWD,      (1ULL << 35)),
826         PMCMASK(RES_SNOOP_HITM,         (1ULL << 36)),
827         PMCMASK(RES_NON_DRAM,           (1ULL << 37)),
828         NULLMASK
829 };
830
831 /* Broadwell is defined to use the same mask as Haswell */
832 static struct pmc_masks iap_rsp_mask_haswell[] = {
833         PMCMASK(REQ_DMND_DATA_RD,       (1ULL <<  0)),
834         PMCMASK(REQ_DMND_RFO,           (1ULL <<  1)),
835         PMCMASK(REQ_DMND_IFETCH,        (1ULL <<  2)),
836         PMCMASK(REQ_PF_DATA_RD,         (1ULL <<  4)),
837         PMCMASK(REQ_PF_RFO,             (1ULL <<  5)),
838         PMCMASK(REQ_PF_IFETCH,          (1ULL <<  6)),
839         PMCMASK(REQ_OTHER,              (1ULL << 15)),
840         PMCMASK(RES_ANY,                (1ULL << 16)),
841         PMCMASK(RES_SUPPLIER_SUPP,      (1ULL << 17)),
842         PMCMASK(RES_SUPPLIER_LLC_HITM,  (1ULL << 18)),
843         PMCMASK(RES_SUPPLIER_LLC_HITE,  (1ULL << 19)),
844         PMCMASK(RES_SUPPLIER_LLC_HITS,  (1ULL << 20)),
845         PMCMASK(RES_SUPPLIER_LLC_HITF,  (1ULL << 21)),
846         PMCMASK(RES_SUPPLIER_LOCAL,     (1ULL << 22)),
847         /*
848          * For processor type 06_45H, bit 22 is L4_HIT_LOCAL_L4
849          * and bits 23, 24 and 25 are also defined.
850          */
851         PMCMASK(RES_SNOOP_SNP_NONE,     (1ULL << 31)),
852         PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
853         PMCMASK(RES_SNOOP_SNP_MISS,     (1ULL << 33)),
854         PMCMASK(RES_SNOOP_HIT_NO_FWD,   (1ULL << 34)),
855         PMCMASK(RES_SNOOP_HIT_FWD,      (1ULL << 35)),
856         PMCMASK(RES_SNOOP_HITM,         (1ULL << 36)),
857         PMCMASK(RES_NON_DRAM,           (1ULL << 37)),
858         NULLMASK
859 };
860
861 static struct pmc_masks iap_rsp_mask_skylake[] = {
862         PMCMASK(REQ_DMND_DATA_RD,       (1ULL <<  0)),
863         PMCMASK(REQ_DMND_RFO,           (1ULL <<  1)),
864         PMCMASK(REQ_DMND_IFETCH,        (1ULL <<  2)),
865         PMCMASK(REQ_PF_DATA_RD,         (1ULL <<  7)),
866         PMCMASK(REQ_PF_RFO,             (1ULL <<  8)),
867         PMCMASK(REQ_STRM_ST,            (1ULL << 11)),
868         PMCMASK(REQ_OTHER,              (1ULL << 15)),
869         PMCMASK(RES_ANY,                (1ULL << 16)),
870         PMCMASK(RES_SUPPLIER_SUPP,      (1ULL << 17)),
871         PMCMASK(RES_SUPPLIER_LLC_HITM,  (1ULL << 18)),
872         PMCMASK(RES_SUPPLIER_LLC_HITE,  (1ULL << 19)),
873         PMCMASK(RES_SUPPLIER_LLC_HITS,  (1ULL << 20)),
874         PMCMASK(RES_SUPPLIER_L4_HIT,    (1ULL << 22)),
875         PMCMASK(RES_SUPPLIER_DRAM,      (1ULL << 26)),
876         PMCMASK(RES_SUPPLIER_SPL_HIT,   (1ULL << 30)),
877         PMCMASK(RES_SNOOP_SNP_NONE,     (1ULL << 31)),
878         PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
879         PMCMASK(RES_SNOOP_SNP_MISS,     (1ULL << 33)),
880         PMCMASK(RES_SNOOP_HIT_NO_FWD,   (1ULL << 34)),
881         PMCMASK(RES_SNOOP_HIT_FWD,      (1ULL << 35)),
882         PMCMASK(RES_SNOOP_HITM,         (1ULL << 36)),
883         PMCMASK(RES_NON_DRAM,           (1ULL << 37)),
884         NULLMASK
885 };
886
887
888 static int
889 iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
890     struct pmc_op_pmcallocate *pmc_config)
891 {
892         char *e, *p, *q;
893         uint64_t cachestate, evmask, rsp;
894         int count, n;
895
896         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
897             PMC_CAP_QUALIFIER);
898         pmc_config->pm_md.pm_iap.pm_iap_config = 0;
899
900         cachestate = evmask = rsp = 0;
901
902         /* Parse additional modifiers if present */
903         while ((p = strsep(&ctrspec, ",")) != NULL) {
904
905                 n = 0;
906                 if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
907                         q = strchr(p, '=');
908                         if (*++q == '\0') /* skip '=' */
909                                 return (-1);
910                         count = strtol(q, &e, 0);
911                         if (e == q || *e != '\0')
912                                 return (-1);
913                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
914                         pmc_config->pm_md.pm_iap.pm_iap_config |=
915                             IAP_CMASK(count);
916                 } else if (KWMATCH(p, IAP_KW_EDGE)) {
917                         pmc_config->pm_caps |= PMC_CAP_EDGE;
918                 } else if (KWMATCH(p, IAP_KW_INV)) {
919                         pmc_config->pm_caps |= PMC_CAP_INVERT;
920                 } else if (KWMATCH(p, IAP_KW_OS)) {
921                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
922                 } else if (KWMATCH(p, IAP_KW_USR)) {
923                         pmc_config->pm_caps |= PMC_CAP_USER;
924                 } else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
925                         pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
926                 } else if (KWPREFIXMATCH(p, IAP_KW_CORE "=")) {
927                         n = pmc_parse_mask(iap_core_mask, p, &evmask);
928                         if (n != 1)
929                                 return (-1);
930                 } else if (KWPREFIXMATCH(p, IAP_KW_AGENT "=")) {
931                         n = pmc_parse_mask(iap_agent_mask, p, &evmask);
932                         if (n != 1)
933                                 return (-1);
934                 } else if (KWPREFIXMATCH(p, IAP_KW_PREFETCH "=")) {
935                         n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
936                         if (n != 1)
937                                 return (-1);
938                 } else if (KWPREFIXMATCH(p, IAP_KW_CACHESTATE "=")) {
939                         n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
940                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
941                     KWPREFIXMATCH(p, IAP_KW_TRANSITION "=")) {
942                         n = pmc_parse_mask(iap_transition_mask, p, &evmask);
943                         if (n != 1)
944                                 return (-1);
945                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
946                     cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM_SILVERMONT ||
947                     cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
948                     cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME) {
949                         if (KWPREFIXMATCH(p, IAP_KW_SNOOPRESPONSE "=")) {
950                                 n = pmc_parse_mask(iap_snoopresponse_mask, p,
951                                     &evmask);
952                         } else if (KWPREFIXMATCH(p, IAP_KW_SNOOPTYPE "=")) {
953                                 n = pmc_parse_mask(iap_snooptype_mask, p,
954                                     &evmask);
955                         } else
956                                 return (-1);
957                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7 ||
958                     cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE ||
959                     cpu_info.pm_cputype == PMC_CPU_INTEL_NEHALEM_EX ||
960                     cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE_EX) {
961                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
962                                 n = pmc_parse_mask(iap_rsp_mask_i7_wm, p, &rsp);
963                         } else
964                                 return (-1);
965                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE ||
966                     cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE_XEON ||
967                     cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE ||
968                     cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE_XEON) {
969                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
970                                 n = pmc_parse_mask(iap_rsp_mask_sb_sbx_ib, p, &rsp);
971                         } else
972                                 return (-1);
973                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_HASWELL ||
974                     cpu_info.pm_cputype == PMC_CPU_INTEL_HASWELL_XEON) {
975                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
976                                 n = pmc_parse_mask(iap_rsp_mask_haswell, p, &rsp);
977                         } else
978                                 return (-1);
979                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_BROADWELL ||
980                     cpu_info.pm_cputype == PMC_CPU_INTEL_BROADWELL_XEON) {
981                         /* Broadwell is defined to use the same mask as Haswell. */
982                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
983                                 n = pmc_parse_mask(iap_rsp_mask_haswell, p, &rsp);
984                         } else
985                                 return (-1);
986
987                 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SKYLAKE) {
988                         if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
989                                 n = pmc_parse_mask(iap_rsp_mask_skylake, p, &rsp);
990                         } else
991                                 return (-1);
992
993                 } else
994                         return (-1);
995
996                 if (n < 0)      /* Parsing failed. */
997                         return (-1);
998         }
999
1000         pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;
1001
1002         /*
1003          * If the event requires a 'cachestate' qualifier but was not
1004          * specified by the user, use a sensible default.
1005          */
1006         switch (pe) {
1007         case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
1008         case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
1009         case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
1010         case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
1011         case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
1012         case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
1013         case PMC_EV_IAP_EVENT_32H: /* Core */
1014         case PMC_EV_IAP_EVENT_40H: /* Core */
1015         case PMC_EV_IAP_EVENT_41H: /* Core */
1016         case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
1017                 if (cachestate == 0)
1018                         cachestate = (0xF << 8);
1019                 break;
1020         case PMC_EV_IAP_EVENT_77H: /* Atom */
1021                 /* IAP_EVENT_77H only accepts a cachestate qualifier on
1022                  * the Atom processor.
1023                  */
1024                 if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM && cachestate == 0)
1025                         cachestate = (0xF << 8);
1026                 break;
1027         default:
1028                 break;
1029         }
1030
1031         pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
1032         pmc_config->pm_md.pm_iap.pm_iap_rsp = rsp;
1033
1034         return (0);
1035 }
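/*
 * Illustrative modifier strings accepted by the IAP parser above (names
 * to the right of '=' must come from the mask tables earlier in this
 * file):
 *
 *         "cmask=2,inv,usr"
 *         "cachestate=m+e,os"
 *         "rsp=REQ_DMND_DATA_RD+RES_ANY"  (on the Sandy Bridge and later
 *                                          CPUs that take the "rsp" keyword)
 */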
1036
1037 /*
1038  * Intel Uncore.
1039  */
1040
1041 static int
1042 ucf_allocate_pmc(enum pmc_event pe, char *ctrspec,
1043     struct pmc_op_pmcallocate *pmc_config)
1044 {
1045         (void) pe;
1046         (void) ctrspec;
1047
1048         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1049         pmc_config->pm_md.pm_ucf.pm_ucf_flags = 0;
1050
1051         return (0);
1052 }
1053
1054 #define UCP_KW_CMASK            "cmask"
1055 #define UCP_KW_EDGE             "edge"
1056 #define UCP_KW_INV              "inv"
1057
1058 static int
1059 ucp_allocate_pmc(enum pmc_event pe, char *ctrspec,
1060     struct pmc_op_pmcallocate *pmc_config)
1061 {
1062         char *e, *p, *q;
1063         int count, n;
1064
1065         (void) pe;
1066
1067         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
1068             PMC_CAP_QUALIFIER);
1069         pmc_config->pm_md.pm_ucp.pm_ucp_config = 0;
1070
1071         /* Parse additional modifiers if present */
1072         while ((p = strsep(&ctrspec, ",")) != NULL) {
1073
1074                 n = 0;
1075                 if (KWPREFIXMATCH(p, UCP_KW_CMASK "=")) {
1076                         q = strchr(p, '=');
1077                         if (*++q == '\0') /* skip '=' */
1078                                 return (-1);
1079                         count = strtol(q, &e, 0);
1080                         if (e == q || *e != '\0')
1081                                 return (-1);
1082                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1083                         pmc_config->pm_md.pm_ucp.pm_ucp_config |=
1084                             UCP_CMASK(count);
1085                 } else if (KWMATCH(p, UCP_KW_EDGE)) {
1086                         pmc_config->pm_caps |= PMC_CAP_EDGE;
1087                 } else if (KWMATCH(p, UCP_KW_INV)) {
1088                         pmc_config->pm_caps |= PMC_CAP_INVERT;
1089                 } else
1090                         return (-1);
1091
1092                 if (n < 0)      /* Parsing failed. */
1093                         return (-1);
1094         }
1095
1096         return (0);
1097 }
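/*
 * Illustrative UCP specification: the uncore programmable counters accept
 * only the "cmask=N", "edge" and "inv" modifiers, e.g. "cmask=1,edge".
 */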
1098
1099 /*
1100  * AMD K8 PMCs.
1101  *
1102  * These are very similar to AMD K7 PMCs, but support more kinds of
1103  * events.
1104  */
1105
1106 static struct pmc_event_alias k8_aliases[] = {
1107         EV_ALIAS("branches",            "k8-fr-retired-taken-branches"),
1108         EV_ALIAS("branch-mispredicts",
1109             "k8-fr-retired-taken-branches-mispredicted"),
1110         EV_ALIAS("cycles",              "tsc"),
1111         EV_ALIAS("dc-misses",           "k8-dc-miss"),
1112         EV_ALIAS("ic-misses",           "k8-ic-miss"),
1113         EV_ALIAS("instructions",        "k8-fr-retired-x86-instructions"),
1114         EV_ALIAS("interrupts",          "k8-fr-taken-hardware-interrupts"),
1115         EV_ALIAS("unhalted-cycles",     "k8-bu-cpu-clk-unhalted"),
1116         EV_ALIAS(NULL, NULL)
1117 };
1118
1119 #define __K8MASK(N,V) PMCMASK(N,(1 << (V)))
1120
1121 /*
1122  * Parsing tables
1123  */
1124
1125 /* fp dispatched fpu ops */
1126 static const struct pmc_masks k8_mask_fdfo[] = {
1127         __K8MASK(add-pipe-excluding-junk-ops,   0),
1128         __K8MASK(multiply-pipe-excluding-junk-ops,      1),
1129         __K8MASK(store-pipe-excluding-junk-ops, 2),
1130         __K8MASK(add-pipe-junk-ops,             3),
1131         __K8MASK(multiply-pipe-junk-ops,        4),
1132         __K8MASK(store-pipe-junk-ops,           5),
1133         NULLMASK
1134 };
1135
1136 /* ls segment register loads */
1137 static const struct pmc_masks k8_mask_lsrl[] = {
1138         __K8MASK(es,    0),
1139         __K8MASK(cs,    1),
1140         __K8MASK(ss,    2),
1141         __K8MASK(ds,    3),
1142         __K8MASK(fs,    4),
1143         __K8MASK(gs,    5),
1144         __K8MASK(hs,    6),
1145         NULLMASK
1146 };
1147
1148 /* ls locked operation */
1149 static const struct pmc_masks k8_mask_llo[] = {
1150         __K8MASK(locked-instructions,   0),
1151         __K8MASK(cycles-in-request,     1),
1152         __K8MASK(cycles-to-complete,    2),
1153         NULLMASK
1154 };
1155
1156 /* dc refill from {l2,system} and dc copyback */
1157 static const struct pmc_masks k8_mask_dc[] = {
1158         __K8MASK(invalid,       0),
1159         __K8MASK(shared,        1),
1160         __K8MASK(exclusive,     2),
1161         __K8MASK(owner,         3),
1162         __K8MASK(modified,      4),
1163         NULLMASK
1164 };
1165
1166 /* dc one bit ecc error */
1167 static const struct pmc_masks k8_mask_dobee[] = {
1168         __K8MASK(scrubber,      0),
1169         __K8MASK(piggyback,     1),
1170         NULLMASK
1171 };
1172
1173 /* dc dispatched prefetch instructions */
1174 static const struct pmc_masks k8_mask_ddpi[] = {
1175         __K8MASK(load,  0),
1176         __K8MASK(store, 1),
1177         __K8MASK(nta,   2),
1178         NULLMASK
1179 };
1180
1181 /* dc dcache accesses by locks */
1182 static const struct pmc_masks k8_mask_dabl[] = {
1183         __K8MASK(accesses,      0),
1184         __K8MASK(misses,        1),
1185         NULLMASK
1186 };
1187
1188 /* bu internal l2 request */
1189 static const struct pmc_masks k8_mask_bilr[] = {
1190         __K8MASK(ic-fill,       0),
1191         __K8MASK(dc-fill,       1),
1192         __K8MASK(tlb-reload,    2),
1193         __K8MASK(tag-snoop,     3),
1194         __K8MASK(cancelled,     4),
1195         NULLMASK
1196 };
1197
1198 /* bu fill request l2 miss */
1199 static const struct pmc_masks k8_mask_bfrlm[] = {
1200         __K8MASK(ic-fill,       0),
1201         __K8MASK(dc-fill,       1),
1202         __K8MASK(tlb-reload,    2),
1203         NULLMASK
1204 };
1205
1206 /* bu fill into l2 */
1207 static const struct pmc_masks k8_mask_bfil[] = {
1208         __K8MASK(dirty-l2-victim,       0),
1209         __K8MASK(victim-from-l2,        1),
1210         NULLMASK
1211 };
1212
1213 /* fr retired fpu instructions */
1214 static const struct pmc_masks k8_mask_frfi[] = {
1215         __K8MASK(x87,                   0),
1216         __K8MASK(mmx-3dnow,             1),
1217         __K8MASK(packed-sse-sse2,       2),
1218         __K8MASK(scalar-sse-sse2,       3),
1219         NULLMASK
1220 };
1221
1222 /* fr retired fastpath double op instructions */
1223 static const struct pmc_masks k8_mask_frfdoi[] = {
1224         __K8MASK(low-op-pos-0,          0),
1225         __K8MASK(low-op-pos-1,          1),
1226         __K8MASK(low-op-pos-2,          2),
1227         NULLMASK
1228 };
1229
1230 /* fr fpu exceptions */
1231 static const struct pmc_masks k8_mask_ffe[] = {
1232         __K8MASK(x87-reclass-microfaults,       0),
1233         __K8MASK(sse-retype-microfaults,        1),
1234         __K8MASK(sse-reclass-microfaults,       2),
1235         __K8MASK(sse-and-x87-microtraps,        3),
1236         NULLMASK
1237 };
1238
1239 /* nb memory controller page access event */
1240 static const struct pmc_masks k8_mask_nmcpae[] = {
1241         __K8MASK(page-hit,      0),
1242         __K8MASK(page-miss,     1),
1243         __K8MASK(page-conflict, 2),
1244         NULLMASK
1245 };
1246
1247 /* nb memory controller turnaround */
1248 static const struct pmc_masks k8_mask_nmct[] = {
1249         __K8MASK(dimm-turnaround,               0),
1250         __K8MASK(read-to-write-turnaround,      1),
1251         __K8MASK(write-to-read-turnaround,      2),
1252         NULLMASK
1253 };
1254
1255 /* nb memory controller bypass saturation */
1256 static const struct pmc_masks k8_mask_nmcbs[] = {
1257         __K8MASK(memory-controller-hi-pri-bypass,       0),
1258         __K8MASK(memory-controller-lo-pri-bypass,       1),
1259         __K8MASK(dram-controller-interface-bypass,      2),
1260         __K8MASK(dram-controller-queue-bypass,          3),
1261         NULLMASK
1262 };
1263
1264 /* nb sized commands */
1265 static const struct pmc_masks k8_mask_nsc[] = {
1266         __K8MASK(nonpostwrszbyte,       0),
1267         __K8MASK(nonpostwrszdword,      1),
1268         __K8MASK(postwrszbyte,          2),
1269         __K8MASK(postwrszdword,         3),
1270         __K8MASK(rdszbyte,              4),
1271         __K8MASK(rdszdword,             5),
1272         __K8MASK(rdmodwr,               6),
1273         NULLMASK
1274 };
1275
1276 /* nb probe result */
1277 static const struct pmc_masks k8_mask_npr[] = {
1278         __K8MASK(probe-miss,            0),
1279         __K8MASK(probe-hit,             1),
1280         __K8MASK(probe-hit-dirty-no-memory-cancel, 2),
1281         __K8MASK(probe-hit-dirty-with-memory-cancel, 3),
1282         NULLMASK
1283 };
1284
1285 /* nb hypertransport bus bandwidth */
1286 static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
1287         __K8MASK(command,       0),
1288         __K8MASK(data,  1),
1289         __K8MASK(buffer-release, 2),
1290         __K8MASK(nop,   3),
1291         NULLMASK
1292 };
1293
1294 #undef  __K8MASK
1295
1296 #define K8_KW_COUNT     "count"
1297 #define K8_KW_EDGE      "edge"
1298 #define K8_KW_INV       "inv"
1299 #define K8_KW_MASK      "mask"
1300 #define K8_KW_OS        "os"
1301 #define K8_KW_USR       "usr"
1302
1303 static int
1304 k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
1305     struct pmc_op_pmcallocate *pmc_config)
1306 {
1307         char            *e, *p, *q;
1308         int             n;
1309         uint32_t        count;
1310         uint64_t        evmask;
1311         const struct pmc_masks  *pm, *pmask;
1312
1313         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1314         pmc_config->pm_md.pm_amd.pm_amd_config = 0;
1315
1316         pmask = NULL;
1317         evmask = 0;
1318
1319 #define __K8SETMASK(M) pmask = k8_mask_##M
1320
1321         /* setup parsing tables */
1322         switch (pe) {
1323         case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1324                 __K8SETMASK(fdfo);
1325                 break;
1326         case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
1327                 __K8SETMASK(lsrl);
1328                 break;
1329         case PMC_EV_K8_LS_LOCKED_OPERATION:
1330                 __K8SETMASK(llo);
1331                 break;
1332         case PMC_EV_K8_DC_REFILL_FROM_L2:
1333         case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
1334         case PMC_EV_K8_DC_COPYBACK:
1335                 __K8SETMASK(dc);
1336                 break;
1337         case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
1338                 __K8SETMASK(dobee);
1339                 break;
1340         case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
1341                 __K8SETMASK(ddpi);
1342                 break;
1343         case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1344                 __K8SETMASK(dabl);
1345                 break;
1346         case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
1347                 __K8SETMASK(bilr);
1348                 break;
1349         case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
1350                 __K8SETMASK(bfrlm);
1351                 break;
1352         case PMC_EV_K8_BU_FILL_INTO_L2:
1353                 __K8SETMASK(bfil);
1354                 break;
1355         case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1356                 __K8SETMASK(frfi);
1357                 break;
1358         case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1359                 __K8SETMASK(frfdoi);
1360                 break;
1361         case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1362                 __K8SETMASK(ffe);
1363                 break;
1364         case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
1365                 __K8SETMASK(nmcpae);
1366                 break;
1367         case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
1368                 __K8SETMASK(nmct);
1369                 break;
1370         case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
1371                 __K8SETMASK(nmcbs);
1372                 break;
1373         case PMC_EV_K8_NB_SIZED_COMMANDS:
1374                 __K8SETMASK(nsc);
1375                 break;
1376         case PMC_EV_K8_NB_PROBE_RESULT:
1377                 __K8SETMASK(npr);
1378                 break;
1379         case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
1380         case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
1381         case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
1382                 __K8SETMASK(nhbb);
1383                 break;
1384
1385         default:
1386                 break;          /* no options defined */
1387         }
1388
1389         while ((p = strsep(&ctrspec, ",")) != NULL) {
1390                 if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
1391                         q = strchr(p, '=');
1392                         if (*++q == '\0') /* skip '=' */
1393                                 return (-1);
1394
1395                         count = strtol(q, &e, 0);
1396                         if (e == q || *e != '\0')
1397                                 return (-1);
1398
1399                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1400                         pmc_config->pm_md.pm_amd.pm_amd_config |=
1401                             AMD_PMC_TO_COUNTER(count);
1402
1403                 } else if (KWMATCH(p, K8_KW_EDGE)) {
1404                         pmc_config->pm_caps |= PMC_CAP_EDGE;
1405                 } else if (KWMATCH(p, K8_KW_INV)) {
1406                         pmc_config->pm_caps |= PMC_CAP_INVERT;
1407                 } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
1408                         if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1409                                 return (-1);
1410                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1411                 } else if (KWMATCH(p, K8_KW_OS)) {
1412                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1413                 } else if (KWMATCH(p, K8_KW_USR)) {
1414                         pmc_config->pm_caps |= PMC_CAP_USER;
1415                 } else
1416                         return (-1);
1417         }
1418
1419         /* other post processing */
1420         switch (pe) {
1421         case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1422         case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
1423         case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
1424         case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1425         case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1426         case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1427                 /* XXX only available in rev B and later */
1428                 break;
1429         case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1430                 /* XXX only available in rev C and later */
1431                 break;
1432         case PMC_EV_K8_LS_LOCKED_OPERATION:
1433                 /* XXX on CPU revisions A and B the event mask must be zero */
1434                 if (evmask & (evmask - 1)) /* > 1 bit set */
1435                         return (-1);
1436                 if (evmask == 0) {
1437                         evmask = 0x01; /* Rev C and later: #instrs */
1438                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1439                 }
1440                 break;
1441         default:
1442                 if (evmask == 0 && pmask != NULL) {
1443                         for (pm = pmask; pm->pm_name; pm++)
1444                                 evmask |= pm->pm_value;
1445                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1446                 }
1447         }
1448
1449         if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1450                 pmc_config->pm_md.pm_amd.pm_amd_config =
1451                     AMD_PMC_TO_UNITMASK(evmask);
1452
1453         return (0);
1454 }
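
/*
 * Illustrative usage (a sketch, not part of the library proper): the
 * parser above accepts a comma separated list of the K8_KW_* qualifiers
 * after the event name, so a caller could allocate a counter with:
 *
 *	pmc_id_t pmcid;
 *
 *	if (pmc_allocate("k8-dc-copyback,mask=exclusive+shared,usr",
 *	    PMC_MODE_TC, 0, PMC_CPU_ANY, &pmcid) < 0)
 *		err(EX_OSERR, "cannot allocate PMC");
 *
 * The "exclusive" and "shared" unit mask names are assumed to be among
 * those defined in the k8_mask_dc table earlier in this file; err() and
 * EX_OSERR need <err.h> and <sysexits.h>.
 */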
1455
1456 #endif
1457
1458 #if defined(__amd64__) || defined(__i386__)
1459
1460 /*
1461  * Intel P4 PMCs
1462  */
1463
1464 static struct pmc_event_alias p4_aliases[] = {
1465         EV_ALIAS("branches",            "p4-branch-retired,mask=mmtp+mmtm"),
1466         EV_ALIAS("branch-mispredicts",  "p4-mispred-branch-retired"),
1467         EV_ALIAS("cycles",              "tsc"),
1468         EV_ALIAS("instructions",
1469             "p4-instr-retired,mask=nbogusntag+nbogustag"),
1470         EV_ALIAS("unhalted-cycles",     "p4-global-power-events"),
1471         EV_ALIAS(NULL, NULL)
1472 };
1473
1474 #define P4_KW_ACTIVE    "active"
1475 #define P4_KW_ACTIVE_ANY "any"
1476 #define P4_KW_ACTIVE_BOTH "both"
1477 #define P4_KW_ACTIVE_NONE "none"
1478 #define P4_KW_ACTIVE_SINGLE "single"
1479 #define P4_KW_BUSREQTYPE "busreqtype"
1480 #define P4_KW_CASCADE   "cascade"
1481 #define P4_KW_EDGE      "edge"
1482 #define P4_KW_INV       "complement"
1483 #define P4_KW_OS        "os"
1484 #define P4_KW_MASK      "mask"
1485 #define P4_KW_PRECISE   "precise"
1486 #define P4_KW_TAG       "tag"
1487 #define P4_KW_THRESHOLD "threshold"
1488 #define P4_KW_USR       "usr"
1489
1490 #define __P4MASK(N,V) PMCMASK(N, (1 << (V)))
1491
1492 static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
1493         __P4MASK(dd, 0),
1494         __P4MASK(db, 1),
1495         __P4MASK(di, 2),
1496         __P4MASK(bd, 3),
1497         __P4MASK(bb, 4),
1498         __P4MASK(bi, 5),
1499         __P4MASK(id, 6),
1500         __P4MASK(ib, 7),
1501         NULLMASK
1502 };
1503
1504 static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
1505         __P4MASK(tcmiss, 0),
1506         NULLMASK,
1507 };
1508
1509 static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
1510         __P4MASK(hit, 0),
1511         __P4MASK(miss, 1),
1512         __P4MASK(hit-uc, 2),
1513         NULLMASK
1514 };
1515
1516 static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
1517         __P4MASK(st-rb-full, 2),
1518         __P4MASK(64k-conf, 3),
1519         NULLMASK
1520 };
1521
1522 static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
1523         __P4MASK(lsc, 0),
1524         __P4MASK(ssc, 1),
1525         NULLMASK
1526 };
1527
1528 static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
1529         __P4MASK(split-ld, 1),
1530         NULLMASK
1531 };
1532
1533 static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
1534         __P4MASK(split-st, 1),
1535         NULLMASK
1536 };
1537
1538 static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
1539         __P4MASK(no-sta, 1),
1540         __P4MASK(no-std, 3),
1541         __P4MASK(partial-data, 4),
1542         __P4MASK(unalgn-addr, 5),
1543         NULLMASK
1544 };
1545
1546 static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
1547         __P4MASK(dtmiss, 0),
1548         __P4MASK(itmiss, 1),
1549         NULLMASK
1550 };
1551
1552 static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
1553         __P4MASK(rd-2ndl-hits, 0),
1554         __P4MASK(rd-2ndl-hite, 1),
1555         __P4MASK(rd-2ndl-hitm, 2),
1556         __P4MASK(rd-3rdl-hits, 3),
1557         __P4MASK(rd-3rdl-hite, 4),
1558         __P4MASK(rd-3rdl-hitm, 5),
1559         __P4MASK(rd-2ndl-miss, 8),
1560         __P4MASK(rd-3rdl-miss, 9),
1561         __P4MASK(wr-2ndl-miss, 10),
1562         NULLMASK
1563 };
1564
1565 static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
1566         __P4MASK(all-read, 5),
1567         __P4MASK(all-write, 6),
1568         __P4MASK(mem-uc, 7),
1569         __P4MASK(mem-wc, 8),
1570         __P4MASK(mem-wt, 9),
1571         __P4MASK(mem-wp, 10),
1572         __P4MASK(mem-wb, 11),
1573         __P4MASK(own, 13),
1574         __P4MASK(other, 14),
1575         __P4MASK(prefetch, 15),
1576         NULLMASK
1577 };
1578
1579 static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
1580         __P4MASK(all-read, 5),
1581         __P4MASK(all-write, 6),
1582         __P4MASK(mem-uc, 7),
1583         __P4MASK(mem-wc, 8),
1584         __P4MASK(mem-wt, 9),
1585         __P4MASK(mem-wp, 10),
1586         __P4MASK(mem-wb, 11),
1587         __P4MASK(own, 13),
1588         __P4MASK(other, 14),
1589         __P4MASK(prefetch, 15),
1590         NULLMASK
1591 };
1592
1593 static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
1594         __P4MASK(drdy-drv, 0),
1595         __P4MASK(drdy-own, 1),
1596         __P4MASK(drdy-other, 2),
1597         __P4MASK(dbsy-drv, 3),
1598         __P4MASK(dbsy-own, 4),
1599         __P4MASK(dbsy-other, 5),
1600         NULLMASK
1601 };
1602
1603 static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
1604         __P4MASK(req-type0, 0),
1605         __P4MASK(req-type1, 1),
1606         __P4MASK(req-len0, 2),
1607         __P4MASK(req-len1, 3),
1608         __P4MASK(req-io-type, 5),
1609         __P4MASK(req-lock-type, 6),
1610         __P4MASK(req-cache-type, 7),
1611         __P4MASK(req-split-type, 8),
1612         __P4MASK(req-dem-type, 9),
1613         __P4MASK(req-ord-type, 10),
1614         __P4MASK(mem-type0, 11),
1615         __P4MASK(mem-type1, 12),
1616         __P4MASK(mem-type2, 13),
1617         NULLMASK
1618 };
1619
1620 static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
1621         __P4MASK(all, 15),
1622         NULLMASK
1623 };
1624
1625 static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
1626         __P4MASK(all, 15),
1627         NULLMASK
1628 };
1629
1630 static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
1631         __P4MASK(all, 15),
1632         NULLMASK
1633 };
1634
1635 static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
1636         __P4MASK(all, 15),
1637         NULLMASK
1638 };
1639
1640 static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
1641         __P4MASK(all, 15),
1642         NULLMASK
1643 };
1644
1645 static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
1646         __P4MASK(all, 15),
1647         NULLMASK
1648 };
1649
1650 static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
1651         __P4MASK(all, 15),
1652         NULLMASK
1653 };
1654
1655 static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
1656         __P4MASK(all, 15),
1657         NULLMASK
1658 };
1659
1660 static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
1661         __P4MASK(allp0, 3),
1662         __P4MASK(allp2, 4),
1663         NULLMASK
1664 };
1665
1666 static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
1667         __P4MASK(running, 0),
1668         NULLMASK
1669 };
1670
1671 static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
1672         __P4MASK(cisc, 0),
1673         NULLMASK
1674 };
1675
1676 static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
1677         __P4MASK(from-tc-build, 0),
1678         __P4MASK(from-tc-deliver, 1),
1679         __P4MASK(from-rom, 2),
1680         NULLMASK
1681 };
1682
1683 static const struct pmc_masks p4_mask_rmbt[] = {
1684         /* retired mispred branch type */
1685         __P4MASK(conditional, 1),
1686         __P4MASK(call, 2),
1687         __P4MASK(return, 3),
1688         __P4MASK(indirect, 4),
1689         NULLMASK
1690 };
1691
1692 static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
1693         __P4MASK(conditional, 1),
1694         __P4MASK(call, 2),
1695         __P4MASK(return, 3),
1696         __P4MASK(indirect, 4),
1697         NULLMASK
1698 };
1699
1700 static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
1701         __P4MASK(sbfull, 5),
1702         NULLMASK
1703 };
1704
1705 static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
1706         __P4MASK(wcb-evicts, 0),
1707         __P4MASK(wcb-full-evict, 1),
1708         NULLMASK
1709 };
1710
1711 static const struct pmc_masks p4_mask_fee[] = { /* front end event */
1712         __P4MASK(nbogus, 0),
1713         __P4MASK(bogus, 1),
1714         NULLMASK
1715 };
1716
1717 static const struct pmc_masks p4_mask_ee[] = { /* execution event */
1718         __P4MASK(nbogus0, 0),
1719         __P4MASK(nbogus1, 1),
1720         __P4MASK(nbogus2, 2),
1721         __P4MASK(nbogus3, 3),
1722         __P4MASK(bogus0, 4),
1723         __P4MASK(bogus1, 5),
1724         __P4MASK(bogus2, 6),
1725         __P4MASK(bogus3, 7),
1726         NULLMASK
1727 };
1728
1729 static const struct pmc_masks p4_mask_re[] = { /* replay event */
1730         __P4MASK(nbogus, 0),
1731         __P4MASK(bogus, 1),
1732         NULLMASK
1733 };
1734
1735 static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
1736         __P4MASK(nbogusntag, 0),
1737         __P4MASK(nbogustag, 1),
1738         __P4MASK(bogusntag, 2),
1739         __P4MASK(bogustag, 3),
1740         NULLMASK
1741 };
1742
1743 static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
1744         __P4MASK(nbogus, 0),
1745         __P4MASK(bogus, 1),
1746         NULLMASK
1747 };
1748
1749 static const struct pmc_masks p4_mask_ut[] = { /* uop type */
1750         __P4MASK(tagloads, 1),
1751         __P4MASK(tagstores, 2),
1752         NULLMASK
1753 };
1754
1755 static const struct pmc_masks p4_mask_br[] = { /* branch retired */
1756         __P4MASK(mmnp, 0),
1757         __P4MASK(mmnm, 1),
1758         __P4MASK(mmtp, 2),
1759         __P4MASK(mmtm, 3),
1760         NULLMASK
1761 };
1762
1763 static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
1764         __P4MASK(nbogus, 0),
1765         NULLMASK
1766 };
1767
1768 static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
1769         __P4MASK(fpsu, 0),
1770         __P4MASK(fpso, 1),
1771         __P4MASK(poao, 2),
1772         __P4MASK(poau, 3),
1773         __P4MASK(prea, 4),
1774         NULLMASK
1775 };
1776
1777 static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
1778         __P4MASK(clear, 0),
1779         __P4MASK(moclear, 2),
1780         __P4MASK(smclear, 3),
1781         NULLMASK
1782 };
1783
1784 /* P4 event parser */
1785 static int
1786 p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1787     struct pmc_op_pmcallocate *pmc_config)
1788 {
1789
1790         char    *e, *p, *q;
1791         int     count, has_tag, has_busreqtype, n;
1792         uint32_t cccractivemask;
1793         uint64_t evmask;
1794         const struct pmc_masks *pm, *pmask;
1795
1796         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1797         pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1798             pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1799
1800         pmask   = NULL;
1801         evmask  = 0;
1802         cccractivemask = 0x3;
1803         has_tag = has_busreqtype = 0;
1804
1805 #define __P4SETMASK(M) do {                             \
1806         pmask = p4_mask_##M;                            \
1807 } while (0)
1808
1809         switch (pe) {
1810         case PMC_EV_P4_TC_DELIVER_MODE:
1811                 __P4SETMASK(tcdm);
1812                 break;
1813         case PMC_EV_P4_BPU_FETCH_REQUEST:
1814                 __P4SETMASK(bfr);
1815                 break;
1816         case PMC_EV_P4_ITLB_REFERENCE:
1817                 __P4SETMASK(ir);
1818                 break;
1819         case PMC_EV_P4_MEMORY_CANCEL:
1820                 __P4SETMASK(memcan);
1821                 break;
1822         case PMC_EV_P4_MEMORY_COMPLETE:
1823                 __P4SETMASK(memcomp);
1824                 break;
1825         case PMC_EV_P4_LOAD_PORT_REPLAY:
1826                 __P4SETMASK(lpr);
1827                 break;
1828         case PMC_EV_P4_STORE_PORT_REPLAY:
1829                 __P4SETMASK(spr);
1830                 break;
1831         case PMC_EV_P4_MOB_LOAD_REPLAY:
1832                 __P4SETMASK(mlr);
1833                 break;
1834         case PMC_EV_P4_PAGE_WALK_TYPE:
1835                 __P4SETMASK(pwt);
1836                 break;
1837         case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1838                 __P4SETMASK(bcr);
1839                 break;
1840         case PMC_EV_P4_IOQ_ALLOCATION:
1841                 __P4SETMASK(ia);
1842                 has_busreqtype = 1;
1843                 break;
1844         case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1845                 __P4SETMASK(iae);
1846                 has_busreqtype = 1;
1847                 break;
1848         case PMC_EV_P4_FSB_DATA_ACTIVITY:
1849                 __P4SETMASK(fda);
1850                 break;
1851         case PMC_EV_P4_BSQ_ALLOCATION:
1852                 __P4SETMASK(ba);
1853                 break;
1854         case PMC_EV_P4_SSE_INPUT_ASSIST:
1855                 __P4SETMASK(sia);
1856                 break;
1857         case PMC_EV_P4_PACKED_SP_UOP:
1858                 __P4SETMASK(psu);
1859                 break;
1860         case PMC_EV_P4_PACKED_DP_UOP:
1861                 __P4SETMASK(pdu);
1862                 break;
1863         case PMC_EV_P4_SCALAR_SP_UOP:
1864                 __P4SETMASK(ssu);
1865                 break;
1866         case PMC_EV_P4_SCALAR_DP_UOP:
1867                 __P4SETMASK(sdu);
1868                 break;
1869         case PMC_EV_P4_64BIT_MMX_UOP:
1870                 __P4SETMASK(64bmu);
1871                 break;
1872         case PMC_EV_P4_128BIT_MMX_UOP:
1873                 __P4SETMASK(128bmu);
1874                 break;
1875         case PMC_EV_P4_X87_FP_UOP:
1876                 __P4SETMASK(xfu);
1877                 break;
1878         case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1879                 __P4SETMASK(xsmu);
1880                 break;
1881         case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1882                 __P4SETMASK(gpe);
1883                 break;
1884         case PMC_EV_P4_TC_MS_XFER:
1885                 __P4SETMASK(tmx);
1886                 break;
1887         case PMC_EV_P4_UOP_QUEUE_WRITES:
1888                 __P4SETMASK(uqw);
1889                 break;
1890         case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1891                 __P4SETMASK(rmbt);
1892                 break;
1893         case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1894                 __P4SETMASK(rbt);
1895                 break;
1896         case PMC_EV_P4_RESOURCE_STALL:
1897                 __P4SETMASK(rs);
1898                 break;
1899         case PMC_EV_P4_WC_BUFFER:
1900                 __P4SETMASK(wb);
1901                 break;
1902         case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1903         case PMC_EV_P4_B2B_CYCLES:
1904         case PMC_EV_P4_BNR:
1905         case PMC_EV_P4_SNOOP:
1906         case PMC_EV_P4_RESPONSE:
1907                 break;
1908         case PMC_EV_P4_FRONT_END_EVENT:
1909                 __P4SETMASK(fee);
1910                 break;
1911         case PMC_EV_P4_EXECUTION_EVENT:
1912                 __P4SETMASK(ee);
1913                 break;
1914         case PMC_EV_P4_REPLAY_EVENT:
1915                 __P4SETMASK(re);
1916                 break;
1917         case PMC_EV_P4_INSTR_RETIRED:
1918                 __P4SETMASK(insret);
1919                 break;
1920         case PMC_EV_P4_UOPS_RETIRED:
1921                 __P4SETMASK(ur);
1922                 break;
1923         case PMC_EV_P4_UOP_TYPE:
1924                 __P4SETMASK(ut);
1925                 break;
1926         case PMC_EV_P4_BRANCH_RETIRED:
1927                 __P4SETMASK(br);
1928                 break;
1929         case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1930                 __P4SETMASK(mbr);
1931                 break;
1932         case PMC_EV_P4_X87_ASSIST:
1933                 __P4SETMASK(xa);
1934                 break;
1935         case PMC_EV_P4_MACHINE_CLEAR:
1936                 __P4SETMASK(machclr);
1937                 break;
1938         default:
1939                 return (-1);
1940         }
1941
1942         /* process additional flags */
1943         while ((p = strsep(&ctrspec, ",")) != NULL) {
1944                 if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
1945                         q = strchr(p, '=');
1946                         if (q == NULL || *++q == '\0') /* skip '=' */
1947                                 return (-1);
1948
1949                         if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0)
1950                                 cccractivemask = 0x0;
1951                         else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0)
1952                                 cccractivemask = 0x1;
1953                         else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0)
1954                                 cccractivemask = 0x2;
1955                         else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0)
1956                                 cccractivemask = 0x3;
1957                         else
1958                                 return (-1);
1959
1960                 } else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
1961                         if (has_busreqtype == 0)
1962                                 return (-1);
1963
1964                         q = strchr(p, '=');
1965                         if (q == NULL || *++q == '\0') /* skip '=' */
1966                                 return (-1);
1967
1968                         count = strtol(q, &e, 0);
1969                         if (e == q || *e != '\0')
1970                                 return (-1);
1971                         evmask = (evmask & ~0x1F) | (count & 0x1F);
1972                 } else if (KWMATCH(p, P4_KW_CASCADE))
1973                         pmc_config->pm_caps |= PMC_CAP_CASCADE;
1974                 else if (KWMATCH(p, P4_KW_EDGE))
1975                         pmc_config->pm_caps |= PMC_CAP_EDGE;
1976                 else if (KWMATCH(p, P4_KW_INV))
1977                         pmc_config->pm_caps |= PMC_CAP_INVERT;
1978                 else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
1979                         if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1980                                 return (-1);
1981                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1982                 } else if (KWMATCH(p, P4_KW_OS))
1983                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1984                 else if (KWMATCH(p, P4_KW_PRECISE))
1985                         pmc_config->pm_caps |= PMC_CAP_PRECISE;
1986                 else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
1987                         if (has_tag == 0)
1988                                 return (-1);
1989
1990                         q = strchr(p, '=');
1991                         if (*++q == '\0') /* skip '=' */
1992                                 return (-1);
1993
1994                         count = strtol(q, &e, 0);
1995                         if (e == q || *e != '\0')
1996                                 return (-1);
1997
1998                         pmc_config->pm_caps |= PMC_CAP_TAGGING;
1999                         pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
2000                             P4_ESCR_TO_TAG_VALUE(count);
2001                 } else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
2002                         q = strchr(p, '=');
2003                         if (*++q == '\0') /* skip '=' */
2004                                 return (-1);
2005
2006                         count = strtol(q, &e, 0);
2007                         if (e == q || *e != '\0')
2008                                 return (-1);
2009
2010                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
2011                         pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
2012                             ~P4_CCCR_THRESHOLD_MASK;
2013                         pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
2014                             P4_CCCR_TO_THRESHOLD(count);
2015                 } else if (KWMATCH(p, P4_KW_USR))
2016                         pmc_config->pm_caps |= PMC_CAP_USER;
2017                 else
2018                         return (-1);
2019         }
2020
2021         /* other post processing */
2022         if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
2023             pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
2024             pe == PMC_EV_P4_BSQ_ALLOCATION)
2025                 pmc_config->pm_caps |= PMC_CAP_EDGE;
2026
2027         /* fill in thread activity mask */
2028         pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
2029             P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
2030
2031         if (evmask)
2032                 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2033
2034         switch (pe) {
2035         case PMC_EV_P4_FSB_DATA_ACTIVITY:
2036                 if ((evmask & 0x06) == 0x06 ||
2037                     (evmask & 0x18) == 0x18)
2038                         return (-1); /* can't have own+other bits together */
2039                 if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
2040                         evmask = 0x1D;
2041                 break;
2042         case PMC_EV_P4_MACHINE_CLEAR:
2043                 /* only one bit is allowed to be set */
2044                 if ((evmask & (evmask - 1)) != 0)
2045                         return (-1);
2046                 if (evmask == 0) {
2047                         evmask = 0x1;   /* 'CLEAR' */
2048                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2049                 }
2050                 break;
2051         default:
2052                 if (evmask == 0 && pmask) {
2053                         for (pm = pmask; pm->pm_name; pm++)
2054                                 evmask |= pm->pm_value;
2055                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2056                 }
2057         }
2058
2059         pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
2060             P4_ESCR_TO_EVENT_MASK(evmask);
2061
2062         return (0);
2063 }
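
/*
 * Illustrative usage (sketch only): a P4 counter specification strings
 * together the P4_KW_* qualifiers parsed above, for example
 * "p4-branch-retired,mask=mmtp+mmtm,usr" (the mask names come from the
 * p4_mask_br table) or, for a system scope counter:
 *
 *	pmc_id_t pmcid;
 *
 *	if (pmc_allocate("p4-global-power-events,os,usr,active=any",
 *	    PMC_MODE_SC, 0, 0, &pmcid) < 0)
 *		err(EX_OSERR, "cannot allocate PMC");
 *
 * System scope modes such as PMC_MODE_SC take a concrete CPU number
 * (0 above) rather than PMC_CPU_ANY.
 */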
2064
2065 #endif
2066
2067 #if defined(__i386__)
2068
2069 /*
2070  * Pentium style PMCs
2071  */
2072
2073 static struct pmc_event_alias p5_aliases[] = {
2074         EV_ALIAS("branches",            "p5-taken-branches"),
2075         EV_ALIAS("cycles",              "tsc"),
2076         EV_ALIAS("dc-misses",           "p5-data-read-miss-or-write-miss"),
2077         EV_ALIAS("ic-misses",           "p5-code-cache-miss"),
2078         EV_ALIAS("instructions",        "p5-instructions-executed"),
2079         EV_ALIAS("interrupts",          "p5-hardware-interrupts"),
2080         EV_ALIAS("unhalted-cycles",
2081             "p5-number-of-cycles-not-in-halt-state"),
2082         EV_ALIAS(NULL, NULL)
2083 };
2084
2085 static int
2086 p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
2087     struct pmc_op_pmcallocate *pmc_config)
2088 {
2089         return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */
2090 }
2091
2092 /*
2093  * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
2094  * and Pentium M CPUs.
2095  */
2096
2097 static struct pmc_event_alias p6_aliases[] = {
2098         EV_ALIAS("branches",            "p6-br-inst-retired"),
2099         EV_ALIAS("branch-mispredicts",  "p6-br-miss-pred-retired"),
2100         EV_ALIAS("cycles",              "tsc"),
2101         EV_ALIAS("dc-misses",           "p6-dcu-lines-in"),
2102         EV_ALIAS("ic-misses",           "p6-ifu-fetch-miss"),
2103         EV_ALIAS("instructions",        "p6-inst-retired"),
2104         EV_ALIAS("interrupts",          "p6-hw-int-rx"),
2105         EV_ALIAS("unhalted-cycles",     "p6-cpu-clk-unhalted"),
2106         EV_ALIAS(NULL, NULL)
2107 };
2108
2109 #define P6_KW_CMASK     "cmask"
2110 #define P6_KW_EDGE      "edge"
2111 #define P6_KW_INV       "inv"
2112 #define P6_KW_OS        "os"
2113 #define P6_KW_UMASK     "umask"
2114 #define P6_KW_USR       "usr"
2115
2116 static struct pmc_masks p6_mask_mesi[] = {
2117         PMCMASK(m,      0x01),
2118         PMCMASK(e,      0x02),
2119         PMCMASK(s,      0x04),
2120         PMCMASK(i,      0x08),
2121         NULLMASK
2122 };
2123
2124 static struct pmc_masks p6_mask_mesihw[] = {
2125         PMCMASK(m,      0x01),
2126         PMCMASK(e,      0x02),
2127         PMCMASK(s,      0x04),
2128         PMCMASK(i,      0x08),
2129         PMCMASK(nonhw,  0x00),
2130         PMCMASK(hw,     0x10),
2131         PMCMASK(both,   0x30),
2132         NULLMASK
2133 };
2134
2135 static struct pmc_masks p6_mask_hw[] = {
2136         PMCMASK(nonhw,  0x00),
2137         PMCMASK(hw,     0x10),
2138         PMCMASK(both,   0x30),
2139         NULLMASK
2140 };
2141
2142 static struct pmc_masks p6_mask_any[] = {
2143         PMCMASK(self,   0x00),
2144         PMCMASK(any,    0x20),
2145         NULLMASK
2146 };
2147
2148 static struct pmc_masks p6_mask_ekp[] = {
2149         PMCMASK(nta,    0x00),
2150         PMCMASK(t1,     0x01),
2151         PMCMASK(t2,     0x02),
2152         PMCMASK(wos,    0x03),
2153         NULLMASK
2154 };
2155
2156 static struct pmc_masks p6_mask_pps[] = {
2157         PMCMASK(packed-and-scalar, 0x00),
2158         PMCMASK(scalar, 0x01),
2159         NULLMASK
2160 };
2161
2162 static struct pmc_masks p6_mask_mite[] = {
2163         PMCMASK(packed-multiply,         0x01),
2164         PMCMASK(packed-shift,           0x02),
2165         PMCMASK(pack,                   0x04),
2166         PMCMASK(unpack,                 0x08),
2167         PMCMASK(packed-logical,         0x10),
2168         PMCMASK(packed-arithmetic,      0x20),
2169         NULLMASK
2170 };
2171
2172 static struct pmc_masks p6_mask_fmt[] = {
2173         PMCMASK(mmxtofp,        0x00),
2174         PMCMASK(fptommx,        0x01),
2175         NULLMASK
2176 };
2177
2178 static struct pmc_masks p6_mask_sr[] = {
2179         PMCMASK(es,     0x01),
2180         PMCMASK(ds,     0x02),
2181         PMCMASK(fs,     0x04),
2182         PMCMASK(gs,     0x08),
2183         NULLMASK
2184 };
2185
2186 static struct pmc_masks p6_mask_eet[] = {
2187         PMCMASK(all,    0x00),
2188         PMCMASK(freq,   0x02),
2189         NULLMASK
2190 };
2191
2192 static struct pmc_masks p6_mask_efur[] = {
2193         PMCMASK(all,    0x00),
2194         PMCMASK(loadop, 0x01),
2195         PMCMASK(stdsta, 0x02),
2196         NULLMASK
2197 };
2198
2199 static struct pmc_masks p6_mask_essir[] = {
2200         PMCMASK(sse-packed-single,      0x00),
2201         PMCMASK(sse-packed-single-scalar-single, 0x01),
2202         PMCMASK(sse2-packed-double,     0x02),
2203         PMCMASK(sse2-scalar-double,     0x03),
2204         NULLMASK
2205 };
2206
2207 static struct pmc_masks p6_mask_esscir[] = {
2208         PMCMASK(sse-packed-single,      0x00),
2209         PMCMASK(sse-scalar-single,      0x01),
2210         PMCMASK(sse2-packed-double,     0x02),
2211         PMCMASK(sse2-scalar-double,     0x03),
2212         NULLMASK
2213 };
2214
2215 /* P6 event parser */
2216 static int
2217 p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
2218     struct pmc_op_pmcallocate *pmc_config)
2219 {
2220         char *e, *p, *q;
2221         uint64_t evmask;
2222         int count, n;
2223         const struct pmc_masks *pm, *pmask;
2224
2225         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2226         pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;
2227
2228         evmask = 0;
2229
2230 #define P6MASKSET(M)    pmask = p6_mask_ ## M
2231
2232         switch (pe) {
2233         case PMC_EV_P6_L2_IFETCH:       P6MASKSET(mesi); break;
2234         case PMC_EV_P6_L2_LD:           P6MASKSET(mesi); break;
2235         case PMC_EV_P6_L2_ST:           P6MASKSET(mesi); break;
2236         case PMC_EV_P6_L2_RQSTS:        P6MASKSET(mesi); break;
2237         case PMC_EV_P6_BUS_DRDY_CLOCKS:
2238         case PMC_EV_P6_BUS_LOCK_CLOCKS:
2239         case PMC_EV_P6_BUS_TRAN_BRD:
2240         case PMC_EV_P6_BUS_TRAN_RFO:
2241         case PMC_EV_P6_BUS_TRANS_WB:
2242         case PMC_EV_P6_BUS_TRAN_IFETCH:
2243         case PMC_EV_P6_BUS_TRAN_INVAL:
2244         case PMC_EV_P6_BUS_TRAN_PWR:
2245         case PMC_EV_P6_BUS_TRANS_P:
2246         case PMC_EV_P6_BUS_TRANS_IO:
2247         case PMC_EV_P6_BUS_TRAN_DEF:
2248         case PMC_EV_P6_BUS_TRAN_BURST:
2249         case PMC_EV_P6_BUS_TRAN_ANY:
2250         case PMC_EV_P6_BUS_TRAN_MEM:
2251                 P6MASKSET(any); break;
2252         case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
2253         case PMC_EV_P6_EMON_KNI_PREF_MISS:
2254                 P6MASKSET(ekp); break;
2255         case PMC_EV_P6_EMON_KNI_INST_RETIRED:
2256         case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
2257                 P6MASKSET(pps); break;
2258         case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
2259                 P6MASKSET(mite); break;
2260         case PMC_EV_P6_FP_MMX_TRANS:
2261                 P6MASKSET(fmt); break;
2262         case PMC_EV_P6_SEG_RENAME_STALLS:
2263         case PMC_EV_P6_SEG_REG_RENAMES:
2264                 P6MASKSET(sr);  break;
2265         case PMC_EV_P6_EMON_EST_TRANS:
2266                 P6MASKSET(eet); break;
2267         case PMC_EV_P6_EMON_FUSED_UOPS_RET:
2268                 P6MASKSET(efur); break;
2269         case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
2270                 P6MASKSET(essir); break;
2271         case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
2272                 P6MASKSET(esscir); break;
2273         default:
2274                 pmask = NULL;
2275                 break;
2276         }
2277
2278         /* Pentium M PMCs have a few events with different semantics */
2279         if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
2280                 if (pe == PMC_EV_P6_L2_LD ||
2281                     pe == PMC_EV_P6_L2_LINES_IN ||
2282                     pe == PMC_EV_P6_L2_LINES_OUT)
2283                         P6MASKSET(mesihw);
2284                 else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
2285                         P6MASKSET(hw);
2286         }
2287
2288         /* Parse additional modifiers if present */
2289         while ((p = strsep(&ctrspec, ",")) != NULL) {
2290                 if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
2291                         q = strchr(p, '=');
2292                         if (*++q == '\0') /* skip '=' */
2293                                 return (-1);
2294                         count = strtol(q, &e, 0);
2295                         if (e == q || *e != '\0')
2296                                 return (-1);
2297                         pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
2298                         pmc_config->pm_md.pm_ppro.pm_ppro_config |=
2299                             P6_EVSEL_TO_CMASK(count);
2300                 } else if (KWMATCH(p, P6_KW_EDGE)) {
2301                         pmc_config->pm_caps |= PMC_CAP_EDGE;
2302                 } else if (KWMATCH(p, P6_KW_INV)) {
2303                         pmc_config->pm_caps |= PMC_CAP_INVERT;
2304                 } else if (KWMATCH(p, P6_KW_OS)) {
2305                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2306                 } else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
2307                         evmask = 0;
2308                         if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
2309                                 return (-1);
2310                         if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
2311                              pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
2312                              pe == PMC_EV_P6_BUS_TRAN_BRD ||
2313                              pe == PMC_EV_P6_BUS_TRAN_RFO ||
2314                              pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
2315                              pe == PMC_EV_P6_BUS_TRAN_INVAL ||
2316                              pe == PMC_EV_P6_BUS_TRAN_PWR ||
2317                              pe == PMC_EV_P6_BUS_TRAN_DEF ||
2318                              pe == PMC_EV_P6_BUS_TRAN_BURST ||
2319                              pe == PMC_EV_P6_BUS_TRAN_ANY ||
2320                              pe == PMC_EV_P6_BUS_TRAN_MEM ||
2321                              pe == PMC_EV_P6_BUS_TRANS_IO ||
2322                              pe == PMC_EV_P6_BUS_TRANS_P ||
2323                              pe == PMC_EV_P6_BUS_TRANS_WB ||
2324                              pe == PMC_EV_P6_EMON_EST_TRANS ||
2325                              pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
2326                              pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
2327                              pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
2328                              pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
2329                              pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
2330                              pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
2331                              pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
2332                              pe == PMC_EV_P6_FP_MMX_TRANS)
2333                             && (n > 1)) /* Only one mask keyword is allowed. */
2334                                 return (-1);
2335                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2336                 } else if (KWMATCH(p, P6_KW_USR)) {
2337                         pmc_config->pm_caps |= PMC_CAP_USER;
2338                 } else
2339                         return (-1);
2340         }
2341
2342         /* post processing */
2343         switch (pe) {
2344
2345                 /*
2346                  * The following events default to an evmask of 0
2347                  */
2348
2349                 /* default => 'self' */
2350         case PMC_EV_P6_BUS_DRDY_CLOCKS:
2351         case PMC_EV_P6_BUS_LOCK_CLOCKS:
2352         case PMC_EV_P6_BUS_TRAN_BRD:
2353         case PMC_EV_P6_BUS_TRAN_RFO:
2354         case PMC_EV_P6_BUS_TRANS_WB:
2355         case PMC_EV_P6_BUS_TRAN_IFETCH:
2356         case PMC_EV_P6_BUS_TRAN_INVAL:
2357         case PMC_EV_P6_BUS_TRAN_PWR:
2358         case PMC_EV_P6_BUS_TRANS_P:
2359         case PMC_EV_P6_BUS_TRANS_IO:
2360         case PMC_EV_P6_BUS_TRAN_DEF:
2361         case PMC_EV_P6_BUS_TRAN_BURST:
2362         case PMC_EV_P6_BUS_TRAN_ANY:
2363         case PMC_EV_P6_BUS_TRAN_MEM:
2364
2365                 /* default => 'nta' */
2366         case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
2367         case PMC_EV_P6_EMON_KNI_PREF_MISS:
2368
2369                 /* default => 'packed and scalar' */
2370         case PMC_EV_P6_EMON_KNI_INST_RETIRED:
2371         case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
2372
2373                 /* default => 'mmx to fp transitions' */
2374         case PMC_EV_P6_FP_MMX_TRANS:
2375
2376                 /* default => 'SSE Packed Single' */
2377         case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
2378         case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
2379
2380                 /* default => 'all fused micro-ops' */
2381         case PMC_EV_P6_EMON_FUSED_UOPS_RET:
2382
2383                 /* default => 'all transitions' */
2384         case PMC_EV_P6_EMON_EST_TRANS:
2385                 break;
2386
2387         case PMC_EV_P6_MMX_UOPS_EXEC:
2388                 evmask = 0x0F;          /* only value allowed */
2389                 break;
2390
2391         default:
2392                 /*
2393                  * For all other events, set the default event mask
2394                  * to a logical OR of all the allowed event mask bits.
2395                  */
2396                 if (evmask == 0 && pmask) {
2397                         for (pm = pmask; pm->pm_name; pm++)
2398                                 evmask |= pm->pm_value;
2399                         pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2400                 }
2401
2402                 break;
2403         }
2404
2405         if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
2406                 pmc_config->pm_md.pm_ppro.pm_ppro_config |=
2407                     P6_EVSEL_TO_UMASK(evmask);
2408
2409         return (0);
2410 }
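
/*
 * Illustrative usage (sketch): P6 event specifiers combine the P6_KW_*
 * qualifiers parsed above with unit mask names from the p6_mask_*
 * tables, e.g. "p6-l2-ld,umask=m+e+s,usr" to count user mode L2 loads
 * hitting lines in the M, E or S states; an optional "cmask=<n>"
 * qualifier sets the event select counter mask via P6_EVSEL_TO_CMASK().
 */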
2411
2412 #endif
2413
2414 #if     defined(__i386__) || defined(__amd64__)
2415 static int
2416 tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
2417     struct pmc_op_pmcallocate *pmc_config)
2418 {
2419         if (pe != PMC_EV_TSC_TSC)
2420                 return (-1);
2421
2422         /* TSC events must be unqualified. */
2423         if (ctrspec && *ctrspec != '\0')
2424                 return (-1);
2425
2426         pmc_config->pm_md.pm_amd.pm_amd_config = 0;
2427         pmc_config->pm_caps |= PMC_CAP_READ;
2428
2429         return (0);
2430 }
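
/*
 * Illustrative usage (sketch): the TSC is allocated as the bare "tsc"
 * event (also reachable through the "cycles" aliases above) with no
 * additional qualifiers, e.g. in system scope counting mode on CPU 0:
 *
 *	pmc_id_t pmcid;
 *
 *	if (pmc_allocate("tsc", PMC_MODE_SC, 0, 0, &pmcid) == 0)
 *		(void) pmc_start(pmcid);
 */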
2431 #endif
2432
2433 static struct pmc_event_alias generic_aliases[] = {
2434         EV_ALIAS("instructions",                "SOFT-CLOCK.HARD"),
2435         EV_ALIAS(NULL, NULL)
2436 };
2437
2438 static int
2439 soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
2440     struct pmc_op_pmcallocate *pmc_config)
2441 {
2442         (void)ctrspec;
2444
2445         if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
2446                 return (-1);
2447
2448         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2449         return (0);
2450 }
2451
2452 #if     defined(__XSCALE__)
2453
2454 static struct pmc_event_alias xscale_aliases[] = {
2455         EV_ALIAS("branches",            "BRANCH_RETIRED"),
2456         EV_ALIAS("branch-mispredicts",  "BRANCH_MISPRED"),
2457         EV_ALIAS("dc-misses",           "DC_MISS"),
2458         EV_ALIAS("ic-misses",           "IC_MISS"),
2459         EV_ALIAS("instructions",        "INSTR_RETIRED"),
2460         EV_ALIAS(NULL, NULL)
2461 };
2462 static int
2463 xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2464     struct pmc_op_pmcallocate *pmc_config __unused)
2465 {
2466         switch (pe) {
2467         default:
2468                 break;
2469         }
2470
2471         return (0);
2472 }
2473 #endif
2474
2475 #if defined(__mips__)
2476
2477 static struct pmc_event_alias mips24k_aliases[] = {
2478         EV_ALIAS("instructions",        "INSTR_EXECUTED"),
2479         EV_ALIAS("branches",            "BRANCH_COMPLETED"),
2480         EV_ALIAS("branch-mispredicts",  "BRANCH_MISPRED"),
2481         EV_ALIAS(NULL, NULL)
2482 };
2483
2484 static struct pmc_event_alias octeon_aliases[] = {
2485         EV_ALIAS("instructions",        "RET"),
2486         EV_ALIAS("branches",            "BR"),
2487         EV_ALIAS("branch-mispredicts",  "BRMIS"),
2488         EV_ALIAS(NULL, NULL)
2489 };
2490
2491 #define MIPS_KW_OS              "os"
2492 #define MIPS_KW_USR             "usr"
2493 #define MIPS_KW_ANYTHREAD       "anythread"
2494
2495 static int
2496 mips_allocate_pmc(enum pmc_event pe, char *ctrspec,
2497                   struct pmc_op_pmcallocate *pmc_config)
2498 {
2499         char *p;
2500
2501         (void) pe;
2502
2503         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2504         
2505         while ((p = strsep(&ctrspec, ",")) != NULL) {
2506                 if (KWMATCH(p, MIPS_KW_OS))
2507                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2508                 else if (KWMATCH(p, MIPS_KW_USR))
2509                         pmc_config->pm_caps |= PMC_CAP_USER;
2510                 else if (KWMATCH(p, MIPS_KW_ANYTHREAD))
2511                         pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
2512                 else
2513                         return (-1);
2514         }
2515
2516         return (0);
2517 }
2518
2519 #endif /* __mips__ */
2520
2521 #if defined(__powerpc__)
2522
2523 static struct pmc_event_alias ppc7450_aliases[] = {
2524         EV_ALIAS("instructions",        "INSTR_COMPLETED"),
2525         EV_ALIAS("branches",            "BRANCHES_COMPLETED"),
2526         EV_ALIAS("branch-mispredicts",  "MISPREDICTED_BRANCHES"),
2527         EV_ALIAS(NULL, NULL)
2528 };
2529
2530 static struct pmc_event_alias ppc970_aliases[] = {
2531         EV_ALIAS("instructions", "INSTR_COMPLETED"),
2532         EV_ALIAS("cycles",       "CYCLES"),
2533         EV_ALIAS(NULL, NULL)
2534 };
2535
2536 #define POWERPC_KW_OS           "os"
2537 #define POWERPC_KW_USR          "usr"
2538 #define POWERPC_KW_ANYTHREAD    "anythread"
2539
2540 static int
2541 powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec,
2542                      struct pmc_op_pmcallocate *pmc_config)
2543 {
2544         char *p;
2545
2546         (void) pe;
2547
2548         pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2549         
2550         while ((p = strsep(&ctrspec, ",")) != NULL) {
2551                 if (KWMATCH(p, POWERPC_KW_OS))
2552                         pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2553                 else if (KWMATCH(p, POWERPC_KW_USR))
2554                         pmc_config->pm_caps |= PMC_CAP_USER;
2555                 else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
2556                         pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
2557                 else
2558                         return (-1);
2559         }
2560
2561         return (0);
2562 }
2563
2564 #endif /* __powerpc__ */
2565
2566
2567 /*
2568  * Match an event name `name' with its canonical form.
2569  *
2570  * Matches are case insensitive; space, period, underscore and hyphen
2571  * characters match each other (e.g. "dc-miss" matches "DC_MISS").
2572  *
2573  * Returns 1 for a match, 0 otherwise.
2574  */
2575
2576 static int
2577 pmc_match_event_name(const char *name, const char *canonicalname)
2578 {
2579         int cc, nc;
2580         const unsigned char *c, *n;
2581
2582         c = (const unsigned char *) canonicalname;
2583         n = (const unsigned char *) name;
2584
2585         for (; (nc = *n) && (cc = *c); n++, c++) {
2586
2587                 if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
2588                     (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
2589                         continue;
2590
2591                 if (toupper(nc) == toupper(cc))
2592                         continue;
2593
2594
2595                 return (0);
2596         }
2597
2598         if (*n == '\0' && *c == '\0')
2599                 return (1);
2600
2601         return (0);
2602 }
2603
2604 /*
2605  * Match an event name against all the event names supported by a
2606  * PMC class.
2607  *
2608  * Returns an event descriptor pointer on match or NULL otherwise.
2609  */
2610 static const struct pmc_event_descr *
2611 pmc_match_event_class(const char *name,
2612     const struct pmc_class_descr *pcd)
2613 {
2614         size_t n;
2615         const struct pmc_event_descr *ev;
2616
2617         ev = pcd->pm_evc_event_table;
2618         for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
2619                 if (pmc_match_event_name(name, ev->pm_ev_name))
2620                         return (ev);
2621
2622         return (NULL);
2623 }
2624
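/*
 * Return 1 if the PMC class 'pc' is one of the classes listed in the
 * machine dependent class list for this system, 0 otherwise.
 */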
2625 static int
2626 pmc_mdep_is_compatible_class(enum pmc_class pc)
2627 {
2628         size_t n;
2629
2630         for (n = 0; n < pmc_mdep_class_list_size; n++)
2631                 if (pmc_mdep_class_list[n] == pc)
2632                         return (1);
2633         return (0);
2634 }
2635
2636 /*
2637  * API entry points
2638  */
2639
2640 int
2641 pmc_allocate(const char *ctrspec, enum pmc_mode mode,
2642     uint32_t flags, int cpu, pmc_id_t *pmcid)
2643 {
2644         size_t n;
2645         int retval;
2646         char *r, *spec_copy;
2647         const char *ctrname;
2648         const struct pmc_event_descr *ev;
2649         const struct pmc_event_alias *alias;
2650         struct pmc_op_pmcallocate pmc_config;
2651         const struct pmc_class_descr *pcd;
2652
2653         spec_copy = NULL;
2654         retval    = -1;
2655
2656         if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
2657             mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
2658                 errno = EINVAL;
2659                 goto out;
2660         }
2661
2662         /* replace an event alias with the canonical event specifier */
2663         if (pmc_mdep_event_aliases)
2664                 for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
2665                         if (!strcasecmp(ctrspec, alias->pm_alias)) {
2666                                 spec_copy = strdup(alias->pm_spec);
2667                                 break;
2668                         }
2669
2670         if (spec_copy == NULL)
2671                 spec_copy = strdup(ctrspec);
2672
2673         r = spec_copy;
2674         ctrname = strsep(&r, ",");
2675
2676         /*
2677  * If an explicit class prefix was given by the user, restrict the
2678          * search for the event to the specified PMC class.
2679          */
2680         ev = NULL;
2681         for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
2682                 pcd = pmc_class_table[n];
2683                 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
2684                     strncasecmp(ctrname, pcd->pm_evc_name,
2685                                 pcd->pm_evc_name_size) == 0) {
2686                         if ((ev = pmc_match_event_class(ctrname +
2687                             pcd->pm_evc_name_size, pcd)) == NULL) {
2688                                 errno = EINVAL;
2689                                 goto out;
2690                         }
2691                         break;
2692                 }
2693         }
2694
2695         /*
2696          * Otherwise, search for this event in all compatible PMC
2697          * classes.
2698          */
2699         for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
2700                 pcd = pmc_class_table[n];
2701                 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
2702                         ev = pmc_match_event_class(ctrname, pcd);
2703         }
2704
2705         if (ev == NULL) {
2706                 errno = EINVAL;
2707                 goto out;
2708         }
2709
2710         bzero(&pmc_config, sizeof(pmc_config));
2711         pmc_config.pm_ev    = ev->pm_ev_code;
2712         pmc_config.pm_class = pcd->pm_evc_class;
2713         pmc_config.pm_cpu   = cpu;
2714         pmc_config.pm_mode  = mode;
2715         pmc_config.pm_flags = flags;
2716
2717         if (PMC_IS_SAMPLING_MODE(mode))
2718                 pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
2719
2720         if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
2721                 errno = EINVAL;
2722                 goto out;
2723         }
2724
2725         if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
2726                 goto out;
2727
2728         *pmcid = pmc_config.pm_pmcid;
2729
2730         retval = 0;
2731
2732  out:
2733         if (spec_copy)
2734                 free(spec_copy);
2735
2736         return (retval);
2737 }
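
/*
 * Illustrative use of pmc_allocate() and friends (a sketch following the
 * pmc(3) manual page conventions; error handling abbreviated, and the
 * "instructions" event resolved through the alias tables above):
 *
 *	pmc_id_t pmcid;
 *	pmc_value_t v;
 *
 *	if (pmc_init() < 0)
 *		err(EX_OSERR, "hwpmc(4) not available");
 *	if (pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY,
 *	    &pmcid) < 0)
 *		err(EX_OSERR, "cannot allocate PMC");
 *	if (pmc_attach(pmcid, 0) < 0)
 *		err(EX_OSERR, "cannot attach PMC to this process");
 *	if (pmc_start(pmcid) < 0)
 *		err(EX_OSERR, "cannot start PMC");
 *	... run the code being measured ...
 *	if (pmc_read(pmcid, &v) < 0)
 *		err(EX_OSERR, "cannot read PMC");
 *	(void) pmc_release(pmcid);
 *
 * A pid argument of 0 to pmc_attach() names the calling process, and
 * PMC_CPU_ANY is used because process scope PMCs are not bound to a
 * single CPU.
 */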
2738
2739 int
2740 pmc_attach(pmc_id_t pmc, pid_t pid)
2741 {
2742         struct pmc_op_pmcattach pmc_attach_args;
2743
2744         pmc_attach_args.pm_pmc = pmc;
2745         pmc_attach_args.pm_pid = pid;
2746
2747         return (PMC_CALL(PMCATTACH, &pmc_attach_args));
2748 }
2749
2750 int
2751 pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
2752 {
2753         unsigned int i;
2754         enum pmc_class cl;
2755
2756         cl = PMC_ID_TO_CLASS(pmcid);
2757         for (i = 0; i < cpu_info.pm_nclass; i++)
2758                 if (cpu_info.pm_classes[i].pm_class == cl) {
2759                         *caps = cpu_info.pm_classes[i].pm_caps;
2760                         return (0);
2761                 }
2762         errno = EINVAL;
2763         return (-1);
2764 }
2765
2766 int
2767 pmc_configure_logfile(int fd)
2768 {
2769         struct pmc_op_configurelog cla;
2770
2771         cla.pm_logfd = fd;
2772         if (PMC_CALL(CONFIGURELOG, &cla) < 0)
2773                 return (-1);
2774         return (0);
2775 }
2776
2777 int
2778 pmc_cpuinfo(const struct pmc_cpuinfo **pci)
2779 {
2780         if (pmc_syscall == -1) {
2781                 errno = ENXIO;
2782                 return (-1);
2783         }
2784
2785         *pci = &cpu_info;
2786         return (0);
2787 }
2788
2789 int
2790 pmc_detach(pmc_id_t pmc, pid_t pid)
2791 {
2792         struct pmc_op_pmcattach pmc_detach_args;
2793
2794         pmc_detach_args.pm_pmc = pmc;
2795         pmc_detach_args.pm_pid = pid;
2796         return (PMC_CALL(PMCDETACH, &pmc_detach_args));
2797 }
2798
2799 int
2800 pmc_disable(int cpu, int pmc)
2801 {
2802         struct pmc_op_pmcadmin ssa;
2803
2804         ssa.pm_cpu = cpu;
2805         ssa.pm_pmc = pmc;
2806         ssa.pm_state = PMC_STATE_DISABLED;
2807         return (PMC_CALL(PMCADMIN, &ssa));
2808 }
2809
2810 int
2811 pmc_enable(int cpu, int pmc)
2812 {
2813         struct pmc_op_pmcadmin ssa;
2814
2815         ssa.pm_cpu = cpu;
2816         ssa.pm_pmc = pmc;
2817         ssa.pm_state = PMC_STATE_FREE;
2818         return (PMC_CALL(PMCADMIN, &ssa));
2819 }
2820
2821 /*
2822  * Return a list of events known to a given PMC class.  'cl' is the
2823  * PMC class identifier, 'eventnames' is the returned list of 'const
2824  * char *' pointers pointing to the names of the events. 'nevents' is
2825  * the number of event name pointers returned.
2826  *
2827  * The space for 'eventnames' is allocated using malloc(3).  The caller
2828  * is responsible for freeing this space when done.
2829  */
2830 int
2831 pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
2832     int *nevents)
2833 {
2834         int count;
2835         const char **names;
2836         const struct pmc_event_descr *ev;
2837
2838         switch (cl)
2839         {
2840         case PMC_CLASS_IAF:
2841                 ev = iaf_event_table;
2842                 count = PMC_EVENT_TABLE_SIZE(iaf);
2843                 break;
2844         case PMC_CLASS_IAP:
2845                 /*
2846                  * Return the most appropriate set of event name
2847                  * spellings for the current CPU.
2848                  */
2849                 switch (cpu_info.pm_cputype) {
2850                 default:
2851                 case PMC_CPU_INTEL_ATOM:
2852                         ev = atom_event_table;
2853                         count = PMC_EVENT_TABLE_SIZE(atom);
2854                         break;
2855                 case PMC_CPU_INTEL_ATOM_SILVERMONT:
2856                         ev = atom_silvermont_event_table;
2857                         count = PMC_EVENT_TABLE_SIZE(atom_silvermont);
2858                         break;
2859                 case PMC_CPU_INTEL_CORE:
2860                         ev = core_event_table;
2861                         count = PMC_EVENT_TABLE_SIZE(core);
2862                         break;
2863                 case PMC_CPU_INTEL_CORE2:
2864                 case PMC_CPU_INTEL_CORE2EXTREME:
2865                         ev = core2_event_table;
2866                         count = PMC_EVENT_TABLE_SIZE(core2);
2867                         break;
2868                 case PMC_CPU_INTEL_COREI7:
2869                         ev = corei7_event_table;
2870                         count = PMC_EVENT_TABLE_SIZE(corei7);
2871                         break;
2872                 case PMC_CPU_INTEL_NEHALEM_EX:
2873                         ev = nehalem_ex_event_table;
2874                         count = PMC_EVENT_TABLE_SIZE(nehalem_ex);
2875                         break;
2876                 case PMC_CPU_INTEL_HASWELL:
2877                         ev = haswell_event_table;
2878                         count = PMC_EVENT_TABLE_SIZE(haswell);
2879                         break;
2880                 case PMC_CPU_INTEL_HASWELL_XEON:
2881                         ev = haswell_xeon_event_table;
2882                         count = PMC_EVENT_TABLE_SIZE(haswell_xeon);
2883                         break;
2884                 case PMC_CPU_INTEL_BROADWELL:
2885                         ev = broadwell_event_table;
2886                         count = PMC_EVENT_TABLE_SIZE(broadwell);
2887                         break;
2888                 case PMC_CPU_INTEL_BROADWELL_XEON:
2889                         ev = broadwell_xeon_event_table;
2890                         count = PMC_EVENT_TABLE_SIZE(broadwell_xeon);
2891                         break;
2892                 case PMC_CPU_INTEL_SKYLAKE:
2893                         ev = skylake_event_table;
2894                         count = PMC_EVENT_TABLE_SIZE(skylake);
2895                         break;
2896                 case PMC_CPU_INTEL_IVYBRIDGE:
2897                         ev = ivybridge_event_table;
2898                         count = PMC_EVENT_TABLE_SIZE(ivybridge);
2899                         break;
2900                 case PMC_CPU_INTEL_IVYBRIDGE_XEON:
2901                         ev = ivybridge_xeon_event_table;
2902                         count = PMC_EVENT_TABLE_SIZE(ivybridge_xeon);
2903                         break;
2904                 case PMC_CPU_INTEL_SANDYBRIDGE:
2905                         ev = sandybridge_event_table;
2906                         count = PMC_EVENT_TABLE_SIZE(sandybridge);
2907                         break;
2908                 case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
2909                         ev = sandybridge_xeon_event_table;
2910                         count = PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
2911                         break;
2912                 case PMC_CPU_INTEL_WESTMERE:
2913                         ev = westmere_event_table;
2914                         count = PMC_EVENT_TABLE_SIZE(westmere);
2915                         break;
2916                 case PMC_CPU_INTEL_WESTMERE_EX:
2917                         ev = westmere_ex_event_table;
2918                         count = PMC_EVENT_TABLE_SIZE(westmere_ex);
2919                         break;
2920                 }
2921                 break;
2922         case PMC_CLASS_UCF:
2923                 ev = ucf_event_table;
2924                 count = PMC_EVENT_TABLE_SIZE(ucf);
2925                 break;
2926         case PMC_CLASS_UCP:
2927                 /*
2928                  * Return the most appropriate set of event name
2929                  * spellings for the current CPU.
2930                  */
2931                 switch (cpu_info.pm_cputype) {
2932                 default:
2933                 case PMC_CPU_INTEL_COREI7:
2934                         ev = corei7uc_event_table;
2935                         count = PMC_EVENT_TABLE_SIZE(corei7uc);
2936                         break;
2937                 case PMC_CPU_INTEL_HASWELL:
2938                         ev = haswelluc_event_table;
2939                         count = PMC_EVENT_TABLE_SIZE(haswelluc);
2940                         break;
2941                 case PMC_CPU_INTEL_BROADWELL:
2942                         ev = broadwelluc_event_table;
2943                         count = PMC_EVENT_TABLE_SIZE(broadwelluc);
2944                         break;
2945                 case PMC_CPU_INTEL_SANDYBRIDGE:
2946                         ev = sandybridgeuc_event_table;
2947                         count = PMC_EVENT_TABLE_SIZE(sandybridgeuc);
2948                         break;
2949                 case PMC_CPU_INTEL_WESTMERE:
2950                         ev = westmereuc_event_table;
2951                         count = PMC_EVENT_TABLE_SIZE(westmereuc);
2952                         break;
2953                 }
2954                 break;
2955         case PMC_CLASS_TSC:
2956                 ev = tsc_event_table;
2957                 count = PMC_EVENT_TABLE_SIZE(tsc);
2958                 break;
2959         case PMC_CLASS_K7:
2960                 ev = k7_event_table;
2961                 count = PMC_EVENT_TABLE_SIZE(k7);
2962                 break;
2963         case PMC_CLASS_K8:
2964                 ev = k8_event_table;
2965                 count = PMC_EVENT_TABLE_SIZE(k8);
2966                 break;
2967         case PMC_CLASS_P4:
2968                 ev = p4_event_table;
2969                 count = PMC_EVENT_TABLE_SIZE(p4);
2970                 break;
2971         case PMC_CLASS_P5:
2972                 ev = p5_event_table;
2973                 count = PMC_EVENT_TABLE_SIZE(p5);
2974                 break;
2975         case PMC_CLASS_P6:
2976                 ev = p6_event_table;
2977                 count = PMC_EVENT_TABLE_SIZE(p6);
2978                 break;
2979         case PMC_CLASS_XSCALE:
2980                 ev = xscale_event_table;
2981                 count = PMC_EVENT_TABLE_SIZE(xscale);
2982                 break;
2983         case PMC_CLASS_MIPS24K:
2984                 ev = mips24k_event_table;
2985                 count = PMC_EVENT_TABLE_SIZE(mips24k);
2986                 break;
2987         case PMC_CLASS_OCTEON:
2988                 ev = octeon_event_table;
2989                 count = PMC_EVENT_TABLE_SIZE(octeon);
2990                 break;
2991         case PMC_CLASS_PPC7450:
2992                 ev = ppc7450_event_table;
2993                 count = PMC_EVENT_TABLE_SIZE(ppc7450);
2994                 break;
2995         case PMC_CLASS_PPC970:
2996                 ev = ppc970_event_table;
2997                 count = PMC_EVENT_TABLE_SIZE(ppc970);
2998                 break;
2999         case PMC_CLASS_SOFT:
3000                 ev = soft_event_table;
3001                 count = soft_event_info.pm_nevent;
3002                 break;
3003         default:
3004                 errno = EINVAL;
3005                 return (-1);
3006         }
3007
3008         if ((names = malloc(count * sizeof(const char *))) == NULL)
3009                 return (-1);
3010
3011         *eventnames = names;
3012         *nevents = count;
3013
3014         for (; count--; ev++, names++)
3015                 *names = ev->pm_ev_name;
3016
3017         return (0);
3018 }
3019
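/*
 * Example (illustrative sketch, not part of the library): enumerating the
 * event name spellings known for a class with pmc_event_names_of_class().
 * The name array is malloc()ed on the caller's behalf and should be
 * free()d when done; the strings point into the library's static tables.
 * Assumes <pmc.h>, <err.h>, <stdio.h>, <stdlib.h> and <sysexits.h>.
 */
#if 0
        const char **names;
        int i, nevents;

        if (pmc_init() < 0)
                err(EX_OSERR, "pmc_init() failed");
        if (pmc_event_names_of_class(PMC_CLASS_SOFT, &names, &nevents) < 0)
                err(EX_OSERR, "pmc_event_names_of_class() failed");
        for (i = 0; i < nevents; i++)   /* print one event name per line */
                printf("%s\n", names[i]);
        free(names);
#endif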
3020 int
3021 pmc_flush_logfile(void)
3022 {
3023         return (PMC_CALL(FLUSHLOG, 0));
3024 }
3025
3026 int
3027 pmc_close_logfile(void)
3028 {
3029         return (PMC_CALL(CLOSELOG, 0));
3030 }
3031
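/*
 * Example (illustrative sketch): sending the driver's log records to a file
 * and then flushing and closing the log.  pmc_configure_logfile() is part
 * of the public <pmc.h> interface but is defined elsewhere in this file;
 * the path and error handling below are placeholders.  Assumes <fcntl.h>,
 * <unistd.h>, <err.h> and <sysexits.h>.
 */
#if 0
        int fd;

        fd = open("/tmp/pmc.log", O_WRONLY | O_CREAT | O_TRUNC, 0600);
        if (fd < 0)
                err(EX_OSERR, "open() failed");
        if (pmc_configure_logfile(fd) < 0)      /* start logging to 'fd' */
                err(EX_OSERR, "pmc_configure_logfile() failed");

        /* ... allocate, start and sample PMCs here ... */

        if (pmc_flush_logfile() < 0)            /* push out buffered records */
                warn("pmc_flush_logfile() failed");
        if (pmc_close_logfile() < 0)            /* stop logging altogether */
                warn("pmc_close_logfile() failed");
        (void)close(fd);
#endif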
3032 int
3033 pmc_get_driver_stats(struct pmc_driverstats *ds)
3034 {
3035         struct pmc_op_getdriverstats gms;
3036
3037         if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
3038                 return (-1);
3039
3040         /* copy out fields in the current userland<->library interface */
3041         ds->pm_intr_ignored    = gms.pm_intr_ignored;
3042         ds->pm_intr_processed  = gms.pm_intr_processed;
3043         ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
3044         ds->pm_syscalls        = gms.pm_syscalls;
3045         ds->pm_syscall_errors  = gms.pm_syscall_errors;
3046         ds->pm_buffer_requests = gms.pm_buffer_requests;
3047         ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
3048         ds->pm_log_sweeps      = gms.pm_log_sweeps;
3049         return (0);
3050 }
3051
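/*
 * Example (illustrative sketch): dumping the statistics copied out above.
 * Assumes <pmc.h>, <stdio.h>, <stdint.h> and a prior successful pmc_init().
 */
#if 0
        struct pmc_driverstats ds;

        if (pmc_get_driver_stats(&ds) == 0) {
                printf("interrupts: %ju processed, %ju ignored, "
                    "%ju buffer-full\n",
                    (uintmax_t)ds.pm_intr_processed,
                    (uintmax_t)ds.pm_intr_ignored,
                    (uintmax_t)ds.pm_intr_bufferfull);
                printf("syscalls: %ju (%ju errors), log sweeps: %ju\n",
                    (uintmax_t)ds.pm_syscalls,
                    (uintmax_t)ds.pm_syscall_errors,
                    (uintmax_t)ds.pm_log_sweeps);
        }
#endif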
3052 int
3053 pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
3054 {
3055         struct pmc_op_getmsr gm;
3056
3057         gm.pm_pmcid = pmc;
3058         if (PMC_CALL(PMCGETMSR, &gm) < 0)
3059                 return (-1);
3060         *msr = gm.pm_msr;
3061         return (0);
3062 }
3063
3064 int
3065 pmc_init(void)
3066 {
3067         int error, pmc_mod_id;
3068         unsigned int n;
3069         uint32_t abi_version;
3070         struct module_stat pmc_modstat;
3071         struct pmc_op_getcpuinfo op_cpu_info;
3072 #if defined(__amd64__) || defined(__i386__)
3073         int cpu_has_iaf_counters;
3074         unsigned int t;
3075 #endif
3076
3077         if (pmc_syscall != -1) /* already initialized */
3078                 return (0);
3079
3080         /* retrieve the system call number from the KLD */
3081         if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
3082                 return (-1);
3083
3084         pmc_modstat.version = sizeof(struct module_stat);
3085         if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
3086                 return (-1);
3087
3088         pmc_syscall = pmc_modstat.data.intval;
3089
3090         /* check the kernel module's ABI against our compiled-in version */
3091         abi_version = PMC_VERSION;
3092         if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
3093                 return (pmc_syscall = -1);
3094
3095         /* ignore patch & minor numbers for the comparison */
3096         if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
3097                 errno  = EPROGMISMATCH;
3098                 return (pmc_syscall = -1);
3099         }
3100
3101         if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
3102                 return (pmc_syscall = -1);
3103
3104         cpu_info.pm_cputype = op_cpu_info.pm_cputype;
3105         cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
3106         cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
3107         cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
3108         for (n = 0; n < cpu_info.pm_nclass; n++)
3109                 cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];
3110
3111         pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
3112             sizeof(struct pmc_class_descr *));
3113
3114         if (pmc_class_table == NULL)
3115                 return (-1);
3116
3117         for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
3118                 pmc_class_table[n] = NULL;
3119
3120         /*
3121          * Retrieve the list of software (SOFT) events from the kernel.
3122          */
3123         soft_event_info.pm_class = PMC_CLASS_SOFT;
3124         if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
3125                 return (pmc_syscall = -1);
3126
3127         /* Map soft events to static list. */
3128         for (n = 0; n < soft_event_info.pm_nevent; n++) {
3129                 soft_event_table[n].pm_ev_name =
3130                     soft_event_info.pm_events[n].pm_ev_name;
3131                 soft_event_table[n].pm_ev_code =
3132                     soft_event_info.pm_events[n].pm_ev_code;
3133         }
3134         soft_class_table_descr.pm_evc_event_table_size =
3135             soft_event_info.pm_nevent;
3136         soft_class_table_descr.pm_evc_event_table =
3137             soft_event_table;
3138
3139         /*
3140          * Fill in the class table.
3141          */
3142         n = 0;
3143
3144         /* The soft events class is always present; add it first. */
3145         pmc_class_table[n++] = &soft_class_table_descr;
3146 #if defined(__amd64__) || defined(__i386__)
3147         if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
3148                 pmc_class_table[n++] = &tsc_class_table_descr;
3149
3150         /*
3151          * Check whether this CPU has fixed-function counters.
3152          */
3153         cpu_has_iaf_counters = 0;
3154         for (t = 0; t < cpu_info.pm_nclass; t++)
3155                 if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF &&
3156                     cpu_info.pm_classes[t].pm_num > 0)
3157                         cpu_has_iaf_counters = 1;
3158 #endif
3159
3160 #define PMC_MDEP_INIT(C) do {                                   \
3161                 pmc_mdep_event_aliases    = C##_aliases;        \
3162                 pmc_mdep_class_list  = C##_pmc_classes;         \
3163                 pmc_mdep_class_list_size =                      \
3164                     PMC_TABLE_SIZE(C##_pmc_classes);            \
3165         } while (0)
3166
3167 #define PMC_MDEP_INIT_INTEL_V2(C) do {                                  \
3168                 PMC_MDEP_INIT(C);                                       \
3169                 pmc_class_table[n++] = &iaf_class_table_descr;          \
3170                 if (!cpu_has_iaf_counters)                              \
3171                         pmc_mdep_event_aliases =                        \
3172                                 C##_aliases_without_iaf;                \
3173                 pmc_class_table[n] = &C##_class_table_descr;            \
3174         } while (0)
3175
3176         /* Configure the event name parser. */
3177         switch (cpu_info.pm_cputype) {
3178 #if defined(__i386__)
3179         case PMC_CPU_AMD_K7:
3180                 PMC_MDEP_INIT(k7);
3181                 pmc_class_table[n] = &k7_class_table_descr;
3182                 break;
3183         case PMC_CPU_INTEL_P5:
3184                 PMC_MDEP_INIT(p5);
3185                 pmc_class_table[n]  = &p5_class_table_descr;
3186                 break;
3187         case PMC_CPU_INTEL_P6:          /* P6 ... Pentium M CPUs have */
3188         case PMC_CPU_INTEL_PII:         /* similar PMCs. */
3189         case PMC_CPU_INTEL_PIII:
3190         case PMC_CPU_INTEL_PM:
3191                 PMC_MDEP_INIT(p6);
3192                 pmc_class_table[n] = &p6_class_table_descr;
3193                 break;
3194 #endif
3195 #if defined(__amd64__) || defined(__i386__)
3196         case PMC_CPU_AMD_K8:
3197                 PMC_MDEP_INIT(k8);
3198                 pmc_class_table[n] = &k8_class_table_descr;
3199                 break;
3200         case PMC_CPU_INTEL_ATOM:
3201                 PMC_MDEP_INIT_INTEL_V2(atom);
3202                 break;
3203         case PMC_CPU_INTEL_ATOM_SILVERMONT:
3204                 PMC_MDEP_INIT_INTEL_V2(atom_silvermont);
3205                 break;
3206         case PMC_CPU_INTEL_CORE:
3207                 PMC_MDEP_INIT(core);
3208                 pmc_class_table[n] = &core_class_table_descr;
3209                 break;
3210         case PMC_CPU_INTEL_CORE2:
3211         case PMC_CPU_INTEL_CORE2EXTREME:
3212                 PMC_MDEP_INIT_INTEL_V2(core2);
3213                 break;
3214         case PMC_CPU_INTEL_COREI7:
3215                 pmc_class_table[n++] = &ucf_class_table_descr;
3216                 pmc_class_table[n++] = &corei7uc_class_table_descr;
3217                 PMC_MDEP_INIT_INTEL_V2(corei7);
3218                 break;
3219         case PMC_CPU_INTEL_NEHALEM_EX:
3220                 PMC_MDEP_INIT_INTEL_V2(nehalem_ex);
3221                 break;
3222         case PMC_CPU_INTEL_HASWELL:
3223                 pmc_class_table[n++] = &ucf_class_table_descr;
3224                 pmc_class_table[n++] = &haswelluc_class_table_descr;
3225                 PMC_MDEP_INIT_INTEL_V2(haswell);
3226                 break;
3227         case PMC_CPU_INTEL_HASWELL_XEON:
3228                 PMC_MDEP_INIT_INTEL_V2(haswell_xeon);
3229                 break;
3230         case PMC_CPU_INTEL_BROADWELL:
3231                 pmc_class_table[n++] = &ucf_class_table_descr;
3232                 pmc_class_table[n++] = &broadwelluc_class_table_descr;
3233                 PMC_MDEP_INIT_INTEL_V2(broadwell);
3234                 break;
3235         case PMC_CPU_INTEL_BROADWELL_XEON:
3236                 PMC_MDEP_INIT_INTEL_V2(broadwell_xeon);
3237                 break;
3238         case PMC_CPU_INTEL_SKYLAKE:
3239                 PMC_MDEP_INIT_INTEL_V2(skylake);
3240                 break;
3241         case PMC_CPU_INTEL_IVYBRIDGE:
3242                 PMC_MDEP_INIT_INTEL_V2(ivybridge);
3243                 break;
3244         case PMC_CPU_INTEL_IVYBRIDGE_XEON:
3245                 PMC_MDEP_INIT_INTEL_V2(ivybridge_xeon);
3246                 break;
3247         case PMC_CPU_INTEL_SANDYBRIDGE:
3248                 pmc_class_table[n++] = &ucf_class_table_descr;
3249                 pmc_class_table[n++] = &sandybridgeuc_class_table_descr;
3250                 PMC_MDEP_INIT_INTEL_V2(sandybridge);
3251                 break;
3252         case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
3253                 PMC_MDEP_INIT_INTEL_V2(sandybridge_xeon);
3254                 break;
3255         case PMC_CPU_INTEL_WESTMERE:
3256                 pmc_class_table[n++] = &ucf_class_table_descr;
3257                 pmc_class_table[n++] = &westmereuc_class_table_descr;
3258                 PMC_MDEP_INIT_INTEL_V2(westmere);
3259                 break;
3260         case PMC_CPU_INTEL_WESTMERE_EX:
3261                 PMC_MDEP_INIT_INTEL_V2(westmere_ex);
3262                 break;
3263         case PMC_CPU_INTEL_PIV:
3264                 PMC_MDEP_INIT(p4);
3265                 pmc_class_table[n] = &p4_class_table_descr;
3266                 break;
3267 #endif
3268         case PMC_CPU_GENERIC:
3269                 PMC_MDEP_INIT(generic);
3270                 break;
3271 #if defined(__XSCALE__)
3272         case PMC_CPU_INTEL_XSCALE:
3273                 PMC_MDEP_INIT(xscale);
3274                 pmc_class_table[n] = &xscale_class_table_descr;
3275                 break;
3276 #endif
3277 #if defined(__mips__)
3278         case PMC_CPU_MIPS_24K:
3279                 PMC_MDEP_INIT(mips24k);
3280                 pmc_class_table[n] = &mips24k_class_table_descr;
3281                 break;
3282         case PMC_CPU_MIPS_OCTEON:
3283                 PMC_MDEP_INIT(octeon);
3284                 pmc_class_table[n] = &octeon_class_table_descr;
3285                 break;
3286 #endif /* __mips__ */
3287 #if defined(__powerpc__)
3288         case PMC_CPU_PPC_7450:
3289                 PMC_MDEP_INIT(ppc7450);
3290                 pmc_class_table[n] = &ppc7450_class_table_descr;
3291                 break;
3292         case PMC_CPU_PPC_970:
3293                 PMC_MDEP_INIT(ppc970);
3294                 pmc_class_table[n] = &ppc970_class_table_descr;
3295                 break;
3296 #endif
3297         default:
3298                 /*
3299                  * A CPU type this version of the library knows nothing
3300                  * about.  This should not happen, since the ABI version
3301                  * check above should have caught the mismatch.
3302                  */
3303                 errno = ENXIO;
3304                 return (pmc_syscall = -1);
3305         }
3306
3307         return (0);
3308 }
3309
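/*
 * Example (illustrative sketch): the usual start-up sequence for a libpmc
 * consumer.  pmc_init() must succeed before any other routine in this
 * library is used, and requires hwpmc(4) to be loaded.  Assumes <pmc.h>,
 * <err.h>, <stdio.h> and <sysexits.h>.
 */
#if 0
        int cpu, ncpu;

        if (pmc_init() < 0)
                err(EX_UNAVAILABLE, "hwpmc not loaded or ABI mismatch");
        ncpu = pmc_ncpu();
        for (cpu = 0; cpu < ncpu; cpu++)
                printf("cpu %d: %d PMC rows\n", cpu, pmc_npmc(cpu));
#endif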
3310 const char *
3311 pmc_name_of_capability(enum pmc_caps cap)
3312 {
3313         int i;
3314
3315         /*
3316          * 'cap' should have a single bit set and should be in
3317          * range.
3318          */
3319         if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
3320             cap > PMC_CAP_LAST) {
3321                 errno = EINVAL;
3322                 return (NULL);
3323         }
3324
3325         i = ffs(cap);
3326         return (pmc_capability_names[i - 1]);
3327 }
3328
3329 const char *
3330 pmc_name_of_class(enum pmc_class pc)
3331 {
3332         if ((int) pc >= PMC_CLASS_FIRST &&
3333             pc <= PMC_CLASS_LAST)
3334                 return (pmc_class_names[pc]);
3335
3336         errno = EINVAL;
3337         return (NULL);
3338 }
3339
3340 const char *
3341 pmc_name_of_cputype(enum pmc_cputype cp)
3342 {
3343         size_t n;
3344
3345         for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
3346                 if (cp == pmc_cputype_names[n].pm_cputype)
3347                         return (pmc_cputype_names[n].pm_name);
3348
3349         errno = EINVAL;
3350         return (NULL);
3351 }
3352
3353 const char *
3354 pmc_name_of_disposition(enum pmc_disp pd)
3355 {
3356         if ((int) pd >= PMC_DISP_FIRST &&
3357             pd <= PMC_DISP_LAST)
3358                 return (pmc_disposition_names[pd]);
3359
3360         errno = EINVAL;
3361         return (NULL);
3362 }
3363
3364 const char *
3365 _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
3366 {
3367         const struct pmc_event_descr *ev, *evfence;
3368
3369         ev = evfence = NULL;
3370         if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
3371                 ev = iaf_event_table;
3372                 evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
3373         } else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
3374                 switch (cpu) {
3375                 case PMC_CPU_INTEL_ATOM:
3376                         ev = atom_event_table;
3377                         evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
3378                         break;
3379                 case PMC_CPU_INTEL_ATOM_SILVERMONT:
3380                         ev = atom_silvermont_event_table;
3381                         evfence = atom_silvermont_event_table +
3382                             PMC_EVENT_TABLE_SIZE(atom_silvermont);
3383                         break;
3384                 case PMC_CPU_INTEL_CORE:
3385                         ev = core_event_table;
3386                         evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
3387                         break;
3388                 case PMC_CPU_INTEL_CORE2:
3389                 case PMC_CPU_INTEL_CORE2EXTREME:
3390                         ev = core2_event_table;
3391                         evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
3392                         break;
3393                 case PMC_CPU_INTEL_COREI7:
3394                         ev = corei7_event_table;
3395                         evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7);
3396                         break;
3397                 case PMC_CPU_INTEL_NEHALEM_EX:
3398                         ev = nehalem_ex_event_table;
3399                         evfence = nehalem_ex_event_table +
3400                             PMC_EVENT_TABLE_SIZE(nehalem_ex);
3401                         break;
3402                 case PMC_CPU_INTEL_HASWELL:
3403                         ev = haswell_event_table;
3404                         evfence = haswell_event_table + PMC_EVENT_TABLE_SIZE(haswell);
3405                         break;
3406                 case PMC_CPU_INTEL_HASWELL_XEON:
3407                         ev = haswell_xeon_event_table;
3408                         evfence = haswell_xeon_event_table + PMC_EVENT_TABLE_SIZE(haswell_xeon);
3409                         break;
3410                 case PMC_CPU_INTEL_BROADWELL:
3411                         ev = broadwell_event_table;
3412                         evfence = broadwell_event_table + PMC_EVENT_TABLE_SIZE(broadwell);
3413                         break;
3414                 case PMC_CPU_INTEL_BROADWELL_XEON:
3415                         ev = broadwell_xeon_event_table;
3416                         evfence = broadwell_xeon_event_table + PMC_EVENT_TABLE_SIZE(broadwell_xeon);
3417                         break;
3418                 case PMC_CPU_INTEL_SKYLAKE:
3419                         ev = skylake_event_table;
3420                         evfence = skylake_event_table + PMC_EVENT_TABLE_SIZE(skylake);
3421                         break;
3422                 case PMC_CPU_INTEL_IVYBRIDGE:
3423                         ev = ivybridge_event_table;
3424                         evfence = ivybridge_event_table + PMC_EVENT_TABLE_SIZE(ivybridge);
3425                         break;
3426                 case PMC_CPU_INTEL_IVYBRIDGE_XEON:
3427                         ev = ivybridge_xeon_event_table;
3428                         evfence = ivybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(ivybridge_xeon);
3429                         break;
3430                 case PMC_CPU_INTEL_SANDYBRIDGE:
3431                         ev = sandybridge_event_table;
3432                         evfence = sandybridge_event_table + PMC_EVENT_TABLE_SIZE(sandybridge);
3433                         break;
3434                 case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
3435                         ev = sandybridge_xeon_event_table;
3436                         evfence = sandybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
3437                         break;
3438                 case PMC_CPU_INTEL_WESTMERE:
3439                         ev = westmere_event_table;
3440                         evfence = westmere_event_table + PMC_EVENT_TABLE_SIZE(westmere);
3441                         break;
3442                 case PMC_CPU_INTEL_WESTMERE_EX:
3443                         ev = westmere_ex_event_table;
3444                         evfence = westmere_ex_event_table +
3445                             PMC_EVENT_TABLE_SIZE(westmere_ex);
3446                         break;
3447                 default:        /* Unknown CPU type. */
3448                         break;
3449                 }
3450         } else if (pe >= PMC_EV_UCF_FIRST && pe <= PMC_EV_UCF_LAST) {
3451                 ev = ucf_event_table;
3452                 evfence = ucf_event_table + PMC_EVENT_TABLE_SIZE(ucf);
3453         } else if (pe >= PMC_EV_UCP_FIRST && pe <= PMC_EV_UCP_LAST) {
3454                 switch (cpu) {
3455                 case PMC_CPU_INTEL_COREI7:
3456                         ev = corei7uc_event_table;
3457                         evfence = corei7uc_event_table + PMC_EVENT_TABLE_SIZE(corei7uc);
3458                         break;
3459                 case PMC_CPU_INTEL_SANDYBRIDGE:
3460                         ev = sandybridgeuc_event_table;
3461                         evfence = sandybridgeuc_event_table + PMC_EVENT_TABLE_SIZE(sandybridgeuc);
3462                         break;
3463                 case PMC_CPU_INTEL_WESTMERE:
3464                         ev = westmereuc_event_table;
3465                         evfence = westmereuc_event_table + PMC_EVENT_TABLE_SIZE(westmereuc);
3466                         break;
3467                 default:        /* Unknown CPU type. */
3468                         break;
3469                 }
3470         } else if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
3471                 ev = k7_event_table;
3472                 evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
3473         } else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
3474                 ev = k8_event_table;
3475                 evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
3476         } else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
3477                 ev = p4_event_table;
3478                 evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
3479         } else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
3480                 ev = p5_event_table;
3481                 evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
3482         } else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
3483                 ev = p6_event_table;
3484                 evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
3485         } else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
3486                 ev = xscale_event_table;
3487                 evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
3488         } else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
3489                 ev = mips24k_event_table;
3490                 evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
3491         } else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
3492                 ev = octeon_event_table;
3493                 evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
3494         } else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
3495                 ev = ppc7450_event_table;
3496                 evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
3497         } else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
3498                 ev = ppc970_event_table;
3499                 evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
3500         } else if (pe == PMC_EV_TSC_TSC) {
3501                 ev = tsc_event_table;
3502                 evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
3503         } else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
3504                 ev = soft_event_table;
3505                 evfence = soft_event_table + soft_event_info.pm_nevent;
3506         }
3507
3508         for (; ev != evfence; ev++)
3509                 if (pe == ev->pm_ev_code)
3510                         return (ev->pm_ev_name);
3511
3512         return (NULL);
3513 }
3514
3515 const char *
3516 pmc_name_of_event(enum pmc_event pe)
3517 {
3518         const char *n;
3519
3520         if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
3521                 return (n);
3522
3523         errno = EINVAL;
3524         return (NULL);
3525 }
3526
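/*
 * Example (illustrative sketch): the pmc_name_of_*() helpers turn the enums
 * used by the kernel interface into printable strings, returning NULL and
 * setting errno to EINVAL for out-of-range values.  pmc_name_of_event()
 * consults the CPU type recorded by pmc_init(), so call that first; the
 * enum values below are only placeholders.
 */
#if 0
        printf("class %s, mode %s, event %s\n",
            pmc_name_of_class(PMC_CLASS_TSC),
            pmc_name_of_mode(PMC_MODE_SC),
            pmc_name_of_event(PMC_EV_TSC_TSC));
#endif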
3527 const char *
3528 pmc_name_of_mode(enum pmc_mode pm)
3529 {
3530         if ((int) pm >= PMC_MODE_FIRST &&
3531             pm <= PMC_MODE_LAST)
3532                 return (pmc_mode_names[pm]);
3533
3534         errno = EINVAL;
3535         return (NULL);
3536 }
3537
3538 const char *
3539 pmc_name_of_state(enum pmc_state ps)
3540 {
3541         if ((int) ps >= PMC_STATE_FIRST &&
3542             ps <= PMC_STATE_LAST)
3543                 return (pmc_state_names[ps]);
3544
3545         errno = EINVAL;
3546         return (NULL);
3547 }
3548
3549 int
3550 pmc_ncpu(void)
3551 {
3552         if (pmc_syscall == -1) {
3553                 errno = ENXIO;
3554                 return (-1);
3555         }
3556
3557         return (cpu_info.pm_ncpu);
3558 }
3559
3560 int
3561 pmc_npmc(int cpu)
3562 {
3563         if (pmc_syscall == -1) {
3564                 errno = ENXIO;
3565                 return (-1);
3566         }
3567
3568         if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
3569                 errno = EINVAL;
3570                 return (-1);
3571         }
3572
3573         return (cpu_info.pm_npmc);
3574 }
3575
3576 int
3577 pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
3578 {
3579         int nbytes, npmc;
3580         struct pmc_op_getpmcinfo *pmci;
3581
3582         if ((npmc = pmc_npmc(cpu)) < 0)
3583                 return (-1);
3584
3585         nbytes = sizeof(struct pmc_op_getpmcinfo) +
3586             npmc * sizeof(struct pmc_info);
3587
3588         if ((pmci = calloc(1, nbytes)) == NULL)
3589                 return (-1);
3590
3591         pmci->pm_cpu  = cpu;
3592
3593         if (PMC_CALL(GETPMCINFO, pmci) < 0) {
3594                 free(pmci);
3595                 return (-1);
3596         }
3597
3598         /* kernel<->library, library<->userland interfaces are identical */
3599         *ppmci = (struct pmc_pmcinfo *) pmci;
3600         return (0);
3601 }
3602
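/*
 * Example (illustrative sketch): retrieving the per-row PMC state of CPU 0.
 * The buffer returned through *ppmci is allocated by pmc_pmcinfo() and is
 * the caller's to free(); the 'pm_pmcs', 'pm_name' and 'pm_class' field
 * names are taken from <sys/pmc.h> and are assumptions of this sketch.
 */
#if 0
        struct pmc_pmcinfo *pi;
        int i, npmc;

        if ((npmc = pmc_npmc(0)) < 0 || pmc_pmcinfo(0, &pi) < 0)
                err(EX_OSERR, "cannot retrieve PMC info for CPU 0");
        for (i = 0; i < npmc; i++)
                printf("row %d: %s (class %s)\n", i, pi->pm_pmcs[i].pm_name,
                    pmc_name_of_class(pi->pm_pmcs[i].pm_class));
        free(pi);
#endif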
3603 int
3604 pmc_read(pmc_id_t pmc, pmc_value_t *value)
3605 {
3606         struct pmc_op_pmcrw pmc_read_op;
3607
3608         pmc_read_op.pm_pmcid = pmc;
3609         pmc_read_op.pm_flags = PMC_F_OLDVALUE;
3610         pmc_read_op.pm_value = -1;
3611
3612         if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
3613                 return (-1);
3614
3615         *value = pmc_read_op.pm_value;
3616         return (0);
3617 }
3618
3619 int
3620 pmc_release(pmc_id_t pmc)
3621 {
3622         struct pmc_op_simple    pmc_release_args;
3623
3624         pmc_release_args.pm_pmcid = pmc;
3625         return (PMC_CALL(PMCRELEASE, &pmc_release_args));
3626 }
3627
3628 int
3629 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
3630 {
3631         struct pmc_op_pmcrw pmc_rw_op;
3632
3633         pmc_rw_op.pm_pmcid = pmc;
3634         pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
3635         pmc_rw_op.pm_value = newvalue;
3636
3637         if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
3638                 return (-1);
3639
3640         *oldvaluep = pmc_rw_op.pm_value;
3641         return (0);
3642 }
3643
3644 int
3645 pmc_set(pmc_id_t pmc, pmc_value_t value)
3646 {
3647         struct pmc_op_pmcsetcount sc;
3648
3649         sc.pm_pmcid = pmc;
3650         sc.pm_count = value;
3651
3652         if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
3653                 return (-1);
3654         return (0);
3655 }
3656
3657 int
3658 pmc_start(pmc_id_t pmc)
3659 {
3660         struct pmc_op_simple    pmc_start_args;
3661
3662         pmc_start_args.pm_pmcid = pmc;
3663         return (PMC_CALL(PMCSTART, &pmc_start_args));
3664 }
3665
3666 int
3667 pmc_stop(pmc_id_t pmc)
3668 {
3669         struct pmc_op_simple    pmc_stop_args;
3670
3671         pmc_stop_args.pm_pmcid = pmc;
3672         return (PMC_CALL(PMCSTOP, &pmc_stop_args));
3673 }
3674
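/*
 * Example (illustrative sketch): a minimal counting-mode measurement built
 * on the start/stop/read primitives above.  pmc_allocate() and pmc_attach()
 * belong to the public libpmc API but are defined elsewhere; the event
 * specification "instructions", the placeholder do_work() and the error
 * handling are assumptions of this sketch.  pmc_init() must already have
 * succeeded.
 */
#if 0
        pmc_id_t pmcid;
        pmc_value_t v;

        if (pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY,
            &pmcid) < 0)
                err(EX_OSERR, "pmc_allocate() failed");
        if (pmc_attach(pmcid, 0) < 0)           /* pid 0: attach to self */
                err(EX_OSERR, "pmc_attach() failed");
        if (pmc_start(pmcid) < 0)
                err(EX_OSERR, "pmc_start() failed");

        do_work();                              /* the code being measured */

        if (pmc_stop(pmcid) < 0 || pmc_read(pmcid, &v) < 0)
                err(EX_OSERR, "pmc_stop()/pmc_read() failed");
        printf("count: %ju\n", (uintmax_t)v);
        (void)pmc_release(pmcid);
#endif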
3675 int
3676 pmc_width(pmc_id_t pmcid, uint32_t *width)
3677 {
3678         unsigned int i;
3679         enum pmc_class cl;
3680
3681         cl = PMC_ID_TO_CLASS(pmcid);
3682         for (i = 0; i < cpu_info.pm_nclass; i++)
3683                 if (cpu_info.pm_classes[i].pm_class == cl) {
3684                         *width = cpu_info.pm_classes[i].pm_width;
3685                         return (0);
3686                 }
3687         errno = EINVAL;
3688         return (-1);
3689 }
3690
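/*
 * Example (illustrative sketch): using the width reported above to compute
 * the largest value a counter of a given class can hold, e.g. when picking
 * a sampling reload count for pmc_set().  'pmcid' is assumed to name an
 * already allocated PMC.
 */
#if 0
        uint32_t width;
        pmc_value_t maxval;

        if (pmc_width(pmcid, &width) < 0)
                err(EX_OSERR, "pmc_width() failed");
        maxval = (width < 64) ? (((pmc_value_t)1 << width) - 1) :
            ~(pmc_value_t)0;
        printf("counter width %u bits, max value %ju\n", width,
            (uintmax_t)maxval);
#endif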
3691 int
3692 pmc_write(pmc_id_t pmc, pmc_value_t value)
3693 {
3694         struct pmc_op_pmcrw pmc_write_op;
3695
3696         pmc_write_op.pm_pmcid = pmc;
3697         pmc_write_op.pm_flags = PMC_F_NEWVALUE;
3698         pmc_write_op.pm_value = value;
3699         return (PMC_CALL(PMCRW, &pmc_write_op));
3700 }
3701
3702 int
3703 pmc_writelog(uint32_t userdata)
3704 {
3705         struct pmc_op_writelog wl;
3706
3707         wl.pm_userdata = userdata;
3708         return (PMC_CALL(WRITELOG, &wl));
3709 }
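/*
 * Example (illustrative sketch): tagging the sample log with an
 * application-defined marker.  Only meaningful once a log file has been
 * configured with pmc_configure_logfile(); the value is a placeholder.
 */
#if 0
        if (pmc_writelog(0xdeadc0de) < 0)
                warn("pmc_writelog() failed");
#endif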