2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2018, Matthew Macy
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/types.h>
32 #include <sys/errno.h>
33 #include <sys/sysctl.h>
42 #include <libpmcstat.h>
43 #include "pmu-events/pmu-events.h"
45 #if defined(__amd64__) || defined(__i386__)
/*
 * Map legacy / convenience PMC event names to canonical Intel event
 * names from the pmu-events tables.  Both underscore and hyphen
 * spellings of each legacy name are accepted.
 * NOTE(review): the NULL sentinel entry that terminates this table is
 * not visible in this chunk -- confirm it exists, since pmu_alias_get()
 * iterates until pa_alias == NULL.
 */
57 static struct pmu_alias pmu_intel_alias_table[] = {
58 {"UNHALTED_CORE_CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
59 {"UNHALTED-CORE-CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
60 {"LLC_MISSES", "LONGEST_LAT_CACHE.MISS"},
61 {"LLC-MISSES", "LONGEST_LAT_CACHE.MISS"},
62 {"LLC_REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
63 {"LLC-REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
64 {"LLC_MISS_RHITM", "mem_load_l3_miss_retired.remote_hitm"},
65 {"LLC-MISS-RHITM", "mem_load_l3_miss_retired.remote_hitm"},
66 {"RESOURCE_STALL", "RESOURCE_STALLS.ANY"},
67 {"RESOURCE_STALLS_ANY", "RESOURCE_STALLS.ANY"},
68 {"BRANCH_INSTRUCTION_RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
69 {"BRANCH-INSTRUCTION-RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
70 {"BRANCH_MISSES_RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
71 {"BRANCH-MISSES-RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
/* Lower-case entries below are perf(1)-style generic event names. */
72 {"cycles", "tsc-tsc"},
73 {"unhalted-cycles", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
74 {"instructions", "inst-retired.any_p"},
75 {"branch-mispredicts", "br_misp_retired.all_branches"},
76 {"branches", "br_inst_retired.all_branches"},
77 {"interrupts", "hw_interrupts.received"},
78 {"ic-misses", "frontend_retired.l1i_miss"},
/*
 * AMD counterpart of the Intel alias table above.  Only the first two
 * entries are visible in this chunk; the remainder (and the NULL
 * sentinel) are elided -- confirm against the full source.
 */
82 static struct pmu_alias pmu_amd_alias_table[] = {
83 {"UNHALTED_CORE_CYCLES", "ls_not_halted_cyc"},
84 {"UNHALTED-CORE-CYCLES", "ls_not_halted_cyc"},
/*
 * Fragment of the CPU-vendor probe (function header elided): read the
 * "kern.hwpmc.cpuid" sysctl string -- first a NULL-buffer call to learn
 * the required size, then the actual fetch -- and classify the vendor
 * by substring match.  Returns PMU_INVALID on any sysctl/alloc failure.
 */
96 if (sysctlbyname("kern.hwpmc.cpuid", (void *)NULL, &s,
97 (void *)NULL, 0) == -1)
99 if ((buf = malloc(s + 1)) == NULL)
100 return (PMU_INVALID);
101 if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
102 (void *)NULL, 0) == -1) {
/* NOTE(review): no free(buf) visible on this error path -- confirm the
 * elided lines release the buffer before returning. */
104 return (PMU_INVALID);
/* Vendor strings per x86 CPUID convention. */
106 if (strcasestr(buf, "AuthenticAMD") != NULL)
108 else if (strcasestr(buf, "GenuineIntel") != NULL)
117 * The Intel fixed mode counters are:
118 * "inst_retired.any",
119 * "cpu_clk_unhalted.thread",
120 * "cpu_clk_unhalted.thread_any",
121 * "cpu_clk_unhalted.ref_tsc",
/*
 * Translate a user-supplied event name through the vendor-specific
 * alias table (case-insensitive).  Selects the Intel or AMD table
 * based on pmu_events_mfr(); the fall-through that returns the input
 * name unchanged when no alias matches is elided from this view --
 * presumably "return (name);" -- confirm against the full source.
 */
126 pmu_alias_get(const char *name)
129 struct pmu_alias *pa;
130 struct pmu_alias *pmu_alias_table;
132 if ((mfr = pmu_events_mfr()) == PMU_INVALID)
135 pmu_alias_table = pmu_amd_alias_table;
136 else if (mfr == PMU_INTEL)
137 pmu_alias_table = pmu_intel_alias_table;
/* Linear scan; tables are small, terminated by a NULL pa_alias. */
141 for (pa = pmu_alias_table; pa->pa_alias != NULL; pa++)
142 if (strcasecmp(name, pa->pa_alias) == 0)
143 return (pa->pa_name);
/*
 * Parsed representation of a pmu-events "event" string
 * (comma-separated key=value pairs, see pmu_parse_event()).
 * Only some fields are visible in this chunk; the rest (umask, event,
 * period, cmask, inv, edge, any, ldlat, fc_mask, ch_mask, ...) are
 * elided.
 */
148 struct pmu_event_desc {
150 uint64_t ped_offcore_rsp;
/* AMD L3-cache event qualifiers (thread/slice masks). */
151 uint64_t ped_l3_thread;
152 uint64_t ped_l3_slice;
/* Intel frontend-retired MSR value (frontend= key). */
154 uint32_t ped_frontend;
156 uint32_t ped_config1;
/*
 * Find the pmu_events_map entry whose cpuid regex fully matches this
 * machine's CPU id string.  If a cpuid string is passed in it is used
 * directly; otherwise the id is fetched via the "kern.hwpmc.cpuid"
 * sysctl.  Returns NULL if no map entry matches.
 */
166 static const struct pmu_events_map *
167 pmu_events_map_get(const char *cpuid)
170 regmatch_t pmatch[1];
174 const struct pmu_events_map *pme;
/* NOTE(review): a fixed 64-byte copy from cpuid can over-read a
 * shorter string, and callers pass NULL (pmu_events_map_get(NULL)) --
 * the guard distinguishing the two cases is elided here; confirm it. */
177 memcpy(buf, cpuid, 64);
179 if (sysctlbyname("kern.hwpmc.cpuid", (void *)NULL, &s,
180 (void *)NULL, 0) == -1)
182 if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
183 (void *)NULL, 0) == -1)
/* Each map entry's cpuid field is an extended regex; skip entries
 * whose pattern fails to compile rather than aborting the scan. */
186 for (pme = pmu_events_map; pme->cpuid != NULL; pme++) {
187 if (regcomp(&re, pme->cpuid, REG_EXTENDED) != 0) {
188 printf("regex '%s' failed to compile, ignoring\n",
192 match = regexec(&re, buf, 1, pmatch, 0);
/* Require the regex to match the entire cpuid string, not a prefix. */
195 len = pmatch[0].rm_eo - pmatch[0].rm_so;
196 if(len == strlen(buf))
/*
 * Look up an event by name (case-insensitive) in the table selected by
 * pmu_events_map_get().  On success, *idx receives the event's table
 * index (the visible fragment shows i being counted; the store into
 * *idx is elided).  Returns NULL when the map or event is not found.
 */
203 static const struct pmu_event *
204 pmu_event_get(const char *cpuid, const char *event_name, int *idx)
206 const struct pmu_events_map *pme;
207 const struct pmu_event *pe;
210 if ((pme = pmu_events_map_get(cpuid)) == NULL)
/* Table end is an entry with name, desc and event all NULL. */
212 for (i = 0, pe = pme->table; pe->name || pe->desc || pe->event; pe++, i++) {
213 if (pe->name == NULL)
215 if (strcasecmp(pe->name, event_name) == 0) {
/*
 * Public helper: resolve an event name (after alias translation) to
 * its pmu-events table index.  The error return for a failed lookup is
 * elided from this view.
 */
225 pmc_pmu_idx_get_by_event(const char *cpuid, const char *event)
228 const char *realname;
230 realname = pmu_alias_get(event);
231 if (pmu_event_get(cpuid, realname, &idx) == NULL)
/*
 * Public helper: inverse of pmc_pmu_idx_get_by_event() -- return the
 * event name stored at table index idx.
 * NOTE(review): no visible bounds check on idx against the table size;
 * the assert only catches a NULL name at that slot -- callers are
 * presumably trusted to pass an index previously returned by
 * pmc_pmu_idx_get_by_event().
 */
237 pmc_pmu_event_get_by_idx(const char *cpuid, int idx)
239 const struct pmu_events_map *pme;
241 if ((pme = pmu_events_map_get(cpuid)) == NULL)
243 assert(pme->table[idx].name);
244 return (pme->table[idx].name);
/*
 * Parse a pmu-events event string -- comma-separated "key=value"
 * pairs, e.g. "event=0xd1,umask=0x20,period=100003" -- into *ped.
 * Numeric fields use the radix conventional for that key (hex for
 * masks/selectors, decimal for period/cmask/flags).  Unknown keys are
 * ignored unless the PMUDEBUG env var is "true", in which case they
 * are printed.  ped is zeroed first and ped_period defaults to
 * DEFAULT_SAMPLE_COUNT.
 * NOTE(review): 'r' presumably retains the strdup'd pointer for the
 * eventual free(), since strsep() advances 'event' to NULL -- the
 * assignment and free are elided; confirm there is no leak.
 */
248 pmu_parse_event(struct pmu_event_desc *ped, const char *eventin)
251 char *kvp, *key, *value, *r;
/* Work on a private copy: strsep() mutates its input. */
254 if ((event = strdup(eventin)) == NULL)
257 bzero(ped, sizeof(*ped));
258 ped->ped_period = DEFAULT_SAMPLE_COUNT;
260 while ((kvp = strsep(&event, ",")) != NULL) {
261 key = strsep(&kvp, "=");
265 if (strcmp(key, "umask") == 0)
266 ped->ped_umask = strtol(value, NULL, 16);
267 else if (strcmp(key, "event") == 0)
268 ped->ped_event = strtol(value, NULL, 16);
269 else if (strcmp(key, "period") == 0)
270 ped->ped_period = strtol(value, NULL, 10);
271 else if (strcmp(key, "offcore_rsp") == 0)
272 ped->ped_offcore_rsp = strtol(value, NULL, 16);
273 else if (strcmp(key, "any") == 0)
274 ped->ped_any = strtol(value, NULL, 10);
275 else if (strcmp(key, "cmask") == 0)
276 ped->ped_cmask = strtol(value, NULL, 10);
277 else if (strcmp(key, "inv") == 0)
278 ped->ped_inv = strtol(value, NULL, 10);
279 else if (strcmp(key, "edge") == 0)
280 ped->ped_edge = strtol(value, NULL, 10);
281 else if (strcmp(key, "frontend") == 0)
282 ped->ped_frontend = strtol(value, NULL, 16);
283 else if (strcmp(key, "ldlat") == 0)
284 ped->ped_ldlat = strtol(value, NULL, 16);
285 else if (strcmp(key, "fc_mask") == 0)
286 ped->ped_fc_mask = strtol(value, NULL, 16);
287 else if (strcmp(key, "ch_mask") == 0)
288 ped->ped_ch_mask = strtol(value, NULL, 16);
289 else if (strcmp(key, "config1") == 0)
290 ped->ped_config1 = strtol(value, NULL, 16);
291 else if (strcmp(key, "l3_thread_mask") == 0)
292 ped->ped_l3_thread = strtol(value, NULL, 16);
293 else if (strcmp(key, "l3_slice_mask") == 0)
294 ped->ped_l3_slice = strtol(value, NULL, 16);
/* Fall-through: report unrecognized pairs only in debug mode. */
296 debug = getenv("PMUDEBUG");
297 if (debug != NULL && strcmp(debug, "true") == 0 && value != NULL)
298 printf("unrecognized kvpair: %s:%s\n", key, value);
/*
 * Return the sampling period encoded in the event's pmu-events
 * "period=" field, following one level of alias indirection.
 * Falls back to DEFAULT_SAMPLE_COUNT whenever the event cannot be
 * found or parsed, so callers always get a usable rate.
 */
306 pmc_pmu_sample_rate_get(const char *event_name)
308 const struct pmu_event *pe;
309 struct pmu_event_desc ped;
311 event_name = pmu_alias_get(event_name);
312 if ((pe = pmu_event_get(NULL, event_name, NULL)) == NULL)
313 return (DEFAULT_SAMPLE_COUNT);
/* An entry may alias another entry; chase it once. */
314 if (pe->alias && (pe = pmu_event_get(NULL, pe->alias, NULL)) == NULL)
315 return (DEFAULT_SAMPLE_COUNT);
316 if (pe->event == NULL)
317 return (DEFAULT_SAMPLE_COUNT);
318 if (pmu_parse_event(&ped, pe->event))
319 return (DEFAULT_SAMPLE_COUNT);
320 return (ped.ped_period);
/*
 * True when a pmu-events table matches this CPU, i.e. the pmu-events
 * path of libpmc can be used on this machine.
 */
324 pmc_pmu_enabled(void)
327 return (pmu_events_map_get(NULL) != NULL);
/*
 * Print the name of every counter in this CPU's pmu-events table,
 * optionally filtered to names containing event_name (case-
 * insensitive substring).  With PMUDEBUG=true the event string is
 * additionally parsed (presumably to dump its fields -- the printing
 * controlled by the debug flag is elided from this view).
 */
331 pmc_pmu_print_counters(const char *event_name)
333 const struct pmu_events_map *pme;
334 const struct pmu_event *pe;
335 struct pmu_event_desc ped;
339 debug = getenv("PMUDEBUG");
342 if (debug != NULL && strcmp(debug, "true") == 0)
344 if ((pme = pmu_events_map_get(NULL)) == NULL)
346 for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
347 if (pe->name == NULL)
349 if (event_name != NULL && strcasestr(pe->name, event_name) == NULL)
351 printf("\t%s\n", pe->name);
353 pmu_parse_event(&ped, pe->event);
/*
 * Print "name:\tdesc" for every table entry whose name contains ev
 * (case-insensitive).  The second half of the condition at original
 * line 369 is elided -- presumably a pe->desc != NULL check guarding
 * the printf.
 */
358 pmc_pmu_print_counter_desc(const char *ev)
360 const struct pmu_events_map *pme;
361 const struct pmu_event *pe;
363 if ((pme = pmu_events_map_get(NULL)) == NULL)
365 for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
366 if (pe->name == NULL)
368 if (strcasestr(pe->name, ev) != NULL &&
370 printf("%s:\t%s\n", pe->name, pe->desc);
/*
 * Like pmc_pmu_print_counter_desc(), but prefer the long description
 * when one exists, falling back to the short one.
 */
375 pmc_pmu_print_counter_desc_long(const char *ev)
377 const struct pmu_events_map *pme;
378 const struct pmu_event *pe;
380 if ((pme = pmu_events_map_get(NULL)) == NULL)
382 for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
383 if (pe->name == NULL)
385 if (strcasestr(pe->name, ev) != NULL) {
386 if (pe->long_desc != NULL)
387 printf("%s:\n%s\n", pe->name, pe->long_desc);
388 else if (pe->desc != NULL)
389 printf("%s:\t%s\n", pe->name, pe->desc);
/*
 * Dump every non-NULL field of each matching table entry, one
 * "key: value" line per field.  Matching is a case-insensitive
 * substring test on the counter name.
 */
395 pmc_pmu_print_counter_full(const char *ev)
397 const struct pmu_events_map *pme;
398 const struct pmu_event *pe;
400 if ((pme = pmu_events_map_get(NULL)) == NULL)
402 for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
403 if (pe->name == NULL)
405 if (strcasestr(pe->name, ev) == NULL)
407 printf("name: %s\n", pe->name);
/* long_desc supersedes desc when both are present. */
408 if (pe->long_desc != NULL)
409 printf("desc: %s\n", pe->long_desc);
410 else if (pe->desc != NULL)
411 printf("desc: %s\n", pe->desc);
412 if (pe->event != NULL)
413 printf("event: %s\n", pe->event);
414 if (pe->topic != NULL)
415 printf("topic: %s\n", pe->topic);
/* NOTE(review): the pe->pmu NULL check (original line 416) is elided;
 * confirm it guards this printf like the neighboring fields. */
417 printf("pmu: %s\n", pe->pmu);
418 if (pe->unit != NULL)
419 printf("unit: %s\n", pe->unit);
420 if (pe->perpkg != NULL)
421 printf("perpkg: %s\n", pe->perpkg);
422 if (pe->metric_expr != NULL)
423 printf("metric_expr: %s\n", pe->metric_expr);
424 if (pe->metric_name != NULL)
425 printf("metric_name: %s\n", pe->metric_name);
426 if (pe->metric_group != NULL)
427 printf("metric_group: %s\n", pe->metric_group);
/*
 * Fill in the AMD-specific portion of a pmcallocate request from the
 * parsed event descriptor: pick the sub-class (L3 cache / data fabric /
 * core) from the event's topic string, encode event and unit mask, and
 * translate the generic pm_caps flags into AMD_PMC_* config bits.
 */
432 pmc_pmu_amd_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
433 struct pmu_event_desc *ped)
435 struct pmc_md_amd_op_pmcallocate *amd;
436 const struct pmu_event *pe;
439 amd = &pm->pm_md.pm_amd;
440 if (ped->ped_umask > 0) {
441 pm->pm_caps |= PMC_CAP_QUALIFIER;
442 amd->pm_amd_config |= AMD_PMC_TO_UNITMASK(ped->ped_umask);
444 pm->pm_class = PMC_CLASS_K8;
445 pe = pmu_event_get(NULL, event_name, &idx);
/* L3 events carry extra slice/thread qualifiers. */
447 if (strcmp("l3cache", pe->topic) == 0){
448 amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
449 amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_L3_CACHE;
450 amd->pm_amd_config |= AMD_PMC_TO_L3SLICE(ped->ped_l3_slice);
451 amd->pm_amd_config |= AMD_PMC_TO_L3CORE(ped->ped_l3_thread);
453 else if (strcmp("data fabric", pe->topic) == 0){
/* Data-fabric events use a different event-mask encoding. */
455 amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK_DF(ped->ped_event);
456 amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_DATA_FABRIC;
/* Default: ordinary core event. */
459 amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
460 amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_CORE;
/* Neither or both of USER/SYSTEM requested -> count in both rings. */
461 if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 ||
462 (pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) ==
463 (PMC_CAP_USER|PMC_CAP_SYSTEM))
464 amd->pm_amd_config |= (AMD_PMC_USR | AMD_PMC_OS);
465 else if (pm->pm_caps & PMC_CAP_USER)
466 amd->pm_amd_config |= AMD_PMC_USR;
467 else if (pm->pm_caps & PMC_CAP_SYSTEM)
468 amd->pm_amd_config |= AMD_PMC_OS;
/* NOTE(review): the two conditions selecting between these duplicate
 * EDGE assignments (original lines 469 and 471, likely ped_edge vs.
 * PMC_CAP_EDGE) are elided -- confirm against the full source. */
470 amd->pm_amd_config |= AMD_PMC_EDGE;
472 amd->pm_amd_config |= AMD_PMC_EDGE;
473 if (pm->pm_caps & PMC_CAP_INTERRUPT)
474 amd->pm_amd_config |= AMD_PMC_INT;
/*
 * Fill in the Intel-specific portion of a pmcallocate request: choose
 * the PMC class (uncore / fixed-function / programmable) from the
 * event name and descriptor, encode event select, umask, cmask and
 * offcore response, and translate pm_caps into IAP_* config bits.
 */
480 pmc_pmu_intel_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
481 struct pmu_event_desc *ped)
483 struct pmc_md_iap_op_pmcallocate *iap;
487 iap = &pm->pm_md.pm_iap;
/* "UNC_" prefix or "uncore" anywhere -> uncore programmable counter. */
488 if (strcasestr(event_name, "UNC_") == event_name ||
489 strcasestr(event_name, "uncore") != NULL) {
490 pm->pm_class = PMC_CLASS_UCP;
491 pm->pm_caps |= PMC_CAP_QUALIFIER;
/* umask == -1, or event 0x0 with umask 0x3, marks a fixed-function
 * counter (see the fixed-counter list in the comment near the alias
 * code above). */
492 } else if ((ped->ped_umask == -1) ||
493 (ped->ped_event == 0x0 && ped->ped_umask == 0x3)) {
494 pm->pm_class = PMC_CLASS_IAF;
496 pm->pm_class = PMC_CLASS_IAP;
497 pm->pm_caps |= PMC_CAP_QUALIFIER;
499 iap->pm_iap_config |= IAP_EVSEL(ped->ped_event);
500 if (ped->ped_umask > 0)
501 iap->pm_iap_config |= IAP_UMASK(ped->ped_umask);
502 iap->pm_iap_config |= IAP_CMASK(ped->ped_cmask);
503 iap->pm_iap_rsp = ped->ped_offcore_rsp;
/* Neither or both of USER/SYSTEM requested -> count in both rings. */
505 if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 ||
506 (pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) ==
507 (PMC_CAP_USER|PMC_CAP_SYSTEM))
508 iap->pm_iap_config |= (IAP_USR | IAP_OS);
509 else if (pm->pm_caps & PMC_CAP_USER)
510 iap->pm_iap_config |= IAP_USR;
511 else if (pm->pm_caps & PMC_CAP_SYSTEM)
512 iap->pm_iap_config |= IAP_OS;
/* NOTE(review): the conditions guarding these EDGE/ANY/EDGE
 * assignments (original lines 513, 515, 517 -- likely ped_edge,
 * ped_any and PMC_CAP_EDGE) are elided from this view. */
514 iap->pm_iap_config |= IAP_EDGE;
516 iap->pm_iap_config |= IAP_ANY;
518 iap->pm_iap_config |= IAP_EDGE;
519 if (pm->pm_caps & PMC_CAP_INTERRUPT)
520 iap->pm_iap_config |= IAP_INT;
/*
 * Top-level pmcallocate helper: resolve the event name through the
 * alias and pmu-events tables (chasing one alias level), parse its
 * event string, then dispatch to the Intel or AMD encoder.  Error
 * returns on lookup/parse failure are elided from this view.
 */
525 pmc_pmu_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm)
527 const struct pmu_event *pe;
528 struct pmu_event_desc ped;
532 if ((mfr = pmu_events_mfr()) == PMU_INVALID)
/* Start from a clean machine-dependent area; always allow read/write. */
535 bzero(&pm->pm_md, sizeof(pm->pm_md));
536 pm->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
537 event_name = pmu_alias_get(event_name);
538 if ((pe = pmu_event_get(NULL, event_name, &idx)) == NULL)
540 if (pe->alias && (pe = pmu_event_get(NULL, pe->alias, &idx)) == NULL)
545 if (pe->event == NULL)
547 if (pmu_parse_event(&ped, pe->event))
550 if (mfr == PMU_INTEL)
551 return (pmc_pmu_intel_pmcallocate(event_name, pm, &ped));
553 return (pmc_pmu_amd_pmcallocate(event_name, pm, &ped));
557 * Ultimately rely on AMD calling theirs the same
/*
 * Canonical counter set for "stat" mode (pmcstat -w style summaries).
 * Intel names are used; see the comment above regarding AMD.  One
 * entry (original line 561, presumably inst_retired) and the table
 * terminator are elided from this view.
 */
559 static const char *stat_mode_cntrs[] = {
560 "cpu_clk_unhalted.thread",
562 "br_inst_retired.all_branches",
563 "br_misp_retired.all_branches",
564 "longest_lat_cache.reference",
565 "longest_lat_cache.miss",
/*
 * Hand back the stat-mode counter list when the pmu-events path is
 * usable on this CPU; the failure return is elided from this view.
 */
569 pmc_pmu_stat_mode(const char ***cntrs)
571 if (pmc_pmu_enabled()) {
572 *cntrs = stat_mode_cntrs;
/*
 * Stub implementations for non-x86 builds (the #else arm of the
 * __amd64__/__i386__ guard at the top of the file): the pmu-events
 * path is unavailable, so each entry point reports a benign default.
 * Only fragments of each stub are visible here; return types and most
 * bodies are elided.
 */
581 pmc_pmu_sample_rate_get(const char *event_name __unused)
583 return (DEFAULT_SAMPLE_COUNT);
587 pmc_pmu_print_counters(const char *event_name __unused)
592 pmc_pmu_print_counter_desc(const char *e __unused)
597 pmc_pmu_print_counter_desc_long(const char *e __unused)
602 pmc_pmu_print_counter_full(const char *e __unused)
608 pmc_pmu_enabled(void)
614 pmc_pmu_pmcallocate(const char *e __unused, struct pmc_op_pmcallocate *p __unused)
620 pmc_pmu_event_get_by_idx(const char *c __unused, int idx __unused)
626 pmc_pmu_stat_mode(const char ***a __unused)
632 pmc_pmu_idx_get_by_event(const char *c __unused, const char *e __unused)