/*-
 * Copyright (c) 2003-2007 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Support for the AMD K7 and later processors */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#ifdef	DEBUG
enum pmc_class	amd_pmc_class;
#endif

/* AMD K7 & K8 PMCs */
struct amd_descr {
        struct pmc_descr pm_descr;   /* "base class" */
        uint32_t	 pm_evsel;   /* address of EVSEL register */
        uint32_t	 pm_perfctr; /* address of PERFCTR register */
};

static struct amd_descr amd_pmcdesc[AMD_NPMCS] =
{
    {
        .pm_descr = { .pd_name = "TSC", .pd_class = PMC_CLASS_TSC,
            .pd_caps = PMC_CAP_READ, .pd_width = 64 },
        .pm_evsel   = MSR_TSC,
        .pm_perfctr = 0 /* unused */
    },
    {
        /* name and class are filled in at initialization time */
        .pm_descr = { .pd_name = "", .pd_class = -1,
            .pd_caps = AMD_PMC_CAPS, .pd_width = 48 },
        .pm_evsel   = AMD_PMC_EVSEL_0,
        .pm_perfctr = AMD_PMC_PERFCTR_0
    },
    {
        .pm_descr = { .pd_name = "", .pd_class = -1,
            .pd_caps = AMD_PMC_CAPS, .pd_width = 48 },
        .pm_evsel   = AMD_PMC_EVSEL_1,
        .pm_perfctr = AMD_PMC_PERFCTR_1
    },
    {
        .pm_descr = { .pd_name = "", .pd_class = -1,
            .pd_caps = AMD_PMC_CAPS, .pd_width = 48 },
        .pm_evsel   = AMD_PMC_EVSEL_2,
        .pm_perfctr = AMD_PMC_PERFCTR_2
    },
    {
        .pm_descr = { .pd_name = "", .pd_class = -1,
            .pd_caps = AMD_PMC_CAPS, .pd_width = 48 },
        .pm_evsel   = AMD_PMC_EVSEL_3,
        .pm_perfctr = AMD_PMC_PERFCTR_3
    }
};

struct amd_event_code_map {
        enum pmc_event	pe_ev;	 /* enum value */
        uint8_t		pe_code; /* encoded event mask */
        uint8_t		pe_mask; /* bits allowed in unit mask */
};

const struct amd_event_code_map amd_event_codes[] = {
#if	defined(__i386__)	/* 32 bit Athlon (K7) only */
        { PMC_EV_K7_DC_ACCESSES, 0x40, 0 },
        { PMC_EV_K7_DC_MISSES, 0x41, 0 },
        { PMC_EV_K7_DC_REFILLS_FROM_L2, 0x42, AMD_PMC_UNITMASK_MOESI },
        { PMC_EV_K7_DC_REFILLS_FROM_SYSTEM, 0x43, AMD_PMC_UNITMASK_MOESI },
        { PMC_EV_K7_DC_WRITEBACKS, 0x44, AMD_PMC_UNITMASK_MOESI },
        { PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
        { PMC_EV_K7_L1_AND_L2_DTLB_MISSES, 0x46, 0 },
        { PMC_EV_K7_MISALIGNED_REFERENCES, 0x47, 0 },

        { PMC_EV_K7_IC_FETCHES, 0x80, 0 },
        { PMC_EV_K7_IC_MISSES, 0x81, 0 },

        { PMC_EV_K7_L1_ITLB_MISSES, 0x84, 0 },
        { PMC_EV_K7_L1_L2_ITLB_MISSES, 0x85, 0 },

        { PMC_EV_K7_RETIRED_INSTRUCTIONS, 0xC0, 0 },
        { PMC_EV_K7_RETIRED_OPS, 0xC1, 0 },
        { PMC_EV_K7_RETIRED_BRANCHES, 0xC2, 0 },
        { PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
        { PMC_EV_K7_RETIRED_TAKEN_BRANCHES, 0xC4, 0 },
        { PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
        { PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
        { PMC_EV_K7_RETIRED_RESYNC_BRANCHES, 0xC7, 0 },
        { PMC_EV_K7_INTERRUPTS_MASKED_CYCLES, 0xCD, 0 },
        { PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
        { PMC_EV_K7_HARDWARE_INTERRUPTS, 0xCF, 0 },
#endif

        { PMC_EV_K8_FP_DISPATCHED_FPU_OPS, 0x00, 0x3F },
        { PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED, 0x01, 0x00 },
        { PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS, 0x02, 0x00 },

        { PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD, 0x20, 0x7F },
        { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
            0x21, 0x00 },
        { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
        { PMC_EV_K8_LS_BUFFER2_FULL, 0x23, 0x00 },
        { PMC_EV_K8_LS_LOCKED_OPERATION, 0x24, 0x07 },
        { PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL, 0x25, 0x00 },
        { PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS, 0x26, 0x00 },
        { PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS, 0x27, 0x00 },

        { PMC_EV_K8_DC_ACCESS, 0x40, 0x00 },
        { PMC_EV_K8_DC_MISS, 0x41, 0x00 },
        { PMC_EV_K8_DC_REFILL_FROM_L2, 0x42, 0x1F },
        { PMC_EV_K8_DC_REFILL_FROM_SYSTEM, 0x43, 0x1F },
        { PMC_EV_K8_DC_COPYBACK, 0x44, 0x1F },
        { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT, 0x45, 0x00 },
        { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS, 0x46, 0x00 },
        { PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE, 0x47, 0x00 },
        { PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL, 0x48, 0x00 },
        { PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
        { PMC_EV_K8_DC_ONE_BIT_ECC_ERROR, 0x4A, 0x03 },
        { PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
        { PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS, 0x4C, 0x03 },

        { PMC_EV_K8_BU_CPU_CLK_UNHALTED, 0x76, 0x00 },
        { PMC_EV_K8_BU_INTERNAL_L2_REQUEST, 0x7D, 0x1F },
        { PMC_EV_K8_BU_FILL_REQUEST_L2_MISS, 0x7E, 0x07 },
        { PMC_EV_K8_BU_FILL_INTO_L2, 0x7F, 0x03 },

        { PMC_EV_K8_IC_FETCH, 0x80, 0x00 },
        { PMC_EV_K8_IC_MISS, 0x81, 0x00 },
        { PMC_EV_K8_IC_REFILL_FROM_L2, 0x82, 0x00 },
        { PMC_EV_K8_IC_REFILL_FROM_SYSTEM, 0x83, 0x00 },
        { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT, 0x84, 0x00 },
        { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS, 0x85, 0x00 },
        { PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
        { PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL, 0x87, 0x00 },
        { PMC_EV_K8_IC_RETURN_STACK_HIT, 0x88, 0x00 },
        { PMC_EV_K8_IC_RETURN_STACK_OVERFLOW, 0x89, 0x00 },

        { PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS, 0xC0, 0x00 },
        { PMC_EV_K8_FR_RETIRED_UOPS, 0xC1, 0x00 },
        { PMC_EV_K8_FR_RETIRED_BRANCHES, 0xC2, 0x00 },
        { PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0x00 },
        { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES, 0xC4, 0x00 },
        { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
        { PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0x00 },
        { PMC_EV_K8_FR_RETIRED_RESYNCS, 0xC7, 0x00 },
        { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS, 0xC8, 0x00 },
        { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
        { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
            0xCA, 0x00 },
        { PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS, 0xCB, 0x0F },
        { PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
            0xCC, 0x07 },
        { PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES, 0xCD, 0x00 },
        { PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
        { PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS, 0xCF, 0x00 },

        { PMC_EV_K8_FR_DECODER_EMPTY, 0xD0, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALLS, 0xD1, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
            0xD2, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD, 0xD4, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
            0xD5, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
            0xD6, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL, 0xD7, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL, 0xD8, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
            0xD9, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
            0xDA, 0x00 },
        { PMC_EV_K8_FR_FPU_EXCEPTIONS, 0xDB, 0x0F },
        { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0, 0xDC, 0x00 },
        { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1, 0xDD, 0x00 },
        { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2, 0xDE, 0x00 },
        { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3, 0xDF, 0x00 },

        { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x07 },
        { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
        { PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
            0xE2, 0x00 },
        { PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND, 0xE3, 0x07 },
        { PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
        { PMC_EV_K8_NB_SIZED_COMMANDS, 0xEB, 0x7F },
        { PMC_EV_K8_NB_PROBE_RESULT, 0xEC, 0x0F },
        { PMC_EV_K8_NB_HT_BUS0_BANDWIDTH, 0xF6, 0x0F },
        { PMC_EV_K8_NB_HT_BUS1_BANDWIDTH, 0xF7, 0x0F },
        { PMC_EV_K8_NB_HT_BUS2_BANDWIDTH, 0xF8, 0x0F }
};

const int amd_event_codes_size =
        sizeof(amd_event_codes) / sizeof(amd_event_codes[0]);

/*
 * read a pmc register
 */

static int
amd_read_pmc(int cpu, int ri, pmc_value_t *v)
{
        enum pmc_mode mode;
        const struct amd_descr *pd;
        struct pmc *pm;
        const struct pmc_hw *phw;
        pmc_value_t tmp;

        KASSERT(cpu >= 0 && cpu < mp_ncpus,
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
        pd  = &amd_pmcdesc[ri];
        pm  = phw->phw_pmc;

        KASSERT(pm != NULL,
            ("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
                cpu, ri));

        mode = PMC_TO_MODE(pm);

        PMCDBG(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);

        /* Reading the TSC is a special case */
        if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
                KASSERT(PMC_IS_COUNTING_MODE(mode),
                    ("[amd,%d] TSC counter in non-counting mode", __LINE__));
                *v = rdtsc();
                PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
                return 0;
        }

#ifdef	DEBUG
        KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
            ("[amd,%d] unknown PMC class (%d)", __LINE__,
                pd->pm_descr.pd_class));
#endif

        tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
        if (PMC_IS_SAMPLING_MODE(mode))
                *v = AMD_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
        else
                *v = tmp;

        PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);

        return 0;
}

/*
 * Write a PMC MSR.
 */

static int
amd_write_pmc(int cpu, int ri, pmc_value_t v)
{
        const struct amd_descr *pd;
        enum pmc_mode mode;
        struct pmc *pm;
        const struct pmc_hw *phw;

        KASSERT(cpu >= 0 && cpu < mp_ncpus,
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
        pd  = &amd_pmcdesc[ri];
        pm  = phw->phw_pmc;

        KASSERT(pm != NULL,
            ("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
                cpu, ri));

        mode = PMC_TO_MODE(pm);

        if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
                return 0;

#ifdef	DEBUG
        KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
            ("[amd,%d] unknown PMC class (%d)", __LINE__,
                pd->pm_descr.pd_class));
#endif

        /* use 2's complement of the count for sampling mode PMCs */
        if (PMC_IS_SAMPLING_MODE(mode))
                v = AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
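        /*
         * Worked example (a sketch; the exact encoding lives in the
         * AMD_RELOAD_COUNT_TO_PERFCTR_VALUE() macro in the MD header):
         * on a 48-bit counter, a reload count of 4096 is written as
         * 2^48 - 4096 = 0xFFFFFFFFF000, so the counter overflows and
         * raises an interrupt after exactly 4096 events.
         * amd_read_pmc() above applies the inverse mapping.
         */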

        PMCDBG(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);

        /* write the PMC value */
        wrmsr(pd->pm_perfctr, v);
        return 0;
}

/*
 * configure hardware pmc according to the configuration recorded in
 * pmc 'pm'.
 */

static int
amd_config_pmc(int cpu, int ri, struct pmc *pm)
{
        struct pmc_hw *phw;

        PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

        KASSERT(cpu >= 0 && cpu < mp_ncpus,
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];

        KASSERT(pm == NULL || phw->phw_pmc == NULL,
            ("[amd,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
                __LINE__, pm, phw->phw_pmc));

        phw->phw_pmc = pm;
        return 0;
}

/*
 * Retrieve a configured PMC pointer from hardware state.
 */

static int
amd_get_config(int cpu, int ri, struct pmc **ppm)
{
        *ppm = pmc_pcpu[cpu]->pc_hwpmcs[ri]->phw_pmc;

        return 0;
}

/*
 * Machine dependent actions taken during the context switch in of a
 * thread.
 */

static int
amd_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
{
        (void) pc;

        PMCDBG(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp,
            (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0);

        /* enable the RDPMC instruction if needed */
        if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS)
                load_cr4(rcr4() | CR4_PCE);
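        /*
         * Note: setting CR4.PCE permits the RDPMC instruction from
         * user mode; together with the counter index returned by
         * amd_get_msr() below, this lets a thread sample its own
         * PMCs without entering the kernel.
         */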

        return 0;
}

/*
 * Machine dependent actions taken during the context switch out of a
 * thread.
 */

static int
amd_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
{
        (void) pc;
        (void) pp;		/* can be NULL */

        PMCDBG(MDP,SWO,1, "pc=%p pp=%p enable-msr=%d", pc, pp, pp ?
            (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0 : 0);

        /* always turn off the RDPMC instruction */
        load_cr4(rcr4() & ~CR4_PCE);

        return 0;
}

/*
 * Check if a given allocation is feasible.
 */

static int
amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
        int i;
        uint32_t allowed_unitmask, caps, config, unitmask;
        enum pmc_event pe;
        const struct pmc_descr *pd;

        (void) cpu;

        KASSERT(cpu >= 0 && cpu < mp_ncpus,
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row index %d", __LINE__, ri));

        pd = &amd_pmcdesc[ri].pm_descr;

        /* check class match */
        if (pd->pd_class != a->pm_class)
                return EINVAL;

        caps = pm->pm_caps;

        PMCDBG(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);

        if ((pd->pd_caps & caps) != caps)
                return EPERM;
        if (pd->pd_class == PMC_CLASS_TSC) {
                /* TSCs are always allocated in system-wide counting mode */
                if (a->pm_ev != PMC_EV_TSC_TSC ||
                    a->pm_mode != PMC_MODE_SC)
                        return EINVAL;
                return 0;
        }

#ifdef	DEBUG
        KASSERT(pd->pd_class == amd_pmc_class,
            ("[amd,%d] Unknown PMC class (%d)", __LINE__, pd->pd_class));
#endif

        pe = a->pm_ev;

        /* map ev to the correct event mask code */
        config = allowed_unitmask = 0;
        for (i = 0; i < amd_event_codes_size; i++)
                if (amd_event_codes[i].pe_ev == pe) {
                        config =
                            AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
                        allowed_unitmask =
                            AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
                        break;
                }
        if (i == amd_event_codes_size)
                return EINVAL;

        unitmask = a->pm_md.pm_amd.pm_amd_config & AMD_PMC_UNITMASK;
        if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
                return EINVAL;

        if (unitmask && (caps & PMC_CAP_QUALIFIER))
                config |= unitmask;

        if (caps & PMC_CAP_THRESHOLD)
                config |= a->pm_md.pm_amd.pm_amd_config & AMD_PMC_COUNTERMASK;

        /* set at least one of the 'usr' or 'os' caps */
        if (caps & PMC_CAP_USER)
                config |= AMD_PMC_USR;
        if (caps & PMC_CAP_SYSTEM)
                config |= AMD_PMC_OS;
        if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
                config |= (AMD_PMC_USR|AMD_PMC_OS);

        if (caps & PMC_CAP_EDGE)
                config |= AMD_PMC_EDGE;
        if (caps & PMC_CAP_INVERT)
                config |= AMD_PMC_INVERT;
        if (caps & PMC_CAP_INTERRUPT)
                config |= AMD_PMC_INT;

        pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */

        PMCDBG(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config);
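        /*
         * Illustrative config value, assuming the conventional EVSEL
         * layout (event code in bits 0-7, USR at bit 16, OS at bit
         * 17): counting PMC_EV_K8_DC_ACCESS (code 0x40) in both user
         * and kernel mode yields config == 0x00030040.
         */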

        return 0;
}

/*
 * Release machine dependent state associated with a PMC.  This is a
 * no-op on this architecture.
 */

/* ARGSUSED0 */
static int
amd_release_pmc(int cpu, int ri, struct pmc *pmc)
{
#ifdef	DEBUG
        const struct amd_descr *pd;
#endif
        struct pmc_hw *phw;

        (void) pmc;

        KASSERT(cpu >= 0 && cpu < mp_ncpus,
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];

        KASSERT(phw->phw_pmc == NULL,
            ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

#ifdef	DEBUG
        pd = &amd_pmcdesc[ri];
        if (pd->pm_descr.pd_class == amd_pmc_class)
                KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
                    ("[amd,%d] PMC %d released while active", __LINE__, ri));
#endif

        return 0;
}

/*
 * start a PMC.
 */

static int
amd_start_pmc(int cpu, int ri)
{
        uint32_t config;
        struct pmc *pm;
        struct pmc_hw *phw;
        const struct amd_descr *pd;

        KASSERT(cpu >= 0 && cpu < mp_ncpus,
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
        pm  = phw->phw_pmc;
        pd  = &amd_pmcdesc[ri];

        KASSERT(pm != NULL,
            ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
                cpu, ri));

        PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);

        if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
                return 0;	/* TSCs are always running */

#ifdef	DEBUG
        KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
            ("[amd,%d] unknown PMC class (%d)", __LINE__,
                pd->pm_descr.pd_class));
#endif

        KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
            ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
            ri, cpu, pd->pm_descr.pd_name));

        /* turn on the PMC ENABLE bit */
        config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;

        PMCDBG(MDP,STA,2,"amd-start config=0x%x", config);

        wrmsr(pd->pm_evsel, config);
        return 0;
}

/*
 * Stop a PMC.
 */

static int
amd_stop_pmc(int cpu, int ri)
{
        uint32_t config;
        struct pmc *pm;
        struct pmc_hw *phw;
        const struct amd_descr *pd;

        KASSERT(cpu >= 0 && cpu < mp_ncpus,
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
        pm  = phw->phw_pmc;
        pd  = &amd_pmcdesc[ri];

        KASSERT(pm != NULL,
            ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
                cpu, ri));

        /* can't stop a TSC */
        if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
                return 0;

#ifdef	DEBUG
        KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
            ("[amd,%d] unknown PMC class (%d)", __LINE__,
                pd->pm_descr.pd_class));
#endif

        KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
            ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
                __LINE__, ri, cpu, pd->pm_descr.pd_name));

        PMCDBG(MDP,STO,1,"amd-stop ri=%d", ri);

        /* turn off the PMC ENABLE bit */
        config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
        wrmsr(pd->pm_evsel, config);
        return 0;
}

/*
 * Interrupt handler.  This function needs to return '1' if the
 * interrupt was caused by this CPU's PMCs or '0' otherwise.  It is
 * not allowed to sleep or do anything a 'fast' interrupt handler is
 * not allowed to do.
 */

static int
amd_intr(int cpu, struct trapframe *tf)
{
        int i, error, retval, ri;
        uint32_t config, evsel, perfctr;
        struct pmc *pm;
        struct pmc_cpu *pc;
        struct pmc_hw *phw;
        pmc_value_t v;

        KASSERT(cpu >= 0 && cpu < mp_ncpus,
            ("[amd,%d] out of range CPU %d", __LINE__, cpu));

        PMCDBG(MDP,INT,1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf,
            TRAPF_USERMODE(tf));

        retval = 0;

        pc = pmc_pcpu[cpu];

        /*
         * look for all PMCs that have interrupted:
         * - skip over the TSC [PMC#0]
         * - look for a running, sampling PMC which has overflowed
         *   and which has a valid 'struct pmc' association
         *
         * If found, we call a helper to process the interrupt.
         *
         * If multiple PMCs interrupt at the same time, the AMD64
         * processor appears to deliver as many NMIs as there are
         * outstanding PMC interrupts.  So we process only one NMI
         * interrupt at a time.
         */
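
        /*
         * Sketch of the overflow test, assuming the conventional
         * definition of AMD_PMC_HAS_OVERFLOWED() in the MD header:
         * sampling PMCs are loaded with negative (two's complement)
         * values, so bit 47 of the counter stays set until the count
         * wraps; overflow is detected once that bit reads as zero.
         */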

        for (i = 0; retval == 0 && i < AMD_NPMCS-1; i++) {

                ri = i + 1;	/* row index; TSC is at ri == 0 */

                if (!AMD_PMC_HAS_OVERFLOWED(i))
                        continue;

                phw = pc->pc_hwpmcs[ri];

                KASSERT(phw != NULL, ("[amd,%d] null PHW pointer", __LINE__));

                if ((pm = phw->phw_pmc) == NULL ||
                    pm->pm_state != PMC_STATE_RUNNING ||
                    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
                        continue;
                }

                retval = 1;	/* Found an interrupting PMC. */

                /* Stop the PMC, reload count. */
                evsel   = AMD_PMC_EVSEL_0 + i;
                perfctr = AMD_PMC_PERFCTR_0 + i;
                v       = pm->pm_sc.pm_reloadcount;
                config  = rdmsr(evsel);

                KASSERT((config & ~AMD_PMC_ENABLE) ==
                    (pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE),
                    ("[amd,%d] config mismatch reg=0x%x pm=0x%x", __LINE__,
                        config, pm->pm_md.pm_amd.pm_amd_evsel));

                wrmsr(evsel, config & ~AMD_PMC_ENABLE);
                wrmsr(perfctr, AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v));

                /* Restart the counter if logging succeeded. */
                error = pmc_process_interrupt(cpu, pm, tf, TRAPF_USERMODE(tf));
                if (error == 0)
                        wrmsr(evsel, config | AMD_PMC_ENABLE);
        }

        atomic_add_int(retval ? &pmc_stats.pm_intr_processed :
            &pmc_stats.pm_intr_ignored, 1);

        return retval;
}

/*
 * describe a PMC
 */

static int
amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
        int error;
        size_t copied;
        const struct amd_descr *pd;
        struct pmc_hw *phw;

        KASSERT(cpu >= 0 && cpu < mp_ncpus,
            ("[amd,%d] illegal CPU %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] row-index %d out of range", __LINE__, ri));

        phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
        pd  = &amd_pmcdesc[ri];

        if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
                 PMC_NAME_MAX, &copied)) != 0)
                return error;

        pi->pm_class = pd->pm_descr.pd_class;

        if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
                pi->pm_enabled = TRUE;
                *ppmc          = phw->phw_pmc;
        } else {
                pi->pm_enabled = FALSE;
                *ppmc          = NULL;
        }

        return 0;
}

/*
 * i386 specific entry points
 */

/*
 * return the MSR address of the given PMC.
 */

static int
amd_get_msr(int ri, uint32_t *msr)
{
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] ri %d out of range", __LINE__, ri));

        *msr = amd_pmcdesc[ri].pm_perfctr - AMD_PMC_PERFCTR_0;
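        /*
         * Example: for ri == 1 this yields
         * AMD_PMC_PERFCTR_0 - AMD_PMC_PERFCTR_0 == 0, i.e. the index
         * a user-mode RDPMC instruction would use for the first
         * programmable counter.
         */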

        return 0;
}

/*
 * processor dependent initialization.
 */

/*
 * Per-processor data structure
 *
 * [common stuff]
 * [5 struct pmc_hw pointers]
 * [5 struct pmc_hw structures]
 */

struct amd_cpu {
        struct pmc_cpu	pc_common;
        struct pmc_hw	*pc_hwpmcs[AMD_NPMCS];
        struct pmc_hw	pc_amdpmcs[AMD_NPMCS];
};
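
/*
 * Note: 'pc_common' must remain the first member; amd_init() below
 * hands this structure to the machine independent layer as a
 * 'struct pmc_cpu *', relying on both structures sharing the same
 * initial address.
 */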

static int
amd_init(int cpu)
{
        int n;
        struct amd_cpu *pcs;
        struct pmc_hw  *phw;

        KASSERT(cpu >= 0 && cpu < mp_ncpus,
            ("[amd,%d] insane cpu number %d", __LINE__, cpu));

        PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu);

        MALLOC(pcs, struct amd_cpu *, sizeof(struct amd_cpu), M_PMC,
            M_WAITOK|M_ZERO);

        phw = &pcs->pc_amdpmcs[0];

        /*
         * Set the content of the hardware descriptors to a known
         * state.
         */

        for (n = 0; n < AMD_NPMCS; n++, phw++) {
                phw->phw_state    = PMC_PHW_FLAG_IS_ENABLED |
                    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
                phw->phw_pmc      = NULL;
                pcs->pc_hwpmcs[n] = phw;
        }

        /* Mark the TSC as shareable */
        pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;

        pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;

        return 0;
}

/*
 * processor dependent cleanup prior to the KLD
 * being unloaded.
 */

static int
amd_cleanup(int cpu)
{
        int i;
        uint32_t evsel;
        struct pmc_cpu *pcs;

        KASSERT(cpu >= 0 && cpu < mp_ncpus,
            ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));

        PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu);

        /*
         * First, turn off all PMCs on this CPU.
         */

        for (i = 0; i < 4; i++) { /* XXX this loop is now not needed */
                evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
                evsel &= ~AMD_PMC_ENABLE;
                wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
        }

        /*
         * Next, free up allocated space.
         */

        if ((pcs = pmc_pcpu[cpu]) == NULL)
                return 0;

#ifdef	DEBUG
        /* check the TSC */
        KASSERT(pcs->pc_hwpmcs[0]->phw_pmc == NULL,
            ("[amd,%d] CPU%d,PMC0 still in use", __LINE__, cpu));
        for (i = 1; i < AMD_NPMCS; i++) {
                KASSERT(pcs->pc_hwpmcs[i]->phw_pmc == NULL,
                    ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
                KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + (i-1)),
                    ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
        }
#endif

        pmc_pcpu[cpu] = NULL;
        FREE(pcs, M_PMC);
        return 0;
}

/*
 * Initialize ourselves.
 */

struct pmc_mdep *
pmc_amd_initialize(void)
{
        enum pmc_cputype cputype;
        enum pmc_class class;
        struct pmc_mdep *pmc_mdep;
        char *name;
        int i;

        /*
         * The presence of hardware performance counters on the AMD
         * Athlon, Duron or later processors is _not_ indicated by
         * any of the processor feature flags set by the 'CPUID'
         * instruction, so we only check the 'instruction family'
         * field returned by CPUID for instruction family >= 6.
         */
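
        /*
         * Example decodings: a cpu_id of 0xF5A masks to 0xF00 and is
         * treated as a K8-class CPU, while 0x662 masks to 0x600 and
         * matches the K7 case below.
         */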

        class = cputype = -1;
        name  = NULL;
        switch (cpu_id & 0xF00) {
        case 0x600:		/* Athlon(tm) processor */
                cputype = PMC_CPU_AMD_K7;
                class   = PMC_CLASS_K7;
                name    = "K7";
                break;
        case 0xF00:		/* Athlon64/Opteron processor */
                cputype = PMC_CPU_AMD_K8;
                class   = PMC_CLASS_K8;
                name    = "K8";
                break;
        }

        if ((int) cputype == -1) {
                (void) printf("pmc: Unknown AMD CPU.\n");
                return NULL;
        }

#ifdef	DEBUG
        amd_pmc_class = class;
#endif

        MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep),
            M_PMC, M_WAITOK|M_ZERO);

        pmc_mdep->pmd_cputype = cputype;
        pmc_mdep->pmd_npmc    = AMD_NPMCS;

        /* this processor has two classes of usable PMCs */
        pmc_mdep->pmd_nclass  = 2;

        /* TSC */
        pmc_mdep->pmd_classes[0].pm_class = PMC_CLASS_TSC;
        pmc_mdep->pmd_classes[0].pm_caps  = PMC_CAP_READ;
        pmc_mdep->pmd_classes[0].pm_width = 64;

        /* AMD K7/K8 PMCs */
        pmc_mdep->pmd_classes[1].pm_class = class;
        pmc_mdep->pmd_classes[1].pm_caps  = AMD_PMC_CAPS;
        pmc_mdep->pmd_classes[1].pm_width = 48;

        pmc_mdep->pmd_nclasspmcs[0] = 1;
        pmc_mdep->pmd_nclasspmcs[1] = (AMD_NPMCS-1);

        /* fill in the correct pmc name and class */
        for (i = 1; i < AMD_NPMCS; i++) {
                (void) snprintf(amd_pmcdesc[i].pm_descr.pd_name,
                    sizeof(amd_pmcdesc[i].pm_descr.pd_name), "%s-%d",
                    name, i-1);
                amd_pmcdesc[i].pm_descr.pd_class = class;
        }

        pmc_mdep->pmd_init         = amd_init;
        pmc_mdep->pmd_cleanup      = amd_cleanup;
        pmc_mdep->pmd_switch_in    = amd_switch_in;
        pmc_mdep->pmd_switch_out   = amd_switch_out;
        pmc_mdep->pmd_read_pmc     = amd_read_pmc;
        pmc_mdep->pmd_write_pmc    = amd_write_pmc;
        pmc_mdep->pmd_config_pmc   = amd_config_pmc;
        pmc_mdep->pmd_get_config   = amd_get_config;
        pmc_mdep->pmd_allocate_pmc = amd_allocate_pmc;
        pmc_mdep->pmd_release_pmc  = amd_release_pmc;
        pmc_mdep->pmd_start_pmc    = amd_start_pmc;
        pmc_mdep->pmd_stop_pmc     = amd_stop_pmc;
        pmc_mdep->pmd_intr         = amd_intr;
        pmc_mdep->pmd_describe     = amd_describe;
        pmc_mdep->pmd_get_msr      = amd_get_msr; /* i386 */

        PMCDBG(MDP,INI,0,"%s","amd-initialize");

        return pmc_mdep;
}