/*-
 * Copyright (c) 2003-2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* Support for the AMD K7 and later processors */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#ifdef	DEBUG
enum pmc_class	amd_pmc_class;
#endif
/* AMD K7 & K8 PMCs */
struct amd_descr {
	struct pmc_descr pm_descr;  /* "base class" */
	uint32_t	pm_evsel;   /* address of EVSEL register */
	uint32_t	pm_perfctr; /* address of PERFCTR register */
};

static struct amd_descr amd_pmcdesc[AMD_NPMCS] =
{
    {
	.pm_descr =
	{
		.pd_name  = "",
		.pd_class = -1,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_0,
	.pm_perfctr = AMD_PMC_PERFCTR_0
    },
    {
	.pm_descr =
	{
		.pd_name  = "",
		.pd_class = -1,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_1,
	.pm_perfctr = AMD_PMC_PERFCTR_1
    },
    {
	.pm_descr =
	{
		.pd_name  = "",
		.pd_class = -1,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_2,
	.pm_perfctr = AMD_PMC_PERFCTR_2
    },
    {
	.pm_descr =
	{
		.pd_name  = "",
		.pd_class = -1,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_3,
	.pm_perfctr = AMD_PMC_PERFCTR_3
    }
};
struct amd_event_code_map {
	enum pmc_event	pe_ev;	 /* enum value */
	uint8_t		pe_code; /* encoded event mask */
	uint8_t		pe_mask; /* bits allowed in unit mask */
};
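/*
 * Each entry in the table below maps a PMC_EV_* constant to the
 * event-select code programmed into the EVSEL MSR, together with the
 * unit-mask bits that userland is allowed to request for that event
 * (a mask of 0 means the event takes no qualifier).
 */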
const struct amd_event_code_map amd_event_codes[] = {
#if	defined(__i386__)	/* 32 bit Athlon (K7) only */
	{ PMC_EV_K7_DC_ACCESSES,		0x40, 0 },
	{ PMC_EV_K7_DC_MISSES,			0x41, 0 },
	{ PMC_EV_K7_DC_REFILLS_FROM_L2,		0x42, AMD_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_DC_REFILLS_FROM_SYSTEM,	0x43, AMD_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_DC_WRITEBACKS,		0x44, AMD_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
	{ PMC_EV_K7_L1_AND_L2_DTLB_MISSES,	0x46, 0 },
	{ PMC_EV_K7_MISALIGNED_REFERENCES,	0x47, 0 },

	{ PMC_EV_K7_IC_FETCHES,			0x80, 0 },
	{ PMC_EV_K7_IC_MISSES,			0x81, 0 },

	{ PMC_EV_K7_L1_ITLB_MISSES,		0x84, 0 },
	{ PMC_EV_K7_L1_L2_ITLB_MISSES,		0x85, 0 },

	{ PMC_EV_K7_RETIRED_INSTRUCTIONS,	0xC0, 0 },
	{ PMC_EV_K7_RETIRED_OPS,		0xC1, 0 },
	{ PMC_EV_K7_RETIRED_BRANCHES,		0xC2, 0 },
	{ PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES,	0xC4, 0 },
	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
	{ PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
	{ PMC_EV_K7_RETIRED_RESYNC_BRANCHES,	0xC7, 0 },
	{ PMC_EV_K7_INTERRUPTS_MASKED_CYCLES,	0xCD, 0 },
	{ PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
	{ PMC_EV_K7_HARDWARE_INTERRUPTS,	0xCF, 0 },
#endif
	{ PMC_EV_K8_FP_DISPATCHED_FPU_OPS,		0x00, 0x3F },
	{ PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED,	0x01, 0x00 },
	{ PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS,	0x02, 0x00 },

	{ PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD,		0x20, 0x7F },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
							0x21, 0x00 },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
	{ PMC_EV_K8_LS_BUFFER2_FULL,			0x23, 0x00 },
	{ PMC_EV_K8_LS_LOCKED_OPERATION,		0x24, 0x07 },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL,	0x25, 0x00 },
	{ PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS,	0x26, 0x00 },
	{ PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS,	0x27, 0x00 },

	{ PMC_EV_K8_DC_ACCESS,				0x40, 0x00 },
	{ PMC_EV_K8_DC_MISS,				0x41, 0x00 },
	{ PMC_EV_K8_DC_REFILL_FROM_L2,			0x42, 0x1F },
	{ PMC_EV_K8_DC_REFILL_FROM_SYSTEM,		0x43, 0x1F },
	{ PMC_EV_K8_DC_COPYBACK,			0x44, 0x1F },
	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT,	0x45, 0x00 },
	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS,	0x46, 0x00 },
	{ PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE,	0x47, 0x00 },
	{ PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL,	0x48, 0x00 },
	{ PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
	{ PMC_EV_K8_DC_ONE_BIT_ECC_ERROR,		0x4A, 0x03 },
	{ PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
	{ PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS,	0x4C, 0x03 },

	{ PMC_EV_K8_BU_CPU_CLK_UNHALTED,		0x76, 0x00 },
	{ PMC_EV_K8_BU_INTERNAL_L2_REQUEST,		0x7D, 0x1F },
	{ PMC_EV_K8_BU_FILL_REQUEST_L2_MISS,		0x7E, 0x07 },
	{ PMC_EV_K8_BU_FILL_INTO_L2,			0x7F, 0x03 },

	{ PMC_EV_K8_IC_FETCH,				0x80, 0x00 },
	{ PMC_EV_K8_IC_MISS,				0x81, 0x00 },
	{ PMC_EV_K8_IC_REFILL_FROM_L2,			0x82, 0x00 },
	{ PMC_EV_K8_IC_REFILL_FROM_SYSTEM,		0x83, 0x00 },
	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT,	0x84, 0x00 },
	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS,	0x85, 0x00 },
	{ PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
	{ PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL,		0x87, 0x00 },
	{ PMC_EV_K8_IC_RETURN_STACK_HIT,		0x88, 0x00 },
	{ PMC_EV_K8_IC_RETURN_STACK_OVERFLOW,		0x89, 0x00 },

	{ PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS,	0xC0, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_UOPS,			0xC1, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_BRANCHES,		0xC2, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED,	0xC3, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES,		0xC4, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS,	0xC6, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_RESYNCS,			0xC7, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS,		0xC8, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
							0xCA, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS,	0xCB, 0x0F },
	{ PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
							0xCC, 0x07 },
	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES,	0xCD, 0x00 },
	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
	{ PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS,	0xCF, 0x00 },

	{ PMC_EV_K8_FR_DECODER_EMPTY,			0xD0, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALLS,			0xD1, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
							0xD2, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD,	0xD4, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
							0xD5, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
							0xD6, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL,	0xD7, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL,	0xD8, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
							0xD9, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
							0xDA, 0x00 },
	{ PMC_EV_K8_FR_FPU_EXCEPTIONS,			0xDB, 0x0F },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0,	0xDC, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1,	0xDD, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2,	0xDE, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3,	0xDF, 0x00 },

	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x7 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
							0xE2, 0x00 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND,	0xE3, 0x07 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
	{ PMC_EV_K8_NB_SIZED_COMMANDS,			0xEB, 0x7F },
	{ PMC_EV_K8_NB_PROBE_RESULT,			0xEC, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS0_BANDWIDTH,		0xF6, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS1_BANDWIDTH,		0xF7, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS2_BANDWIDTH,		0xF8, 0x0F }
};

const int amd_event_codes_size =
	sizeof(amd_event_codes) / sizeof(amd_event_codes[0]);
/*
 * Per-processor information
 */

struct amd_cpu {
	struct pmc_hw	pc_amdpmcs[AMD_NPMCS];
};

static struct amd_cpu **amd_pcpu;
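/*
 * amd_pcpu[] is indexed by CPU number: the array itself is allocated
 * in pmc_amd_initialize() below, and each per-CPU slot is filled in by
 * amd_pcpu_init() when that CPU is brought under PMC control.
 */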
/*
 * read a pmc register
 */

static int
amd_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	enum pmc_mode mode;
	const struct amd_descr *pd;
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
	KASSERT(amd_pcpu[cpu],
	    ("[amd,%d] null per-cpu, cpu %d", __LINE__, cpu));

	pm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;
	pd = &amd_pmcdesc[ri];

	KASSERT(pm != NULL,
	    ("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
		cpu, ri));

	mode = PMC_TO_MODE(pm);

	PMCDBG(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);

#ifdef	DEBUG
	KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));
#endif

	tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
	PMCDBG(MDP,REA,2,"amd-read (pre-munge) id=%d -> %jd", ri, tmp);
	if (PMC_IS_SAMPLING_MODE(mode)) {
		/* Sign extend 48 bit value to 64 bits. */
		tmp = (pmc_value_t) (((int64_t) tmp << 16) >> 16);
		tmp = AMD_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
	}
	*v = tmp;

	PMCDBG(MDP,REA,2,"amd-read (post-munge) id=%d -> %jd", ri, *v);

	return 0;
}
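/*
 * A worked example of the munging above, assuming the usual definitions
 * of AMD_PERFCTR_VALUE_TO_RELOAD_COUNT() and its inverse as arithmetic
 * negation: a sampling PMC with a reload count of 1000 starts counting
 * at -1000 and interrupts when it wraps past zero.  If the raw 48-bit
 * counter holds -250 when read, the shifts above sign-extend that to a
 * 64-bit -250 and the negation reports 250, the number of events still
 * to go before the next sampling interrupt.
 */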
/*
 * Write a PMC MSR.
 */

static int
amd_write_pmc(int cpu, int ri, pmc_value_t v)
{
	const struct amd_descr *pd;
	enum pmc_mode mode;
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	pm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;
	pd = &amd_pmcdesc[ri];

	KASSERT(pm != NULL,
	    ("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
		cpu, ri));

	mode = PMC_TO_MODE(pm);

#ifdef	DEBUG
	KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));
#endif

	/* use 2's complement of the count for sampling mode PMCs */
	if (PMC_IS_SAMPLING_MODE(mode))
		v = AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

	PMCDBG(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	/* write the PMC value */
	wrmsr(pd->pm_perfctr, v);
	return 0;
}
/*
 * configure hardware pmc according to the configuration recorded in
 * pmc 'pm'.
 */

static int
amd_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[amd,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
		__LINE__, pm, phw->phw_pmc));

	phw->phw_pmc = pm;
	return 0;
}
/*
 * Retrieve a configured PMC pointer from hardware state.
 */

static int
amd_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;

	return 0;
}
/*
 * Machine dependent actions taken during the context switch in of a
 * thread.
 */

static int
amd_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
{
	(void) pc;

	PMCDBG(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp,
	    (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0);

	/* enable the RDPMC instruction if needed */
	if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS)
		load_cr4(rcr4() | CR4_PCE);

	return 0;
}
/*
 * Machine dependent actions taken during the context switch out of a
 * thread.
 */

static int
amd_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
{
	(void) pc;
	(void) pp;		/* can be NULL */

	PMCDBG(MDP,SWO,1, "pc=%p pp=%p enable-msr=%d", pc, pp, pp ?
	    (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) == 1 : 0);

	/* always turn off the RDPMC instruction */
	load_cr4(rcr4() & ~CR4_PCE);

	return 0;
}
/*
 * Check if a given allocation is feasible.
 */

static int
amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	int i;
	uint32_t allowed_unitmask, caps, config, unitmask;
	enum pmc_event pe;
	const struct pmc_descr *pd;

	(void) cpu;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row index %d", __LINE__, ri));

	pd = &amd_pmcdesc[ri].pm_descr;

	/* check class match */
	if (pd->pd_class != a->pm_class)
		return EINVAL;

	caps = pm->pm_caps;

	PMCDBG(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);

	if ((pd->pd_caps & caps) != caps)
		return EPERM;

	pe = a->pm_ev;

	/* map ev to the correct event mask code */
	config = allowed_unitmask = 0;
	for (i = 0; i < amd_event_codes_size; i++)
		if (amd_event_codes[i].pe_ev == pe) {
			config =
			    AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
			allowed_unitmask =
			    AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
			break;
		}
	if (i == amd_event_codes_size)
		return EINVAL;

	unitmask = a->pm_md.pm_amd.pm_amd_config & AMD_PMC_UNITMASK;
	if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
		return EINVAL;

	if (unitmask && (caps & PMC_CAP_QUALIFIER))
		config |= unitmask;

	if (caps & PMC_CAP_THRESHOLD)
		config |= a->pm_md.pm_amd.pm_amd_config & AMD_PMC_COUNTERMASK;

	/* set at least one of the 'usr' or 'os' caps */
	if (caps & PMC_CAP_USER)
		config |= AMD_PMC_USR;
	if (caps & PMC_CAP_SYSTEM)
		config |= AMD_PMC_OS;
	if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
		config |= (AMD_PMC_USR|AMD_PMC_OS);

	if (caps & PMC_CAP_EDGE)
		config |= AMD_PMC_EDGE;
	if (caps & PMC_CAP_INVERT)
		config |= AMD_PMC_INVERT;
	if (caps & PMC_CAP_INTERRUPT)
		config |= AMD_PMC_INT;

	pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */

	PMCDBG(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config);

	return 0;
}
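/*
 * For reference, the EVSEL (PERF_CTL) layout these flags map onto, per
 * the AMD BIOS and Kernel Developer's Guide: bits 7:0 select the event,
 * bits 15:8 carry the unit mask, bit 16 (USR) and bit 17 (OS) qualify
 * counting by privilege level, bit 18 selects edge detection, bit 20
 * enables the overflow interrupt, bit 22 enables the counter, bit 23
 * inverts the counter-mask test, and bits 31:24 hold the counter mask
 * (threshold).
 */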
/*
 * Release machine dependent state associated with a PMC.  This is a
 * no-op on this architecture.
 */

/* ARGSUSED0 */
static int
amd_release_pmc(int cpu, int ri, struct pmc *pmc)
{
#ifdef	DEBUG
	const struct amd_descr *pd;
#endif
	struct pmc_hw *phw;

	(void) pmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];

	KASSERT(phw->phw_pmc == NULL,
	    ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

#ifdef	DEBUG
	pd = &amd_pmcdesc[ri];
	if (pd->pm_descr.pd_class == amd_pmc_class)
		KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
		    ("[amd,%d] PMC %d released while active", __LINE__, ri));
#endif

	return 0;
}
/*
 * start a PMC.
 */

static int
amd_start_pmc(int cpu, int ri)
{
	uint32_t config;
	struct pmc *pm;
	struct pmc_hw *phw;
	const struct amd_descr *pd;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
	pm  = phw->phw_pmc;
	pd  = &amd_pmcdesc[ri];

	KASSERT(pm != NULL,
	    ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
		cpu, ri));

	PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);

	KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
	    ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
	    ri, cpu, pd->pm_descr.pd_name));

	/* turn on the PMC ENABLE bit */
	config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;

	PMCDBG(MDP,STA,2,"amd-start config=0x%x", config);

	wrmsr(pd->pm_evsel, config);
	return 0;
}
/*
 * Stop a PMC.
 */

static int
amd_stop_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct pmc_hw *phw;
	const struct amd_descr *pd;
	uint32_t config;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
	pm  = phw->phw_pmc;
	pd  = &amd_pmcdesc[ri];

	KASSERT(pm != NULL,
	    ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
		cpu, ri));
	KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
	    ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
		__LINE__, ri, cpu, pd->pm_descr.pd_name));

	PMCDBG(MDP,STO,1,"amd-stop ri=%d", ri);

	/* turn off the PMC ENABLE bit */
	config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
	wrmsr(pd->pm_evsel, config);
	return 0;
}
/*
 * Interrupt handler.  This function needs to return '1' if the
 * interrupt was this CPU's PMCs or '0' otherwise.  It is not allowed
 * to sleep or do anything a 'fast' interrupt handler is not allowed
 * to do.
 */

static int
amd_intr(int cpu, struct trapframe *tf)
{
	int i, error, retval;
	uint32_t config, evsel, perfctr;
	struct pmc *pm;
	struct amd_cpu *pac;
	pmc_value_t v;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[amd,%d] out of range CPU %d", __LINE__, cpu));

	PMCDBG(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
	    TRAPF_USERMODE(tf));

	retval = 0;

	pac = amd_pcpu[cpu];

	/*
	 * look for all PMCs that have interrupted:
	 * - look for a running, sampling PMC which has overflowed
	 *   and which has a valid 'struct pmc' association
	 *
	 * If found, we call a helper to process the interrupt.
	 *
	 * If multiple PMCs interrupt at the same time, the AMD64
	 * processor appears to deliver as many NMIs as there are
	 * outstanding PMC interrupts.  So we process only one NMI
	 * interrupt at a time.
	 */
	for (i = 0; retval == 0 && i < AMD_NPMCS; i++) {

		if ((pm = pac->pc_amdpmcs[i].phw_pmc) == NULL ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
			continue;
		}

		if (!AMD_PMC_HAS_OVERFLOWED(i))
			continue;

		retval = 1;	/* Found an interrupting PMC. */

		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		/* Stop the PMC, reload count. */
		evsel   = AMD_PMC_EVSEL_0 + i;
		perfctr = AMD_PMC_PERFCTR_0 + i;
		v       = pm->pm_sc.pm_reloadcount;
		config  = rdmsr(evsel);

		KASSERT((config & ~AMD_PMC_ENABLE) ==
		    (pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE),
		    ("[amd,%d] config mismatch reg=0x%x pm=0x%x", __LINE__,
			config, pm->pm_md.pm_amd.pm_amd_evsel));

		wrmsr(evsel, config & ~AMD_PMC_ENABLE);
		wrmsr(perfctr, AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v));

		/* Restart the counter if logging succeeded. */
		error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
		    TRAPF_USERMODE(tf));
		if (error == 0)
			wrmsr(evsel, config | AMD_PMC_ENABLE);
	}

	atomic_add_int(retval ? &pmc_stats.pm_intr_processed :
	    &pmc_stats.pm_intr_ignored, 1);

	return (retval);
}
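/*
 * Note that AMD_PMC_HAS_OVERFLOWED() above is assumed to test bit 47 of
 * the raw counter: a sampling PMC counts upward from the negated reload
 * count, so the sign bit of the 48-bit counter drops to zero exactly
 * when the counter wraps past zero and posts its overflow NMI.
 */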
/*
 * describe a PMC
 */

static int
amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	size_t copied;
	const struct amd_descr *pd;
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[amd,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] row-index %d out of range", __LINE__, ri));

	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
	pd  = &amd_pmcdesc[ri];

	if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
		 PMC_NAME_MAX, &copied)) != 0)
		return error;

	pi->pm_class = pd->pm_descr.pd_class;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return 0;
}
/*
 * i386 specific entry points
 */

/*
 * return the MSR address of the given PMC.
 */

static int
amd_get_msr(int ri, uint32_t *msr)
{
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] ri %d out of range", __LINE__, ri));

	*msr = amd_pmcdesc[ri].pm_perfctr - AMD_PMC_PERFCTR_0;

	return (0);
}
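/*
 * The value returned above is the offset of this PMC's PERFCTR MSR from
 * AMD_PMC_PERFCTR_0; since the four counter MSRs are consecutive, it
 * doubles as the counter index a userland consumer can hand to the
 * RDPMC instruction once amd_switch_in() has set CR4_PCE for it.
 */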
/*
 * processor dependent initialization.
 */

static int
amd_pcpu_init(struct pmc_mdep *md, int cpu)
{
	int classindex, first_ri, n;
	struct amd_cpu *pac;
	struct pmc_cpu *pc;
	struct pmc_hw  *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[amd,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu);

	amd_pcpu[cpu] = pac = malloc(sizeof(struct amd_cpu), M_PMC,
	    M_WAITOK|M_ZERO);

	/*
	 * Set the content of the hardware descriptors to a known
	 * state and initialize pointers in the MI per-cpu descriptor.
	 */
	pc = pmc_pcpu[cpu];
#if	defined(__amd64__)
	classindex = PMC_MDEP_CLASS_INDEX_K8;
#elif	defined(__i386__)
	classindex = md->pmd_cputype == PMC_CPU_AMD_K8 ?
	    PMC_MDEP_CLASS_INDEX_K8 : PMC_MDEP_CLASS_INDEX_K7;
#endif
	first_ri = md->pmd_classdep[classindex].pcd_ri;

	KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu pointer", __LINE__));

	for (n = 0, phw = pac->pc_amdpmcs; n < AMD_NPMCS; n++, phw++) {
		phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
		phw->phw_pmc   = NULL;
		pc->pc_hwpmcs[n + first_ri] = phw;
	}

	return (0);
}
/*
 * processor dependent cleanup prior to the KLD
 * being unloaded
 */

static int
amd_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	int classindex, first_ri, i;
	uint32_t evsel;
	struct pmc_cpu *pc;
	struct amd_cpu *pac;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));

	PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu);

	/*
	 * First, turn off all PMCs on this CPU.
	 */
	for (i = 0; i < 4; i++) { /* XXX this loop is now not needed */
		evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
		evsel &= ~AMD_PMC_ENABLE;
		wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
	}

	/*
	 * Next, free up allocated space.
	 */
	if ((pac = amd_pcpu[cpu]) == NULL)
		return (0);

	amd_pcpu[cpu] = NULL;

#ifdef	DEBUG
	for (i = 0; i < AMD_NPMCS; i++) {
		KASSERT(pac->pc_amdpmcs[i].phw_pmc == NULL,
		    ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
		KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + i),
		    ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
	}
#endif

	pc = pmc_pcpu[cpu];
	KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu state", __LINE__));

#if	defined(__amd64__)
	classindex = PMC_MDEP_CLASS_INDEX_K8;
#elif	defined(__i386__)
	classindex = md->pmd_cputype == PMC_CPU_AMD_K8 ? PMC_MDEP_CLASS_INDEX_K8 :
	    PMC_MDEP_CLASS_INDEX_K7;
#endif
	first_ri = md->pmd_classdep[classindex].pcd_ri;

	/*
	 * Reset pointers in the MI 'per-cpu' state.
	 */
	for (i = 0; i < AMD_NPMCS; i++) {
		pc->pc_hwpmcs[i + first_ri] = NULL;
	}

	free(pac, M_PMC);

	return (0);
}
/*
 * Initialize ourselves.
 */

static struct pmc_mdep *
pmc_amd_initialize(void)
{
	int classindex, error, i, ncpus;
	struct pmc_classdep *pcd;
	enum pmc_cputype cputype;
	struct pmc_mdep *pmc_mdep;
	enum pmc_class class;
	char *name;

	/*
	 * The presence of hardware performance counters on the AMD
	 * Athlon, Duron or later processors, is _not_ indicated by
	 * any of the processor feature flags set by the 'CPUID'
	 * instruction, so we only check the 'instruction family'
	 * field returned by CPUID for instruction family >= 6.
	 */

	name = NULL;
	switch (cpu_id & 0xF00) {
#if	defined(__i386__)
	case 0x600:		/* Athlon(tm) processor */
		classindex = PMC_MDEP_CLASS_INDEX_K7;
		cputype = PMC_CPU_AMD_K7;
		class = PMC_CLASS_K7;
		name = "K7";
		break;
#endif
	case 0xF00:		/* Athlon64/Opteron processor */
		classindex = PMC_MDEP_CLASS_INDEX_K8;
		cputype = PMC_CPU_AMD_K8;
		class = PMC_CLASS_K8;
		name = "K8";
		break;

	default:
		(void) printf("pmc: Unknown AMD CPU.\n");
		return NULL;
	}

#ifdef	DEBUG
	amd_pmc_class = class;
#endif

	/*
	 * Allocate space for pointers to PMC HW descriptors and for
	 * the MDEP structure used by MI code.
	 */
	amd_pcpu = malloc(sizeof(struct amd_cpu *) * pmc_cpu_max(), M_PMC,
	    M_WAITOK|M_ZERO);

	/*
	 * These processors have two classes of PMCs: the TSC and
	 * programmable PMCs.
	 */
	pmc_mdep = pmc_mdep_alloc(2);

	pmc_mdep->pmd_cputype = cputype;

	ncpus = pmc_cpu_max();

	/* Initialize the TSC. */
	error = pmc_tsc_initialize(pmc_mdep, ncpus);
	if (error)
		goto error;

	/* Initialize AMD K7 and K8 PMC handling. */
	pcd = &pmc_mdep->pmd_classdep[classindex];

	pcd->pcd_caps		= AMD_PMC_CAPS;
	pcd->pcd_class		= class;
	pcd->pcd_num		= AMD_NPMCS;
	pcd->pcd_ri		= pmc_mdep->pmd_npmc;
	pcd->pcd_width		= 48;

	/* fill in the correct pmc name and class */
	for (i = 0; i < AMD_NPMCS; i++) {
		(void) snprintf(amd_pmcdesc[i].pm_descr.pd_name,
		    sizeof(amd_pmcdesc[i].pm_descr.pd_name), "%s-%d",
		    name, i);
		amd_pmcdesc[i].pm_descr.pd_class = class;
	}

	pcd->pcd_allocate_pmc	= amd_allocate_pmc;
	pcd->pcd_config_pmc	= amd_config_pmc;
	pcd->pcd_describe	= amd_describe;
	pcd->pcd_get_config	= amd_get_config;
	pcd->pcd_get_msr	= amd_get_msr;
	pcd->pcd_pcpu_fini	= amd_pcpu_fini;
	pcd->pcd_pcpu_init	= amd_pcpu_init;
	pcd->pcd_read_pmc	= amd_read_pmc;
	pcd->pcd_release_pmc	= amd_release_pmc;
	pcd->pcd_start_pmc	= amd_start_pmc;
	pcd->pcd_stop_pmc	= amd_stop_pmc;
	pcd->pcd_write_pmc	= amd_write_pmc;

	pmc_mdep->pmd_pcpu_init  = NULL;
	pmc_mdep->pmd_pcpu_fini  = NULL;
	pmc_mdep->pmd_intr	 = amd_intr;
	pmc_mdep->pmd_switch_in  = amd_switch_in;
	pmc_mdep->pmd_switch_out = amd_switch_out;

	pmc_mdep->pmd_npmc      += AMD_NPMCS;

	PMCDBG(MDP,INI,0,"%s","amd-initialize");

	return (pmc_mdep);

  error:
	if (error)
		free(pmc_mdep, M_PMC);

	return (NULL);
}
/*
 * Finalization code for AMD CPUs.
 */

static void
pmc_amd_finalize(struct pmc_mdep *md)
{
#if	defined(INVARIANTS)
	int classindex, i, ncpus, pmcclass;
#endif

	pmc_tsc_finalize(md);

	KASSERT(amd_pcpu != NULL, ("[amd,%d] NULL per-cpu array pointer",
	    __LINE__));

#if	defined(INVARIANTS)
	switch (md->pmd_cputype) {
#if	defined(__i386__)
	case PMC_CPU_AMD_K7:
		classindex = PMC_MDEP_CLASS_INDEX_K7;
		pmcclass = PMC_CLASS_K7;
		break;
#endif
	default:
		classindex = PMC_MDEP_CLASS_INDEX_K8;
		pmcclass = PMC_CLASS_K8;
	}

	KASSERT(md->pmd_classdep[classindex].pcd_class == pmcclass,
	    ("[amd,%d] pmc class mismatch", __LINE__));

	ncpus = pmc_cpu_max();

	for (i = 0; i < ncpus; i++)
		KASSERT(amd_pcpu[i] == NULL, ("[amd,%d] non-null pcpu",
		    __LINE__));
#endif

	free(amd_pcpu, M_PMC);
	amd_pcpu = NULL;
}