2 * Copyright (c) 2003-2007 Joseph Koshy
3 * Copyright (c) 2007 The FreeBSD Foundation
6 * Portions of this software were developed by A. Joseph Koshy under
7 * sponsorship from the FreeBSD Foundation and Google, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
37 #include <sys/mutex.h>
39 #include <sys/pmckern.h>
41 #include <sys/systm.h>
43 #include <machine/intr_machdep.h>
44 #include <machine/apicvar.h>
45 #include <machine/cpu.h>
46 #include <machine/cpufunc.h>
47 #include <machine/cputypes.h>
48 #include <machine/md_var.h>
49 #include <machine/specialreg.h>
54 * The P4 has 18 PMCs, divided into 4 groups with 4,4,4 and 6 PMCs
55 * respectively. Each PMC comprises two model-specific registers:
56 * a counter configuration control register (CCCR) and a counter
57 * register that holds the actual event counts.
59 * Configuring an event requires the use of one of 45 event selection
60 * control registers (ESCR). Events are associated with specific
61 * ESCRs. Each PMC group has a set of ESCRs it can use.
63 * - The BPU counter group (4 PMCs) can use the 16 ESCRs:
64 * BPU_ESCR{0,1}, IS_ESCR{0,1}, MOB_ESCR{0,1}, ITLB_ESCR{0,1},
65 * PMH_ESCR{0,1}, IX_ESCR{0,1}, FSB_ESCR{0,1}, BSU_ESCR{0,1}.
67 * - The MS counter group (4 PMCs) can use the 6 ESCRs: MS_ESCR{0,1},
68 * TC_ESCR{0,1}, TBPU_ESCR{0,1}.
70 * - The FLAME counter group (4 PMCs) can use the 10 ESCRs:
71 * FLAME_ESCR{0,1}, FIRM_ESCR{0,1}, SAAT_ESCR{0,1}, U2L_ESCR{0,1},
72 * DAC_ESCR{0,1}.
74 * - The IQ counter group (6 PMCs) can use the 13 ESCRs: IQ_ESCR{0,1},
75 * ALF_ESCR{0,1}, RAT_ESCR{0,1}, SSU_ESCR0, CRU_ESCR{0,1,2,3,4,5}.
77 * Even-numbered ESCRs can be used with counters 0, 1 and 4 (if
78 * present) of a counter group. Odd-numbered ESCRs can be used with
79 * counters 2, 3 and 5 (if present) of a counter group. The
80 * 'p4_escrs[]' table describes these restrictions in a form that
81 * function 'p4_allocate_pmc()' uses for making allocation decisions.
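 *
 * For example (illustrative, taken from the 'p4_escrs[]' table below):
 * FSB_ESCR0, an even-numbered ESCR, may be paired with BPU_COUNTER0 or
 * BPU_COUNTER1, while FSB_ESCR1, its odd-numbered sibling, may be
 * paired with BPU_COUNTER2 or BPU_COUNTER3.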
83 * SYSTEM-MODE AND THREAD-MODE ALLOCATION
85 * In addition to remembering the state of PMC rows
86 * ('FREE','STANDALONE', or 'THREAD'), we similarly need to track the
87 * state of ESCR rows. If an ESCR is allocated to a system-mode PMC
88 * on a CPU, it cannot also be allocated to a thread-mode PMC. On a
89 * multi-cpu (multiple physical CPUs) system, ESCR allocation on each
90 * CPU is tracked by the pc_escrs[] array.
92 * Each system-mode PMC that is using an ESCR records its row-index in
93 * the appropriate entry and system-mode allocation attempts check
94 * that an ESCR is available using this array. Process-mode PMCs do
95 * not use the pc_escrs[] array, since the ESCR row itself would have been
96 * marked as in 'THREAD' mode.
98 * HYPERTHREADING SUPPORT
100 * When HTT is enabled, the FreeBSD kernel treats the two 'logical'
101 * processors as independent CPUs and can schedule kernel threads on them
102 * independently. However, the two logical CPUs share the same set of
103 * PMC resources. We need to ensure that:
104 * - PMCs that use the PMC_F_DESCENDANTS semantics are handled correctly,
106 * - Threads of multi-threaded processes that get scheduled on the same
107 * physical CPU are handled correctly.
111 * Not all HTT-capable systems will have HTT enabled. We detect the
112 * presence of HTT by checking whether 'p4_pcpu_init()' gets called for a
113 * secondary logical CPU in an HTT pair.
115 * Note that hwpmc(4) cannot currently deal with a change in HTT status
116 * once the driver has been initialized.
118 * Handling HTT READ / WRITE / START / STOP
120 * PMC resources are shared across the CPUs in an HTT pair. We
121 * designate the lower-numbered CPU in an HTT pair as the 'primary'
122 * CPU. In each primary CPU's state we keep track of a 'runcount'
123 * which reflects the number of PMC-using processes that have been
124 * scheduled on its secondary CPU. Process-mode PMC operations will
125 * actually 'start' or 'stop' hardware only if these are the first or
126 * last processes respectively to use the hardware. PMC values
127 * written by a 'write' operation are saved and are transferred to
128 * hardware at PMC 'start' time if the runcount is 0. If the runcount
129 * is greater than 0 at the time of a 'start' operation, we keep track
130 * of the actual hardware value at the time of the 'start' operation
131 * and use this to adjust the final readings at PMC 'stop' or 'read'
132 * time.
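 *
 * A worked example (illustrative): if the hardware counter read 0x20
 * when a second thread 'start'ed on the paired logical CPU and reads
 * 0x50 at that thread's 'stop', the thread is charged 0x30 counts; if
 * the 40-bit counter wrapped in between, the charge is
 * (2^40 - start) + current instead (see 'p4_read_pmc()' and
 * 'p4_stop_pmc()' below).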
134 * Execution sequences:
136 * Case 1: CPUx +...- (no overlap)
140 * Case 2: CPUx +........- (partial overlap)
144 * Case 3: CPUx +..............- (fully overlapped)
149 * 'CPU[xy]' : one of the two logical processors on an HTT CPU.
150 * 'RC' : run count (#threads per physical core).
151 * '+' : point in time when a thread is put on a CPU.
152 * '-' : point in time where a thread is taken off a CPU.
154 * Handling HTT CONFIG
156 * Different processes attached to the same PMC may get scheduled on
157 * the two logical processors in the package. We keep track of config
158 * and de-config operations using the CFGFLAGS fields of the per-physical
159 * CPU state.
163 P4_PMC(BPU_COUNTER0) \
164 P4_PMC(BPU_COUNTER1) \
165 P4_PMC(BPU_COUNTER2) \
166 P4_PMC(BPU_COUNTER3) \
167 P4_PMC(MS_COUNTER0) \
168 P4_PMC(MS_COUNTER1) \
169 P4_PMC(MS_COUNTER2) \
170 P4_PMC(MS_COUNTER3) \
171 P4_PMC(FLAME_COUNTER0) \
172 P4_PMC(FLAME_COUNTER1) \
173 P4_PMC(FLAME_COUNTER2) \
174 P4_PMC(FLAME_COUNTER3) \
175 P4_PMC(IQ_COUNTER0) \
176 P4_PMC(IQ_COUNTER1) \
177 P4_PMC(IQ_COUNTER2) \
178 P4_PMC(IQ_COUNTER3) \
179 P4_PMC(IQ_COUNTER4) \
180 P4_PMC(IQ_COUNTER5) \
185 #define P4_PMC(N) P4_PMC_##N ,
190 * P4 ESCR descriptors
194 P4_ESCR(BSU_ESCR0, 0x3A0, BPU_COUNTER0, BPU_COUNTER1, NONE) \
195 P4_ESCR(BSU_ESCR1, 0x3A1, BPU_COUNTER2, BPU_COUNTER3, NONE) \
196 P4_ESCR(FSB_ESCR0, 0x3A2, BPU_COUNTER0, BPU_COUNTER1, NONE) \
197 P4_ESCR(FSB_ESCR1, 0x3A3, BPU_COUNTER2, BPU_COUNTER3, NONE) \
198 P4_ESCR(FIRM_ESCR0, 0x3A4, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
199 P4_ESCR(FIRM_ESCR1, 0x3A5, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
200 P4_ESCR(FLAME_ESCR0, 0x3A6, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
201 P4_ESCR(FLAME_ESCR1, 0x3A7, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
202 P4_ESCR(DAC_ESCR0, 0x3A8, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
203 P4_ESCR(DAC_ESCR1, 0x3A9, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
204 P4_ESCR(MOB_ESCR0, 0x3AA, BPU_COUNTER0, BPU_COUNTER1, NONE) \
205 P4_ESCR(MOB_ESCR1, 0x3AB, BPU_COUNTER2, BPU_COUNTER3, NONE) \
206 P4_ESCR(PMH_ESCR0, 0x3AC, BPU_COUNTER0, BPU_COUNTER1, NONE) \
207 P4_ESCR(PMH_ESCR1, 0x3AD, BPU_COUNTER2, BPU_COUNTER3, NONE) \
208 P4_ESCR(SAAT_ESCR0, 0x3AE, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
209 P4_ESCR(SAAT_ESCR1, 0x3AF, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
210 P4_ESCR(U2L_ESCR0, 0x3B0, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
211 P4_ESCR(U2L_ESCR1, 0x3B1, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
212 P4_ESCR(BPU_ESCR0, 0x3B2, BPU_COUNTER0, BPU_COUNTER1, NONE) \
213 P4_ESCR(BPU_ESCR1, 0x3B3, BPU_COUNTER2, BPU_COUNTER3, NONE) \
214 P4_ESCR(IS_ESCR0, 0x3B4, BPU_COUNTER0, BPU_COUNTER1, NONE) \
215 P4_ESCR(IS_ESCR1, 0x3B5, BPU_COUNTER2, BPU_COUNTER3, NONE) \
216 P4_ESCR(ITLB_ESCR0, 0x3B6, BPU_COUNTER0, BPU_COUNTER1, NONE) \
217 P4_ESCR(ITLB_ESCR1, 0x3B7, BPU_COUNTER2, BPU_COUNTER3, NONE) \
218 P4_ESCR(CRU_ESCR0, 0x3B8, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
219 P4_ESCR(CRU_ESCR1, 0x3B9, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
220 P4_ESCR(IQ_ESCR0, 0x3BA, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
221 P4_ESCR(IQ_ESCR1, 0x3BB, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
222 P4_ESCR(RAT_ESCR0, 0x3BC, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
223 P4_ESCR(RAT_ESCR1, 0x3BD, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
224 P4_ESCR(SSU_ESCR0, 0x3BE, IQ_COUNTER0, IQ_COUNTER2, IQ_COUNTER4) \
225 P4_ESCR(MS_ESCR0, 0x3C0, MS_COUNTER0, MS_COUNTER1, NONE) \
226 P4_ESCR(MS_ESCR1, 0x3C1, MS_COUNTER2, MS_COUNTER3, NONE) \
227 P4_ESCR(TBPU_ESCR0, 0x3C2, MS_COUNTER0, MS_COUNTER1, NONE) \
228 P4_ESCR(TBPU_ESCR1, 0x3C3, MS_COUNTER2, MS_COUNTER3, NONE) \
229 P4_ESCR(TC_ESCR0, 0x3C4, MS_COUNTER0, MS_COUNTER1, NONE) \
230 P4_ESCR(TC_ESCR1, 0x3C5, MS_COUNTER2, MS_COUNTER3, NONE) \
231 P4_ESCR(IX_ESCR0, 0x3C8, BPU_COUNTER0, BPU_COUNTER1, NONE) \
232 P4_ESCR(IX_ESCR1, 0x3C9, BPU_COUNTER2, BPU_COUNTER3, NONE) \
233 P4_ESCR(ALF_ESCR0, 0x3CA, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
234 P4_ESCR(ALF_ESCR1, 0x3CB, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
235 P4_ESCR(CRU_ESCR2, 0x3CC, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
236 P4_ESCR(CRU_ESCR3, 0x3CD, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
237 P4_ESCR(CRU_ESCR4, 0x3E0, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
238 P4_ESCR(CRU_ESCR5, 0x3E1, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
239 P4_ESCR(NONE, ~0, NONE, NONE, NONE)
242 #define P4_ESCR(N, MSR, P1, P2, P3) P4_ESCR_##N ,
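/*
 * Illustrative sketch: with P4_ESCR() defined as above, expanding the
 * P4_ESCRS() list yields an enumeration of ESCR names, roughly
 *
 *	enum pmc_p4escr { P4_ESCR_BSU_ESCR0, P4_ESCR_BSU_ESCR1, ...,
 *	    P4_ESCR_NONE };
 *
 * The same list is expanded again below, with a different definition
 * of P4_ESCR(), to build the 'p4_escrs[]' descriptor table.
 */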
247 struct pmc_p4escr_descr {
248 const char pm_escrname[PMC_NAME_MAX];
250 const enum pmc_p4pmc pm_pmcs[P4_MAX_PMC_PER_ESCR];
253 static struct pmc_p4escr_descr p4_escrs[] =
255 #define P4_ESCR(N, MSR, P1, P2, P3) \
258 .pm_escr_msr = (MSR), \
273 * P4 Event descriptor
276 struct p4_event_descr {
277 const enum pmc_event pm_event;
278 const uint32_t pm_escr_eventselect;
279 const uint32_t pm_cccr_select;
280 const char pm_is_ti_event;
281 enum pmc_p4escr pm_escrs[P4_MAX_ESCR_PER_EVENT];
284 static struct p4_event_descr p4_events[] = {
286 #define P4_EVDESCR(NAME, ESCREVENTSEL, CCCRSEL, TI_EVENT, ESCR0, ESCR1) \
288 .pm_event = PMC_EV_P4_##NAME, \
289 .pm_escr_eventselect = (ESCREVENTSEL), \
290 .pm_cccr_select = (CCCRSEL), \
291 .pm_is_ti_event = (TI_EVENT), \
299 P4_EVDESCR(TC_DELIVER_MODE, 0x01, 0x01, TRUE, TC_ESCR0, TC_ESCR1),
300 P4_EVDESCR(BPU_FETCH_REQUEST, 0x03, 0x00, FALSE, BPU_ESCR0, BPU_ESCR1),
301 P4_EVDESCR(ITLB_REFERENCE, 0x18, 0x03, FALSE, ITLB_ESCR0, ITLB_ESCR1),
302 P4_EVDESCR(MEMORY_CANCEL, 0x02, 0x05, FALSE, DAC_ESCR0, DAC_ESCR1),
303 P4_EVDESCR(MEMORY_COMPLETE, 0x08, 0x02, FALSE, SAAT_ESCR0, SAAT_ESCR1),
304 P4_EVDESCR(LOAD_PORT_REPLAY, 0x04, 0x02, FALSE, SAAT_ESCR0, SAAT_ESCR1),
305 P4_EVDESCR(STORE_PORT_REPLAY, 0x05, 0x02, FALSE, SAAT_ESCR0, SAAT_ESCR1),
306 P4_EVDESCR(MOB_LOAD_REPLAY, 0x03, 0x02, FALSE, MOB_ESCR0, MOB_ESCR1),
307 P4_EVDESCR(PAGE_WALK_TYPE, 0x01, 0x04, TRUE, PMH_ESCR0, PMH_ESCR1),
308 P4_EVDESCR(BSQ_CACHE_REFERENCE, 0x0C, 0x07, FALSE, BSU_ESCR0, BSU_ESCR1),
309 P4_EVDESCR(IOQ_ALLOCATION, 0x03, 0x06, FALSE, FSB_ESCR0, FSB_ESCR1),
310 P4_EVDESCR(IOQ_ACTIVE_ENTRIES, 0x1A, 0x06, FALSE, FSB_ESCR1, NONE),
311 P4_EVDESCR(FSB_DATA_ACTIVITY, 0x17, 0x06, TRUE, FSB_ESCR0, FSB_ESCR1),
312 P4_EVDESCR(BSQ_ALLOCATION, 0x05, 0x07, FALSE, BSU_ESCR0, NONE),
313 P4_EVDESCR(BSQ_ACTIVE_ENTRIES, 0x06, 0x07, FALSE, BSU_ESCR1, NONE),
314 /* BSQ_ACTIVE_ENTRIES inherits CPU specificity from BSQ_ALLOCATION */
315 P4_EVDESCR(SSE_INPUT_ASSIST, 0x34, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
316 P4_EVDESCR(PACKED_SP_UOP, 0x08, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
317 P4_EVDESCR(PACKED_DP_UOP, 0x0C, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
318 P4_EVDESCR(SCALAR_SP_UOP, 0x0A, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
319 P4_EVDESCR(SCALAR_DP_UOP, 0x0E, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
320 P4_EVDESCR(64BIT_MMX_UOP, 0x02, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
321 P4_EVDESCR(128BIT_MMX_UOP, 0x1A, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
322 P4_EVDESCR(X87_FP_UOP, 0x04, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
323 P4_EVDESCR(X87_SIMD_MOVES_UOP, 0x2E, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
324 P4_EVDESCR(GLOBAL_POWER_EVENTS, 0x13, 0x06, FALSE, FSB_ESCR0, FSB_ESCR1),
325 P4_EVDESCR(TC_MS_XFER, 0x05, 0x00, FALSE, MS_ESCR0, MS_ESCR1),
326 P4_EVDESCR(UOP_QUEUE_WRITES, 0x09, 0x00, FALSE, MS_ESCR0, MS_ESCR1),
327 P4_EVDESCR(RETIRED_MISPRED_BRANCH_TYPE,
328 0x05, 0x02, FALSE, TBPU_ESCR0, TBPU_ESCR1),
329 P4_EVDESCR(RETIRED_BRANCH_TYPE, 0x04, 0x02, FALSE, TBPU_ESCR0, TBPU_ESCR1),
330 P4_EVDESCR(RESOURCE_STALL, 0x01, 0x01, FALSE, ALF_ESCR0, ALF_ESCR1),
331 P4_EVDESCR(WC_BUFFER, 0x05, 0x05, TRUE, DAC_ESCR0, DAC_ESCR1),
332 P4_EVDESCR(B2B_CYCLES, 0x16, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
333 P4_EVDESCR(BNR, 0x08, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
334 P4_EVDESCR(SNOOP, 0x06, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
335 P4_EVDESCR(RESPONSE, 0x04, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
336 P4_EVDESCR(FRONT_END_EVENT, 0x08, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
337 P4_EVDESCR(EXECUTION_EVENT, 0x0C, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
338 P4_EVDESCR(REPLAY_EVENT, 0x09, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
339 P4_EVDESCR(INSTR_RETIRED, 0x02, 0x04, FALSE, CRU_ESCR0, CRU_ESCR1),
340 P4_EVDESCR(UOPS_RETIRED, 0x01, 0x04, FALSE, CRU_ESCR0, CRU_ESCR1),
341 P4_EVDESCR(UOP_TYPE, 0x02, 0x02, FALSE, RAT_ESCR0, RAT_ESCR1),
342 P4_EVDESCR(BRANCH_RETIRED, 0x06, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
343 P4_EVDESCR(MISPRED_BRANCH_RETIRED, 0x03, 0x04, FALSE, CRU_ESCR0, CRU_ESCR1),
344 P4_EVDESCR(X87_ASSIST, 0x03, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
345 P4_EVDESCR(MACHINE_CLEAR, 0x02, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3)
350 #define P4_EVENT_IS_TI(E) ((E)->pm_is_ti_event == TRUE)
352 #define P4_NEVENTS (PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1)
359 struct pmc_descr pm_descr; /* common information */
360 enum pmc_p4pmc pm_pmcnum; /* PMC number */
361 uint32_t pm_pmc_msr; /* PERFCTR MSR address */
362 uint32_t pm_cccr_msr; /* CCCR MSR address */
365 static struct p4pmc_descr p4_pmcdesc[P4_NPMCS] = {
366 #define P4_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
367 PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
368 PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE | \
369 PMC_CAP_TAGGING | PMC_CAP_CASCADE)
371 #define P4_PMCDESCR(N, PMC, CCCR) \
376 .pd_class = PMC_CLASS_P4, \
377 .pd_caps = P4_PMC_CAPS, \
380 .pm_pmcnum = P4_PMC_##N, \
381 .pm_cccr_msr = (CCCR), \
382 .pm_pmc_msr = (PMC) \
385 P4_PMCDESCR(BPU_COUNTER0, 0x300, 0x360),
386 P4_PMCDESCR(BPU_COUNTER1, 0x301, 0x361),
387 P4_PMCDESCR(BPU_COUNTER2, 0x302, 0x362),
388 P4_PMCDESCR(BPU_COUNTER3, 0x303, 0x363),
389 P4_PMCDESCR(MS_COUNTER0, 0x304, 0x364),
390 P4_PMCDESCR(MS_COUNTER1, 0x305, 0x365),
391 P4_PMCDESCR(MS_COUNTER2, 0x306, 0x366),
392 P4_PMCDESCR(MS_COUNTER3, 0x307, 0x367),
393 P4_PMCDESCR(FLAME_COUNTER0, 0x308, 0x368),
394 P4_PMCDESCR(FLAME_COUNTER1, 0x309, 0x369),
395 P4_PMCDESCR(FLAME_COUNTER2, 0x30A, 0x36A),
396 P4_PMCDESCR(FLAME_COUNTER3, 0x30B, 0x36B),
397 P4_PMCDESCR(IQ_COUNTER0, 0x30C, 0x36C),
398 P4_PMCDESCR(IQ_COUNTER1, 0x30D, 0x36D),
399 P4_PMCDESCR(IQ_COUNTER2, 0x30E, 0x36E),
400 P4_PMCDESCR(IQ_COUNTER3, 0x30F, 0x36F),
401 P4_PMCDESCR(IQ_COUNTER4, 0x310, 0x370),
402 P4_PMCDESCR(IQ_COUNTER5, 0x311, 0x371),
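/*
 * Illustrative note: the PERFCTR MSRs (0x300..0x311) and CCCR MSRs
 * (0x360..0x371) are laid out consecutively in row-index order.
 * Assuming P4_PERFCTR_MSR_FIRST and P4_CCCR_MSR_FIRST name the first
 * MSR of each range, row 'ri' uses P4_PERFCTR_MSR_FIRST + ri and
 * P4_CCCR_MSR_FIRST + ri; e.g., ri == 2 maps to BPU_COUNTER2 (0x302)
 * and its CCCR (0x362). p4_intr() below relies on this layout.
 */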
408 #define P4_NHTT 2 /* logical processors/chip */
410 static int p4_system_has_htt;
413 * Per-CPU data structure for P4 class CPUs
415 * [19 struct pmc_hw structures]
416 * [45 ESCRs status bytes]
417 * [per-cpu spin mutex]
418 * [19 flag fields for holding config flags and a runcount]
419 * [19*2 hw value fields] (Thread mode PMC support)
421 * [19*2 EIP values] (Sampling mode PMCs)
422 * [19*2 pmc value fields] (Thread mode PMC support))
426 struct pmc_hw pc_p4pmcs[P4_NPMCS];
427 char pc_escrs[P4_NESCR];
428 struct mtx pc_mtx; /* spin lock */
429 uint32_t pc_intrflag; /* NMI handler flags */
430 unsigned int pc_intrlock; /* NMI handler spin lock */
431 unsigned char pc_flags[P4_NPMCS]; /* 4 bits each: {cfg,run}count */
433 pmc_value_t pc_hw[P4_NPMCS * P4_NHTT];
434 uintptr_t pc_ip[P4_NPMCS * P4_NHTT];
436 pmc_value_t pc_pmc_values[P4_NPMCS * P4_NHTT];
439 static struct p4_cpu **p4_pcpu;
441 #define P4_PCPU_PMC_VALUE(PC,RI,CPU) (PC)->pc_pmc_values[(RI)*P4_NHTT + ((CPU) & 1)]
442 #define P4_PCPU_HW_VALUE(PC,RI,CPU) (PC)->pc_hw[(RI)*P4_NHTT + ((CPU) & 1)]
443 #define P4_PCPU_SAVED_IP(PC,RI,CPU) (PC)->pc_ip[(RI)*P4_NHTT + ((CPU) & 1)]
445 #define P4_PCPU_GET_FLAGS(PC,RI,MASK) ((PC)->pc_flags[(RI)] & (MASK))
446 #define P4_PCPU_SET_FLAGS(PC,RI,MASK,VAL) do { \
448 _tmp = (PC)->pc_flags[(RI)]; \
449 _tmp &= ~(MASK); \
450 _tmp |= (VAL) & (MASK); \
451 (PC)->pc_flags[(RI)] = _tmp; \
454 #define P4_PCPU_GET_RUNCOUNT(PC,RI) P4_PCPU_GET_FLAGS(PC,RI,0x0F)
455 #define P4_PCPU_SET_RUNCOUNT(PC,RI,V) P4_PCPU_SET_FLAGS(PC,RI,0x0F,V)
457 #define P4_PCPU_GET_CFGFLAGS(PC,RI) (P4_PCPU_GET_FLAGS(PC,RI,0xF0) >> 4)
458 #define P4_PCPU_SET_CFGFLAGS(PC,RI,C) P4_PCPU_SET_FLAGS(PC,RI,0xF0,((C) <<4))
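/*
 * Illustrative example of the pc_flags[] encoding used by the macros
 * above: a value of 0x31 means CFGFLAGS == 0x3 (the PMC is configured
 * on both logical CPUs of the pair) and RUNCOUNT == 1 (one thread is
 * currently using the hardware), i.e.
 *
 *	unsigned char flags = 0x31;
 *	int runcount = flags & 0x0F;		(yields 1)
 *	int cfgflags = (flags & 0xF0) >> 4;	(yields 0x3)
 */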
460 #define P4_CPU_TO_FLAG(C) (P4_CPU_IS_HTT_SECONDARY(C) ? 0x2 : 0x1)
462 #define P4_PCPU_GET_INTRFLAG(PC,I) ((PC)->pc_intrflag & (1 << (I)))
463 #define P4_PCPU_SET_INTRFLAG(PC,I,V) do { \
464 uint32_t __mask; \
465 __mask = 1 << (I); \
466 if ((V)) \
467 (PC)->pc_intrflag |= __mask; \
468 else \
469 (PC)->pc_intrflag &= ~__mask; \
473 * A minimal spin lock implementation for use inside the NMI handler.
475 * We don't want to use a regular spin lock here, because curthread
476 * may not be consistent at the time the handler is invoked.
478 #define P4_PCPU_ACQ_INTR_SPINLOCK(PC) do { \
479 while (!atomic_cmpset_acq_int(&(PC)->pc_intrlock, 0, 1)) \
482 #define P4_PCPU_REL_INTR_SPINLOCK(PC) \
483 atomic_store_rel_int(&(PC)->pc_intrlock, 0);
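/*
 * Usage sketch (see p4_intr() below): when HTT is active, the handler
 * brackets its scan of the CCCRs with this acquire/release pair:
 *
 *	if (p4_system_has_htt)
 *		P4_PCPU_ACQ_INTR_SPINLOCK(pc);
 *	...examine CCCRs, set/clear per-row interrupt flags...
 *	if (p4_system_has_htt)
 *		P4_PCPU_REL_INTR_SPINLOCK(pc);
 */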
485 /* ESCR row disposition */
486 static int p4_escrdisp[P4_NESCR];
488 #define P4_ESCR_ROW_DISP_IS_THREAD(E) (p4_escrdisp[(E)] > 0)
489 #define P4_ESCR_ROW_DISP_IS_STANDALONE(E) (p4_escrdisp[(E)] < 0)
490 #define P4_ESCR_ROW_DISP_IS_FREE(E) (p4_escrdisp[(E)] == 0)
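/*
 * Worked example of the row-disposition convention: p4_escrdisp[e]
 * counts users of ESCR row 'e'; a positive count means the row is in
 * THREAD (process-virtual) use, a negative count means STANDALONE
 * (system-mode) use, and zero means FREE. After two system-mode
 * allocations of the same ESCR row on two different CPUs,
 * p4_escrdisp[e] == -2.
 */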
492 #define P4_ESCR_MARK_ROW_STANDALONE(E) do { \
493 KASSERT(p4_escrdisp[(E)] <= 0, ("[p4,%d] row disposition error",\
495 atomic_add_int(&p4_escrdisp[(E)], -1); \
496 KASSERT(p4_escrdisp[(E)] >= (-pmc_cpu_max_active()), \
497 ("[p4,%d] row disposition error", __LINE__)); \
500 #define P4_ESCR_UNMARK_ROW_STANDALONE(E) do { \
501 atomic_add_int(&p4_escrdisp[(E)], 1); \
502 KASSERT(p4_escrdisp[(E)] <= 0, ("[p4,%d] row disposition error",\
506 #define P4_ESCR_MARK_ROW_THREAD(E) do { \
507 KASSERT(p4_escrdisp[(E)] >= 0, ("[p4,%d] row disposition error", \
509 atomic_add_int(&p4_escrdisp[(E)], 1); \
512 #define P4_ESCR_UNMARK_ROW_THREAD(E) do { \
513 atomic_add_int(&p4_escrdisp[(E)], -1); \
514 KASSERT(p4_escrdisp[(E)] >= 0, ("[p4,%d] row disposition error", \
518 #define P4_PMC_IS_STOPPED(cccr) ((rdmsr(cccr) & P4_CCCR_ENABLE) == 0)
520 #define P4_CPU_IS_HTT_SECONDARY(cpu) \
521 (p4_system_has_htt ? ((cpu) & 1) : 0)
522 #define P4_TO_HTT_PRIMARY(cpu) \
523 (p4_system_has_htt ? ((cpu) & ~1) : (cpu))
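/*
 * For example, with HTT enabled and logical CPUs numbered 0..3, CPUs
 * {0,1} and {2,3} form the two pairs: CPU 3 is an HTT secondary and
 * P4_TO_HTT_PRIMARY(3) == 2. With HTT disabled, every CPU is its own
 * primary.
 */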
525 #define P4_CCCR_Tx_MASK (~(P4_CCCR_OVF_PMI_T0|P4_CCCR_OVF_PMI_T1| \
526 P4_CCCR_ENABLE|P4_CCCR_OVF))
527 #define P4_ESCR_Tx_MASK (~(P4_ESCR_T0_OS|P4_ESCR_T0_USR|P4_ESCR_T1_OS| \
534 static struct p4_event_descr *
535 p4_find_event(enum pmc_event ev)
539 for (n = 0; n < P4_NEVENTS; n++)
540 if (p4_events[n].pm_event == ev)
541 break;
542 if (n == P4_NEVENTS)
543 return (NULL);
544 return (&p4_events[n]);
548 * Initialize per-cpu state
552 p4_pcpu_init(struct pmc_mdep *md, int cpu)
555 int n, first_ri, phycpu;
558 struct pmc_cpu *pc, *plc;
560 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
561 ("[p4,%d] insane cpu number %d", __LINE__, cpu));
563 PMCDBG(MDP,INI,0, "p4-init cpu=%d is-primary=%d", cpu,
564 pmc_cpu_is_primary(cpu) != 0);
566 first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_P4].pcd_ri;
569 * The two CPUs in an HT pair share their per-cpu state.
571 * For HT capable CPUs, we assume that the two logical
572 * processors in the HT pair get two consecutive CPU ids
573 * starting with an even id #.
575 * The primary CPU (the even numbered CPU of the pair) would
576 * have been initialized prior to the initialization for the
580 if (!pmc_cpu_is_primary(cpu) && (cpu & 1)) {
582 p4_system_has_htt = 1;
584 phycpu = P4_TO_HTT_PRIMARY(cpu);
585 pc = pmc_pcpu[phycpu];
588 KASSERT(plc != pc, ("[p4,%d] per-cpu config error", __LINE__));
590 PMCDBG(MDP,INI,1, "p4-init cpu=%d phycpu=%d pc=%p", cpu,
592 KASSERT(pc, ("[p4,%d] Null Per-Cpu state cpu=%d phycpu=%d",
593 __LINE__, cpu, phycpu));
595 /* PMCs are shared with the physical CPU. */
596 for (n = 0; n < P4_NPMCS; n++)
597 plc->pc_hwpmcs[n + first_ri] =
598 pc->pc_hwpmcs[n + first_ri];
603 p4c = malloc(sizeof(struct p4_cpu), M_PMC, M_WAITOK|M_ZERO);
610 KASSERT(pc != NULL, ("[p4,%d] cpu %d null per-cpu", __LINE__, cpu));
613 phw = p4c->pc_p4pmcs;
615 for (n = 0; n < P4_NPMCS; n++, phw++) {
616 phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
617 PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
619 pc->pc_hwpmcs[n + first_ri] = phw;
622 pescr = p4c->pc_escrs;
623 for (n = 0; n < P4_NESCR; n++)
624 *pescr++ = P4_INVALID_PMC_INDEX;
626 mtx_init(&p4c->pc_mtx, "p4-pcpu", "pmc-leaf", MTX_SPIN);
632 * Destroy per-cpu state.
636 p4_pcpu_fini(struct pmc_mdep *md, int cpu)
642 PMCDBG(MDP,INI,0, "p4-cleanup cpu=%d", cpu);
645 first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_P4].pcd_ri;
647 for (i = 0; i < P4_NPMCS; i++)
648 pc->pc_hwpmcs[i + first_ri] = NULL;
650 if (!pmc_cpu_is_primary(cpu) && (cpu & 1))
655 KASSERT(p4c != NULL, ("[p4,%d] NULL pcpu", __LINE__));
657 /* Turn off all PMCs on this CPU */
658 for (i = 0; i < P4_NPMCS - 1; i++)
659 wrmsr(P4_CCCR_MSR_FIRST + i,
660 rdmsr(P4_CCCR_MSR_FIRST + i) & ~P4_CCCR_ENABLE);
662 mtx_destroy(&p4c->pc_mtx);
676 p4_read_pmc(int cpu, int ri, pmc_value_t *v)
682 struct p4pmc_descr *pd;
684 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
685 ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
686 KASSERT(ri >= 0 && ri < P4_NPMCS,
687 ("[p4,%d] illegal row-index %d", __LINE__, ri));
689 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
690 pm = pc->pc_p4pmcs[ri].phw_pmc;
691 pd = &p4_pmcdesc[ri];
694 ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__, cpu, ri));
696 KASSERT(pd->pm_descr.pd_class == PMC_TO_CLASS(pm),
697 ("[p4,%d] class mismatch pd %d != id class %d", __LINE__,
698 pd->pm_descr.pd_class, PMC_TO_CLASS(pm)));
700 mode = PMC_TO_MODE(pm);
702 PMCDBG(MDP,REA,1, "p4-read cpu=%d ri=%d mode=%d", cpu, ri, mode);
704 KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
705 ("[p4,%d] unknown PMC class %d", __LINE__, pd->pm_descr.pd_class));
707 tmp = rdmsr(p4_pmcdesc[ri].pm_pmc_msr);
709 if (PMC_IS_VIRTUAL_MODE(mode)) {
710 if (tmp < P4_PCPU_HW_VALUE(pc,ri,cpu)) /* 40 bit overflow */
711 tmp += (P4_PERFCTR_MASK + 1) -
712 P4_PCPU_HW_VALUE(pc,ri,cpu);
714 tmp -= P4_PCPU_HW_VALUE(pc,ri,cpu);
715 tmp += P4_PCPU_PMC_VALUE(pc,ri,cpu);
718 if (PMC_IS_SAMPLING_MODE(mode)) /* undo transformation */
719 *v = P4_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
723 PMCDBG(MDP,REA,2, "p4-read -> %jx", *v);
733 p4_write_pmc(int cpu, int ri, pmc_value_t v)
738 const struct pmc_hw *phw;
739 const struct p4pmc_descr *pd;
741 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
742 ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
743 KASSERT(ri >= 0 && ri < P4_NPMCS,
744 ("[amd,%d] illegal row-index %d", __LINE__, ri));
746 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
747 phw = &pc->pc_p4pmcs[ri];
749 pd = &p4_pmcdesc[ri];
752 ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
755 mode = PMC_TO_MODE(pm);
757 PMCDBG(MDP,WRI,1, "p4-write cpu=%d ri=%d mode=%d v=%jx", cpu, ri,
761 * write the PMC value to the register/saved value: for
762 * sampling mode PMCs, the value to be programmed into the PMC
763 * counter is -(C+1) where 'C' is the requested sample rate.
765 if (PMC_IS_SAMPLING_MODE(mode))
766 v = P4_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
768 if (PMC_IS_SYSTEM_MODE(mode))
769 wrmsr(pd->pm_pmc_msr, v);
771 P4_PCPU_PMC_VALUE(pc,ri,cpu) = v;
777 * Configure a PMC 'pm' on the given CPU and row-index.
779 * 'pm' may be NULL to indicate de-configuration.
781 * On HTT systems, a PMC may get configured twice, once for each
782 * "logical" CPU. We track this using the CFGFLAGS field of the
783 * per-cpu state; this field is a bit mask with one bit each for
784 * logical CPUs 0 & 1.
788 p4_config_pmc(int cpu, int ri, struct pmc *pm)
792 int cfgflags, cpuflag;
794 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
795 ("[p4,%d] illegal CPU %d", __LINE__, cpu));
797 KASSERT(ri >= 0 && ri < P4_NPMCS,
798 ("[p4,%d] illegal row-index %d", __LINE__, ri));
800 PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
802 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
803 phw = &pc->pc_p4pmcs[ri];
805 KASSERT(pm == NULL || phw->phw_pmc == NULL ||
806 (p4_system_has_htt && phw->phw_pmc == pm),
807 ("[p4,%d] hwpmc not unconfigured before re-config", __LINE__));
809 mtx_lock_spin(&pc->pc_mtx);
810 cfgflags = P4_PCPU_GET_CFGFLAGS(pc,ri);
812 KASSERT(cfgflags >= 0 && cfgflags <= 3,
813 ("[p4,%d] illegal cfgflags cfg=%d on cpu=%d ri=%d", __LINE__,
816 KASSERT(cfgflags == 0 || phw->phw_pmc,
817 ("[p4,%d] cpu=%d ri=%d pmc configured with zero cfg count",
820 cpuflag = P4_CPU_TO_FLAG(cpu);
822 if (pm) { /* config */
826 KASSERT(phw->phw_pmc == pm,
827 ("[p4,%d] cpu=%d ri=%d config %p != hw %p",
828 __LINE__, cpu, ri, pm, phw->phw_pmc));
831 } else { /* unconfig */
832 cfgflags &= ~cpuflag;
838 KASSERT(cfgflags >= 0 && cfgflags <= 3,
839 ("[p4,%d] illegal cfgflags cfg=%d on cpu=%d ri=%d", __LINE__,
842 P4_PCPU_SET_CFGFLAGS(pc,ri,cfgflags);
844 mtx_unlock_spin(&pc->pc_mtx);
850 * Retrieve a configured PMC pointer from hardware state.
854 p4_get_config(int cpu, int ri, struct pmc **ppm)
859 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
860 ("[p4,%d] illegal CPU %d", __LINE__, cpu));
861 KASSERT(ri >= 0 && ri < P4_NPMCS,
862 ("[p4,%d] illegal row-index %d", __LINE__, ri));
864 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
866 mtx_lock_spin(&pc->pc_mtx);
867 cfgflags = P4_PCPU_GET_CFGFLAGS(pc,ri);
868 mtx_unlock_spin(&pc->pc_mtx);
870 if (cfgflags & P4_CPU_TO_FLAG(cpu))
871 *ppm = pc->pc_p4pmcs[ri].phw_pmc; /* PMC config'ed on this CPU */
881 * The allocation strategy differs between HTT and non-HTT systems.
884 * - Given the desired event and the PMC row-index, lookup the
885 * list of valid ESCRs for the event.
886 * - For each valid ESCR:
887 * - Check if the ESCR is free and the ESCR row is in a compatible
888 * mode (i.e., system or process))
889 * - Check if the ESCR is usable with a P4 PMC at the desired row-index.
890 * If everything matches, we determine the appropriate bit values for the
891 * ESCR and CCCR registers.
895 * - Process mode PMCs require special care. The FreeBSD scheduler could
896 * schedule any two processes on the same physical CPU. We need to ensure
897 * that a given PMC row-index is never allocated to two different
898 * PMCs owned by different user-processes.
899 * This is ensured by always allocating a PMC from a 'FREE' PMC row
900 * if the system has HTT active.
901 * - A similar check needs to be done for ESCRs; we do not want two PMCs
902 * using the same ESCR to be scheduled at the same time. Thus ESCR
903 * allocation is also restricted to FREE rows if the system has HTT
905 * - Thirdly, some events are 'thread-independent' (in Intel's terminology), i.e.,
906 * the PMC hardware cannot distinguish between events caused by
907 * different logical CPUs. This makes it impossible to assign events
908 * to a given thread of execution. If the system has HTT enabled,
909 * these events are not allowed for process-mode PMCs.
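 *
 * A worked example (illustrative): allocating GLOBAL_POWER_EVENTS on
 * the PMC at the row-index of BPU_COUNTER0 first considers FSB_ESCR0;
 * being an even-numbered ESCR it can drive BPU_COUNTER0, so if the
 * ESCR's row disposition and per-cpu state permit, the allocation
 * succeeds and the CCCR/ESCR bit patterns are computed from the event
 * descriptor plus the caller-requested capabilities.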
913 p4_allocate_pmc(int cpu, int ri, struct pmc *pm,
914 const struct pmc_op_pmcallocate *a)
917 uint32_t caps, cccrvalue, escrvalue, tflags;
918 enum pmc_p4escr escr;
920 struct p4_event_descr *pevent;
921 const struct p4pmc_descr *pd;
923 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
924 ("[p4,%d] illegal CPU %d", __LINE__, cpu));
925 KASSERT(ri >= 0 && ri < P4_NPMCS,
926 ("[p4,%d] illegal row-index value %d", __LINE__, ri));
928 pd = &p4_pmcdesc[ri];
930 PMCDBG(MDP,ALL,1, "p4-allocate ri=%d class=%d pmccaps=0x%x "
931 "reqcaps=0x%x", ri, pd->pm_descr.pd_class, pd->pm_descr.pd_caps,
935 if (pd->pm_descr.pd_class != a->pm_class)
938 /* check requested capabilities */
940 if ((pd->pm_descr.pd_caps & caps) != caps)
944 * If the system has HTT enabled, and the desired allocation
945 * mode is process-private, and the PMC row disposition is not
946 * FREE (0), decline the allocation.
949 if (p4_system_has_htt &&
950 PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) &&
951 pmc_getrowdisp(ri) != 0)
954 KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
955 ("[p4,%d] unknown PMC class %d", __LINE__,
956 pd->pm_descr.pd_class));
958 if (pm->pm_event < PMC_EV_P4_FIRST ||
959 pm->pm_event > PMC_EV_P4_LAST)
962 if ((pevent = p4_find_event(pm->pm_event)) == NULL)
965 PMCDBG(MDP,ALL,2, "pevent={ev=%d,escrsel=0x%x,cccrsel=0x%x,isti=%d}",
966 pevent->pm_event, pevent->pm_escr_eventselect,
967 pevent->pm_cccr_select, pevent->pm_is_ti_event);
970 * Some PMC events are 'thread independent' and therefore
971 * cannot be used for process-private modes when HTT is enabled.
975 if (P4_EVENT_IS_TI(pevent) &&
976 PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) &&
980 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
984 /* look for a suitable ESCR for this event */
985 for (n = 0; n < P4_MAX_ESCR_PER_EVENT && !found; n++) {
986 if ((escr = pevent->pm_escrs[n]) == P4_ESCR_NONE)
987 break; /* out of ESCRs */
989 * Check ESCR row disposition.
991 * If the request is for a system-mode PMC, then the
992 * ESCR row should not be in process-virtual mode, and
993 * should also be free on the current CPU.
996 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
997 if (P4_ESCR_ROW_DISP_IS_THREAD(escr) ||
998 pc->pc_escrs[escr] != P4_INVALID_PMC_INDEX)
1003 * If the request is for a process-virtual PMC, and if
1004 * HTT is not enabled, we can use an ESCR row that is
1005 * either FREE or already in process mode.
1007 * If HTT is enabled, then we need to ensure that a
1008 * given ESCR is never allocated to two PMCS that
1009 * could run simultaneously on the two logical CPUs of
1010 * a CPU package. We ensure this by only allocating
1011 * ESCRs from rows marked as 'FREE'.
1014 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
1015 if (p4_system_has_htt) {
1016 if (!P4_ESCR_ROW_DISP_IS_FREE(escr))
1019 if (P4_ESCR_ROW_DISP_IS_STANDALONE(escr))
1024 * We found a suitable ESCR for this event. Now check if
1025 * this escr can work with the PMC at row-index 'ri'.
1028 for (m = 0; m < P4_MAX_PMC_PER_ESCR; m++)
1029 if (p4_escrs[escr].pm_pmcs[m] == pd->pm_pmcnum) {
1038 KASSERT((int) escr >= 0 && escr < P4_NESCR,
1039 ("[p4,%d] illegal ESCR value %d", __LINE__, escr));
1041 /* mark ESCR row mode */
1042 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
1043 pc->pc_escrs[escr] = ri; /* mark ESCR as in use on this cpu */
1044 P4_ESCR_MARK_ROW_STANDALONE(escr);
1046 KASSERT(pc->pc_escrs[escr] == P4_INVALID_PMC_INDEX,
1047 ("[p4,%d] escr[%d] already in use", __LINE__, escr));
1048 P4_ESCR_MARK_ROW_THREAD(escr);
1051 pm->pm_md.pm_p4.pm_p4_escrmsr = p4_escrs[escr].pm_escr_msr;
1052 pm->pm_md.pm_p4.pm_p4_escr = escr;
1054 cccrvalue = P4_CCCR_TO_ESCR_SELECT(pevent->pm_cccr_select);
1055 escrvalue = P4_ESCR_TO_EVENT_SELECT(pevent->pm_escr_eventselect);
1058 if (caps & PMC_CAP_THRESHOLD)
1059 cccrvalue |= (a->pm_md.pm_p4.pm_p4_cccrconfig &
1060 P4_CCCR_THRESHOLD_MASK) | P4_CCCR_COMPARE;
1062 if (caps & PMC_CAP_EDGE)
1063 cccrvalue |= P4_CCCR_EDGE;
1065 if (caps & PMC_CAP_INVERT)
1066 cccrvalue |= P4_CCCR_COMPLEMENT;
1068 if (p4_system_has_htt)
1069 cccrvalue |= a->pm_md.pm_p4.pm_p4_cccrconfig &
1070 P4_CCCR_ACTIVE_THREAD_MASK;
1071 else /* no HTT; thread field should be '11b' */
1072 cccrvalue |= P4_CCCR_TO_ACTIVE_THREAD(0x3);
1074 if (caps & PMC_CAP_CASCADE)
1075 cccrvalue |= P4_CCCR_CASCADE;
1077 /* On HTT systems the PMI T0 field may get moved to T1 at pmc start */
1078 if (caps & PMC_CAP_INTERRUPT)
1079 cccrvalue |= P4_CCCR_OVF_PMI_T0;
1082 if (caps & PMC_CAP_QUALIFIER)
1083 escrvalue |= a->pm_md.pm_p4.pm_p4_escrconfig &
1084 P4_ESCR_EVENT_MASK_MASK;
1085 if (caps & PMC_CAP_TAGGING)
1086 escrvalue |= (a->pm_md.pm_p4.pm_p4_escrconfig &
1087 P4_ESCR_TAG_VALUE_MASK) | P4_ESCR_TAG_ENABLE;
1088 if (caps & PMC_CAP_QUALIFIER)
1089 escrvalue |= (a->pm_md.pm_p4.pm_p4_escrconfig &
1090 P4_ESCR_EVENT_MASK_MASK);
1092 /* HTT: T0_{OS,USR} bits may get moved to T1 at pmc start */
1093 tflags = 0;
1094 if (caps & PMC_CAP_SYSTEM)
1095 tflags |= P4_ESCR_T0_OS;
1096 if (caps & PMC_CAP_USER)
1097 tflags |= P4_ESCR_T0_USR;
1098 if (tflags == 0)
1099 tflags = (P4_ESCR_T0_OS|P4_ESCR_T0_USR);
1100 escrvalue |= tflags;
1102 pm->pm_md.pm_p4.pm_p4_cccrvalue = cccrvalue;
1103 pm->pm_md.pm_p4.pm_p4_escrvalue = escrvalue;
1105 PMCDBG(MDP,ALL,2, "p4-allocate cccrsel=0x%x cccrval=0x%x "
1106 "escr=%d escrmsr=0x%x escrval=0x%x", pevent->pm_cccr_select,
1107 cccrvalue, escr, pm->pm_md.pm_p4.pm_p4_escrmsr, escrvalue);
1117 p4_release_pmc(int cpu, int ri, struct pmc *pm)
1119 enum pmc_p4escr escr;
1122 KASSERT(ri >= 0 && ri < P4_NPMCS,
1123 ("[p4,%d] illegal row-index %d", __LINE__, ri));
1125 escr = pm->pm_md.pm_p4.pm_p4_escr;
1127 PMCDBG(MDP,REL,1, "p4-release cpu=%d ri=%d escr=%d", cpu, ri, escr);
1129 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
1130 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
1132 KASSERT(pc->pc_p4pmcs[ri].phw_pmc == NULL,
1133 ("[p4,%d] releasing configured PMC ri=%d", __LINE__, ri));
1135 P4_ESCR_UNMARK_ROW_STANDALONE(escr);
1136 KASSERT(pc->pc_escrs[escr] == ri,
1137 ("[p4,%d] escr[%d] not allocated to ri %d", __LINE__,
1139 pc->pc_escrs[escr] = P4_INVALID_PMC_INDEX; /* mark as free */
1141 P4_ESCR_UNMARK_ROW_THREAD(escr);
1151 p4_start_pmc(int cpu, int ri)
1156 struct p4pmc_descr *pd;
1157 uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits;
1159 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
1160 ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
1161 KASSERT(ri >= 0 && ri < P4_NPMCS,
1162 ("[p4,%d] illegal row-index %d", __LINE__, ri));
1164 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
1165 pm = pc->pc_p4pmcs[ri].phw_pmc;
1166 pd = &p4_pmcdesc[ri];
1169 ("[p4,%d] starting cpu%d,pmc%d with null pmc", __LINE__, cpu, ri));
1171 PMCDBG(MDP,STA,1, "p4-start cpu=%d ri=%d", cpu, ri);
1173 KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
1174 ("[p4,%d] wrong PMC class %d", __LINE__,
1175 pd->pm_descr.pd_class));
1177 /* retrieve the desired CCCR/ESCR values from the PMC */
1178 cccrvalue = pm->pm_md.pm_p4.pm_p4_cccrvalue;
1179 escrvalue = pm->pm_md.pm_p4.pm_p4_escrvalue;
1180 escrmsr = pm->pm_md.pm_p4.pm_p4_escrmsr;
1182 /* extract and zero the logical processor selection bits */
1183 cccrtbits = cccrvalue & P4_CCCR_OVF_PMI_T0;
1184 escrtbits = escrvalue & (P4_ESCR_T0_OS|P4_ESCR_T0_USR);
1185 cccrvalue &= ~P4_CCCR_OVF_PMI_T0;
1186 escrvalue &= ~(P4_ESCR_T0_OS|P4_ESCR_T0_USR);
1188 if (P4_CPU_IS_HTT_SECONDARY(cpu)) { /* shift T0 bits to T1 position */
1193 /* start system mode PMCs directly */
1194 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
1195 wrmsr(escrmsr, escrvalue | escrtbits);
1196 wrmsr(pd->pm_cccr_msr, cccrvalue | cccrtbits | P4_CCCR_ENABLE);
1203 * On HTT machines, the same PMC could be scheduled on the
1204 * same physical CPU twice (once for each logical CPU), for
1205 * example, if two threads of a multi-threaded process get
1206 * scheduled on the same CPU.
1210 mtx_lock_spin(&pc->pc_mtx);
1212 rc = P4_PCPU_GET_RUNCOUNT(pc,ri);
1213 KASSERT(rc == 0 || rc == 1,
1214 ("[p4,%d] illegal runcount cpu=%d ri=%d rc=%d", __LINE__, cpu, ri,
1217 if (rc == 0) { /* 1st CPU and the non-HTT case */
1219 KASSERT(P4_PMC_IS_STOPPED(pd->pm_cccr_msr),
1220 ("[p4,%d] cpu=%d ri=%d cccr=0x%x not stopped", __LINE__,
1221 cpu, ri, pd->pm_cccr_msr));
1223 /* write out the low 40 bits of the saved value to hardware */
1224 wrmsr(pd->pm_pmc_msr,
1225 P4_PCPU_PMC_VALUE(pc,ri,cpu) & P4_PERFCTR_MASK);
1227 } else if (rc == 1) { /* 2nd CPU */
1230 * Stop the PMC and retrieve the CCCR and ESCR values
1231 * from their MSRs, and turn on the additional T[0/1]
1232 * bits for the 2nd CPU.
1235 cccrvalue = rdmsr(pd->pm_cccr_msr);
1236 wrmsr(pd->pm_cccr_msr, cccrvalue & ~P4_CCCR_ENABLE);
1238 /* check that the configuration bits read back match the PMC */
1239 KASSERT((cccrvalue & P4_CCCR_Tx_MASK) ==
1240 (pm->pm_md.pm_p4.pm_p4_cccrvalue & P4_CCCR_Tx_MASK),
1241 ("[p4,%d] Extra CCCR bits cpu=%d rc=%d ri=%d "
1242 "cccr=0x%x PMC=0x%x", __LINE__, cpu, rc, ri,
1243 cccrvalue & P4_CCCR_Tx_MASK,
1244 pm->pm_md.pm_p4.pm_p4_cccrvalue & P4_CCCR_Tx_MASK));
1245 KASSERT(cccrvalue & P4_CCCR_ENABLE,
1246 ("[p4,%d] 2nd cpu rc=%d cpu=%d ri=%d not running",
1247 __LINE__, rc, cpu, ri));
1248 KASSERT((cccrvalue & cccrtbits) == 0,
1249 ("[p4,%d] CCCR T0/T1 mismatch rc=%d cpu=%d ri=%d"
1250 "cccrvalue=0x%x tbits=0x%x", __LINE__, rc, cpu, ri,
1251 cccrvalue, cccrtbits));
1253 escrvalue = rdmsr(escrmsr);
1255 KASSERT((escrvalue & P4_ESCR_Tx_MASK) ==
1256 (pm->pm_md.pm_p4.pm_p4_escrvalue & P4_ESCR_Tx_MASK),
1257 ("[p4,%d] Extra ESCR bits cpu=%d rc=%d ri=%d "
1258 "escr=0x%x pm=0x%x", __LINE__, cpu, rc, ri,
1259 escrvalue & P4_ESCR_Tx_MASK,
1260 pm->pm_md.pm_p4.pm_p4_escrvalue & P4_ESCR_Tx_MASK));
1261 KASSERT((escrvalue & escrtbits) == 0,
1262 ("[p4,%d] ESCR T0/T1 mismatch rc=%d cpu=%d ri=%d "
1263 "escrmsr=0x%x escrvalue=0x%x tbits=0x%x", __LINE__,
1264 rc, cpu, ri, escrmsr, escrvalue, escrtbits));
1267 /* Enable the correct bits for this CPU. */
1268 escrvalue |= escrtbits;
1269 cccrvalue |= cccrtbits | P4_CCCR_ENABLE;
1271 /* Save HW value at the time of starting hardware */
1272 P4_PCPU_HW_VALUE(pc,ri,cpu) = rdmsr(pd->pm_pmc_msr);
1274 /* Program the ESCR and CCCR and start the PMC */
1275 wrmsr(escrmsr, escrvalue);
1276 wrmsr(pd->pm_cccr_msr, cccrvalue);
1279 P4_PCPU_SET_RUNCOUNT(pc,ri,rc);
1281 mtx_unlock_spin(&pc->pc_mtx);
1283 PMCDBG(MDP,STA,2,"p4-start cpu=%d rc=%d ri=%d escr=%d "
1284 "escrmsr=0x%x escrvalue=0x%x cccr_config=0x%x v=%jx", cpu, rc,
1285 ri, pm->pm_md.pm_p4.pm_p4_escr, escrmsr, escrvalue,
1286 cccrvalue, P4_PCPU_HW_VALUE(pc,ri,cpu));
1296 p4_stop_pmc(int cpu, int ri)
1299 uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits;
1302 struct p4pmc_descr *pd;
1305 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
1306 ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
1307 KASSERT(ri >= 0 && ri < P4_NPMCS,
1308 ("[p4,%d] illegal row index %d", __LINE__, ri));
1310 pd = &p4_pmcdesc[ri];
1311 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
1312 pm = pc->pc_p4pmcs[ri].phw_pmc;
1315 ("[p4,%d] null pmc for cpu%d, ri%d", __LINE__, cpu, ri));
1317 PMCDBG(MDP,STO,1, "p4-stop cpu=%d ri=%d", cpu, ri);
1319 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
1320 wrmsr(pd->pm_cccr_msr,
1321 pm->pm_md.pm_p4.pm_p4_cccrvalue & ~P4_CCCR_ENABLE);
1328 * On HTT machines, this PMC may be in use by two threads
1329 * running on two logical CPUs. Thus we look at the
1330 * 'runcount' field and only turn off the appropriate T0/T1
1331 * bits (and keep the PMC running) if two logical CPUs were
1332 * using the PMC.
1337 cccrtbits = P4_CCCR_OVF_PMI_T0;
1338 escrtbits = P4_ESCR_T0_OS | P4_ESCR_T0_USR;
1339 if (P4_CPU_IS_HTT_SECONDARY(cpu)) {
1344 mtx_lock_spin(&pc->pc_mtx);
1346 rc = P4_PCPU_GET_RUNCOUNT(pc,ri);
1348 KASSERT(rc == 2 || rc == 1,
1349 ("[p4,%d] illegal runcount cpu=%d ri=%d rc=%d", __LINE__, cpu, ri,
1354 P4_PCPU_SET_RUNCOUNT(pc,ri,rc);
1357 cccrvalue = rdmsr(pd->pm_cccr_msr);
1358 wrmsr(pd->pm_cccr_msr, cccrvalue & ~P4_CCCR_ENABLE);
1360 escrmsr = pm->pm_md.pm_p4.pm_p4_escrmsr;
1361 escrvalue = rdmsr(escrmsr);
1363 /* The current CPU should be running on this PMC */
1364 KASSERT(escrvalue & escrtbits,
1365 ("[p4,%d] ESCR T0/T1 mismatch cpu=%d rc=%d ri=%d escrmsr=0x%x "
1366 "escrvalue=0x%x tbits=0x%x", __LINE__, cpu, rc, ri, escrmsr,
1367 escrvalue, escrtbits));
1368 KASSERT(PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)) ||
1369 (cccrvalue & cccrtbits),
1370 ("[p4,%d] CCCR T0/T1 mismatch cpu=%d ri=%d cccrvalue=0x%x "
1371 "tbits=0x%x", __LINE__, cpu, ri, cccrvalue, cccrtbits));
1373 /* get the current hardware reading */
1374 tmp = rdmsr(pd->pm_pmc_msr);
1376 if (rc == 1) { /* need to keep the PMC running */
1377 escrvalue &= ~escrtbits;
1378 cccrvalue &= ~cccrtbits;
1379 wrmsr(escrmsr, escrvalue);
1380 wrmsr(pd->pm_cccr_msr, cccrvalue);
1383 mtx_unlock_spin(&pc->pc_mtx);
1385 PMCDBG(MDP,STO,2, "p4-stop cpu=%d rc=%d ri=%d escrmsr=0x%x "
1386 "escrval=0x%x cccrval=0x%x v=%jx", cpu, rc, ri, escrmsr,
1387 escrvalue, cccrvalue, tmp);
1389 if (tmp < P4_PCPU_HW_VALUE(pc,ri,cpu)) /* 40 bit counter overflow */
1390 tmp += (P4_PERFCTR_MASK + 1) - P4_PCPU_HW_VALUE(pc,ri,cpu);
1392 tmp -= P4_PCPU_HW_VALUE(pc,ri,cpu);
1394 P4_PCPU_PMC_VALUE(pc,ri,cpu) += tmp;
1400 * Handle an interrupt.
1402 * The hardware sets the CCCR_OVF whenever a counter overflow occurs,
1403 * so the handler examines all the 18 CCCR registers, processing the
1404 * counters that have overflowed.
1406 * On HTT machines, the CCCR register is shared and will interrupt
1407 * both logical processors if so configured. Thus multiple logical
1408 * CPUs could enter the NMI service routine at the same time. These
1409 * will get serialized using a per-cpu spinlock dedicated for use in
1410 * the NMI handler.
1414 p4_intr(int cpu, struct trapframe *tf)
1416 uint32_t cccrval, ovf_mask, ovf_partner;
1417 int did_interrupt, error, ri;
1422 PMCDBG(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf,
1423 TRAPF_USERMODE(tf));
1425 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
1427 ovf_mask = P4_CPU_IS_HTT_SECONDARY(cpu) ?
1428 P4_CCCR_OVF_PMI_T1 : P4_CCCR_OVF_PMI_T0;
1429 ovf_mask |= P4_CCCR_OVF;
1430 if (p4_system_has_htt)
1431 ovf_partner = P4_CPU_IS_HTT_SECONDARY(cpu) ?
1432 P4_CCCR_OVF_PMI_T0 : P4_CCCR_OVF_PMI_T1;
1437 if (p4_system_has_htt)
1438 P4_PCPU_ACQ_INTR_SPINLOCK(pc);
1441 * Loop through all CCCRs, looking for ones that have
1442 * interrupted this CPU.
1444 for (ri = 0; ri < P4_NPMCS; ri++) {
1447 * Check if our partner logical CPU has already marked
1448 * this PMC as having interrupted it. If so, reset
1449 * the flag and process the interrupt, but leave the
1450 * hardware alone.
1452 if (p4_system_has_htt && P4_PCPU_GET_INTRFLAG(pc,ri)) {
1453 P4_PCPU_SET_INTRFLAG(pc,ri,0);
1457 * Ignore de-configured or stopped PMCs.
1458 * Ignore PMCs not in sampling mode.
1460 pm = pc->pc_p4pmcs[ri].phw_pmc;
1461 if (pm == NULL ||
1462 pm->pm_state != PMC_STATE_RUNNING ||
1463 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
1466 (void) pmc_process_interrupt(cpu, pm, tf,
1467 TRAPF_USERMODE(tf));
1472 * Fresh interrupt. Look for the CCCR_OVF bit
1473 * and the OVF_Tx bit for this logical
1474 * processor being set.
1476 cccrval = rdmsr(P4_CCCR_MSR_FIRST + ri);
1478 if ((cccrval & ovf_mask) != ovf_mask)
1482 * If the other logical CPU would also have been
1483 * interrupted due to the PMC being shared, record
1484 * this fact in the per-cpu saved interrupt flag
1487 if (p4_system_has_htt && (cccrval & ovf_partner))
1488 P4_PCPU_SET_INTRFLAG(pc, ri, 1);
1490 v = rdmsr(P4_PERFCTR_MSR_FIRST + ri);
1492 PMCDBG(MDP,INT, 2, "ri=%d v=%jx", ri, v);
1494 /* Stop the counter, and reset the overflow bit */
1495 cccrval &= ~(P4_CCCR_OVF | P4_CCCR_ENABLE);
1496 wrmsr(P4_CCCR_MSR_FIRST + ri, cccrval);
1501 * Ignore de-configured or stopped PMCs. Ignore PMCs
1502 * not in sampling mode.
1504 pm = pc->pc_p4pmcs[ri].phw_pmc;
1506 if (pm == NULL ||
1507 pm->pm_state != PMC_STATE_RUNNING ||
1508 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
1513 * Process the interrupt. Re-enable the PMC if
1514 * processing was successful.
1516 error = pmc_process_interrupt(cpu, pm, tf,
1517 TRAPF_USERMODE(tf));
1520 * Only the first processor executing the NMI handler
1521 * in a HTT pair will restart a PMC, and that too
1522 * only if there were no errors.
1524 v = P4_RELOAD_COUNT_TO_PERFCTR_VALUE(
1525 pm->pm_sc.pm_reloadcount);
1526 wrmsr(P4_PERFCTR_MSR_FIRST + ri, v);
1528 wrmsr(P4_CCCR_MSR_FIRST + ri,
1529 cccrval | P4_CCCR_ENABLE);
1532 /* allow the other CPU to proceed */
1533 if (p4_system_has_htt)
1534 P4_PCPU_REL_INTR_SPINLOCK(pc);
1537 * On Intel P4 CPUs, the PMC 'pcint' entry in the LAPIC gets
1538 * masked when a PMC interrupts the CPU. We need to unmask
1539 * the interrupt source explicitly.
1543 lapic_reenable_pmc();
1545 atomic_add_int(did_interrupt ? &pmc_stats.pm_intr_processed :
1546 &pmc_stats.pm_intr_ignored, 1);
1548 return (did_interrupt);
1552 * Describe a CPU's PMC state.
1556 p4_describe(int cpu, int ri, struct pmc_info *pi,
1561 const struct p4pmc_descr *pd;
1563 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
1564 ("[p4,%d] illegal CPU %d", __LINE__, cpu));
1565 KASSERT(ri >= 0 && ri < P4_NPMCS,
1566 ("[p4,%d] row-index %d out of range", __LINE__, ri));
1568 PMCDBG(MDP,OPS,1,"p4-describe cpu=%d ri=%d", cpu, ri);
1570 if (P4_CPU_IS_HTT_SECONDARY(cpu))
1573 pd = &p4_pmcdesc[ri];
1575 if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
1576 PMC_NAME_MAX, &copied)) != 0)
1579 pi->pm_class = pd->pm_descr.pd_class;
1581 if (p4_pcpu[cpu]->pc_p4pmcs[ri].phw_state & PMC_PHW_FLAG_IS_ENABLED) {
1582 pi->pm_enabled = TRUE;
1583 *ppmc = p4_pcpu[cpu]->pc_p4pmcs[ri].phw_pmc;
1585 pi->pm_enabled = FALSE;
1593 * Get MSR# for use with RDPMC.
1597 p4_get_msr(int ri, uint32_t *msr)
1599 KASSERT(ri >= 0 && ri < P4_NPMCS,
1600 ("[p4,%d] ri %d out of range", __LINE__, ri));
1602 *msr = p4_pmcdesc[ri].pm_pmc_msr - P4_PERFCTR_MSR_FIRST;
1604 PMCDBG(MDP,OPS, 1, "ri=%d getmsr=0x%x", ri, *msr);
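/*
 * Illustrative example (assuming P4_PERFCTR_MSR_FIRST is 0x300, the
 * MSR of BPU_COUNTER0): for the row using BPU_COUNTER2 (MSR 0x302),
 * the value reported is 2, i.e. the counter index that user code
 * would pass to the RDPMC instruction.
 */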
1611 pmc_p4_initialize(struct pmc_mdep *md, int ncpus)
1613 struct pmc_classdep *pcd;
1614 struct p4_event_descr *pe;
1616 KASSERT(md != NULL, ("[p4,%d] md is NULL", __LINE__));
1617 KASSERT(cpu_vendor_id == CPU_VENDOR_INTEL,
1618 ("[p4,%d] Initializing non-intel processor", __LINE__));
1620 PMCDBG(MDP,INI,1, "%s", "p4-initialize");
1622 /* Allocate space for pointers to per-cpu descriptors. */
1623 p4_pcpu = malloc(sizeof(struct p4_cpu *) * ncpus, M_PMC,
1626 /* Fill in the class dependent descriptor. */
1627 pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_P4];
1629 switch (md->pmd_cputype) {
1630 case PMC_CPU_INTEL_PIV:
1632 pcd->pcd_caps = P4_PMC_CAPS;
1633 pcd->pcd_class = PMC_CLASS_P4;
1634 pcd->pcd_num = P4_NPMCS;
1635 pcd->pcd_ri = md->pmd_npmc;
1636 pcd->pcd_width = 40;
1638 pcd->pcd_allocate_pmc = p4_allocate_pmc;
1639 pcd->pcd_config_pmc = p4_config_pmc;
1640 pcd->pcd_describe = p4_describe;
1641 pcd->pcd_get_config = p4_get_config;
1642 pcd->pcd_get_msr = p4_get_msr;
1643 pcd->pcd_pcpu_fini = p4_pcpu_fini;
1644 pcd->pcd_pcpu_init = p4_pcpu_init;
1645 pcd->pcd_read_pmc = p4_read_pmc;
1646 pcd->pcd_release_pmc = p4_release_pmc;
1647 pcd->pcd_start_pmc = p4_start_pmc;
1648 pcd->pcd_stop_pmc = p4_stop_pmc;
1649 pcd->pcd_write_pmc = p4_write_pmc;
1651 md->pmd_pcpu_fini = NULL;
1652 md->pmd_pcpu_init = NULL;
1653 md->pmd_intr = p4_intr;
1654 md->pmd_npmc += P4_NPMCS;
1656 /* model specific configuration */
1657 if ((cpu_id & 0xFFF) < 0xF27) {
1660 * On P4 and Xeon with CPUID < (Family 15,
1661 * Model 2, Stepping 7), only one ESCR is
1662 * available for the IOQ_ALLOCATION event.
1665 pe = p4_find_event(PMC_EV_P4_IOQ_ALLOCATION);
1666 pe->pm_escrs[1] = P4_ESCR_NONE;
1672 KASSERT(0,("[p4,%d] Unknown CPU type", __LINE__));
1680 pmc_p4_finalize(struct pmc_mdep *md)
1682 #if defined(INVARIANTS)
1686 KASSERT(p4_pcpu != NULL,
1687 ("[p4,%d] NULL p4_pcpu", __LINE__));
1689 #if defined(INVARIANTS)
1690 ncpus = pmc_cpu_max();
1691 for (i = 0; i < ncpus; i++)
1692 KASSERT(p4_pcpu[i] == NULL, ("[p4,%d] non-null pcpu %d",
1696 free(p4_pcpu, M_PMC);