/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Local APIC support on Pentium and later processors.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_atpic.h"
#include "opt_hwpmc_hooks.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/specialreg.h>

#include <sys/interrupt.h>
#ifdef __amd64__
#define	SDT_APIC	SDT_SYSIGT
#define	GSEL_APIC	0
#else
#define	SDT_APIC	SDT_SYS386IGT
#define	GSEL_APIC	GSEL(GCODE_SEL, SEL_KPL)
#endif
static MALLOC_DEFINE(M_LAPIC, "local_apic", "Local APIC items");

/* Sanity checks on IDT vectors. */
CTASSERT(APIC_IO_INTS + APIC_NUM_IOINTS == APIC_TIMER_INT);
CTASSERT(APIC_TIMER_INT < APIC_LOCAL_INTS);
CTASSERT(APIC_LOCAL_INTS == 240);
CTASSERT(IPI_STOP < APIC_SPURIOUS_INT);
/*
 * I/O interrupts use non-negative IRQ values.  These values are used
 * to mark unused IDT entries or IDT entries reserved for a non-I/O
 * interrupt.
 */
#define	IRQ_FREE	-1
#define	IRQ_TIMER	-2
#define	IRQ_SYSCALL	-3
#define	IRQ_DTRACE_RET	-4
#define	IRQ_EVTCHN	-5
enum lat_timer_mode {
	LAT_MODE_UNDEF =	0,
	LAT_MODE_PERIODIC =	1,
	LAT_MODE_ONESHOT =	2,
	LAT_MODE_DEADLINE =	3,
};
/*
 * Support for local APICs.  Local APICs manage interrupts on each
 * individual processor as opposed to I/O APICs which receive interrupts
 * from I/O devices and then forward them on to the local APICs.
 *
 * Local APICs can also send interrupts to each other thus providing the
 * mechanism for IPIs.
 */
struct lvt {
	u_int lvt_edgetrigger:1;
	u_int lvt_activehi:1;
	u_int lvt_masked:1;
	u_int lvt_active:1;
	u_int lvt_mode:16;
	u_int lvt_vector:8;
};

struct lapic {
	struct lvt la_lvts[APIC_LVT_MAX + 1];
	struct lvt la_elvts[APIC_ELVT_MAX + 1];
	u_int la_id:8;
	u_int la_cluster:4;
	u_int la_cluster_id:2;
	u_int la_present:1;
	u_long *la_timer_count;
	uint64_t la_timer_period;
	enum lat_timer_mode la_timer_mode;
	uint32_t lvt_timer_base;
	uint32_t lvt_timer_last;
	/* Include IDT_SYSCALL to make indexing easier. */
	int la_ioint_irqs[APIC_NUM_IOINTS + 1];
} static *lapics;
/* Global defaults for local APIC LVT entries. */
static struct lvt lvts[APIC_LVT_MAX + 1] = {
	{ 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 },	/* LINT0: masked ExtINT */
	{ 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 },	/* LINT1: NMI */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT },	/* Timer */
	{ 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT },	/* Error */
	{ 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 },	/* PMC */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT },	/* Thermal */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_CMC_INT },	/* CMCI */
};
/* Global defaults for AMD local APIC ELVT entries. */
static struct lvt elvts[APIC_ELVT_MAX + 1] = {
	{ 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
	{ 1, 1, 1, 0, APIC_LVT_DM_FIXED, APIC_CMC_INT },
	{ 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
	{ 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
};
static inthand_t *ioint_handlers[] = {
	NULL,			/* 0 - 31 */
	IDTVEC(apic_isr1),	/* 32 - 63 */
	IDTVEC(apic_isr2),	/* 64 - 95 */
	IDTVEC(apic_isr3),	/* 96 - 127 */
	IDTVEC(apic_isr4),	/* 128 - 159 */
	IDTVEC(apic_isr5),	/* 160 - 191 */
	IDTVEC(apic_isr6),	/* 192 - 223 */
	IDTVEC(apic_isr7),	/* 224 - 255 */
};
static inthand_t *ioint_pti_handlers[] = {
	NULL,			/* 0 - 31 */
	IDTVEC(apic_isr1_pti),	/* 32 - 63 */
	IDTVEC(apic_isr2_pti),	/* 64 - 95 */
	IDTVEC(apic_isr3_pti),	/* 96 - 127 */
	IDTVEC(apic_isr4_pti),	/* 128 - 159 */
	IDTVEC(apic_isr5_pti),	/* 160 - 191 */
	IDTVEC(apic_isr6_pti),	/* 192 - 223 */
	IDTVEC(apic_isr7_pti),	/* 224 - 255 */
};
static u_int32_t lapic_timer_divisors[] = {
	APIC_TDCR_1, APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
	APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128
};
extern inthand_t IDTVEC(rsvd_pti), IDTVEC(rsvd);

volatile char *lapic_map;
vm_paddr_t lapic_paddr = DEFAULT_APIC_BASE;
int x2apic_mode;
int lapic_eoi_suppression;
static int lapic_timer_tsc_deadline;
static u_long lapic_timer_divisor, count_freq;
static struct eventtimer lapic_et;
#ifdef SMP
static uint64_t lapic_ipi_wait_mult;
static int __read_mostly lapic_ds_idle_timeout = 1000000;
#endif
unsigned int max_apic_id;
SYSCTL_NODE(_hw, OID_AUTO, apic, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "APIC options");
SYSCTL_INT(_hw_apic, OID_AUTO, x2apic_mode, CTLFLAG_RD, &x2apic_mode, 0, "");
SYSCTL_INT(_hw_apic, OID_AUTO, eoi_suppression, CTLFLAG_RD,
    &lapic_eoi_suppression, 0, "");
SYSCTL_INT(_hw_apic, OID_AUTO, timer_tsc_deadline, CTLFLAG_RD,
    &lapic_timer_tsc_deadline, 0, "");
#ifdef SMP
SYSCTL_INT(_hw_apic, OID_AUTO, ds_idle_timeout, CTLFLAG_RWTUN,
    &lapic_ds_idle_timeout, 0,
    "timeout (in us) for APIC Delivery Status to become Idle (xAPIC only)");
#endif
static void lapic_calibrate_initcount(struct lapic *la);
/*
 * Use __nosanitizethread to exempt the LAPIC I/O accessors from KCSan
 * instrumentation.  Otherwise, if x2APIC is not available, use of the global
 * lapic_map will generate a KCSan false positive.  While the mapping is
 * shared among all CPUs, the physical access will always take place on the
 * local CPU's APIC, so there isn't in fact a race here.  Furthermore, the
 * KCSan warning printf can cause a panic if issued during LAPIC access,
 * due to attempted recursive use of event timer resources.
 */
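
/*
 * Note (added commentary, not from the original source): the accessors
 * below share one register index space.  In xAPIC mode a register index
 * is converted to an MMIO byte offset within the 4KB APIC page by
 * multiplying with LAPIC_MEM_MUL, while in x2APIC mode the same index is
 * used directly as an offset from the MSR_APIC_000 MSR base.
 */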
static uint32_t __nosanitizethread
lapic_read32(enum LAPIC_REGISTERS reg)
{
	uint32_t res;

	if (x2apic_mode) {
		res = rdmsr32(MSR_APIC_000 + reg);
	} else {
		res = *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL);
	}
	return (res);
}

static void __nosanitizethread
lapic_write32(enum LAPIC_REGISTERS reg, uint32_t val)
{

	if (x2apic_mode) {
		mfence();
		lfence();
		wrmsr(MSR_APIC_000 + reg, val);
	} else {
		*(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
	}
}

static void __nosanitizethread
lapic_write32_nofence(enum LAPIC_REGISTERS reg, uint32_t val)
{

	if (x2apic_mode) {
		wrmsr(MSR_APIC_000 + reg, val);
	} else {
		*(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
	}
}

static uint32_t
lapic_read_icr_lo(void)
{

	return (lapic_read32(LAPIC_ICR_LO));
}

static void
lapic_write_icr(uint32_t vhi, uint32_t vlo)
{
	register_t saveintr;
	uint64_t v;

	if (x2apic_mode) {
		v = ((uint64_t)vhi << 32) | vlo;
		mfence();
		wrmsr(MSR_APIC_000 + LAPIC_ICR_LO, v);
	} else {
		saveintr = intr_disable();
		lapic_write32(LAPIC_ICR_HI, vhi);
		lapic_write32(LAPIC_ICR_LO, vlo);
		intr_restore(saveintr);
	}
}

static void
lapic_write_icr_lo(uint32_t vlo)
{

	if (x2apic_mode) {
		mfence();
		wrmsr(MSR_APIC_000 + LAPIC_ICR_LO, vlo);
	} else {
		lapic_write32(LAPIC_ICR_LO, vlo);
	}
}

static void
lapic_write_self_ipi(uint32_t vector)
{

	KASSERT(x2apic_mode, ("SELF IPI write in xAPIC mode"));
	wrmsr(MSR_APIC_000 + LAPIC_SELF_IPI, vector);
}

static void
native_lapic_enable_x2apic(void)
{
	uint64_t apic_base;

	apic_base = rdmsr(MSR_APICBASE);
	apic_base |= APICBASE_X2APIC | APICBASE_ENABLED;
	wrmsr(MSR_APICBASE, apic_base);
}

static bool
native_lapic_is_x2apic(void)
{
	uint64_t apic_base;

	apic_base = rdmsr(MSR_APICBASE);
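	/*
	 * Added note: in the IA32_APIC_BASE MSR, APICBASE_ENABLED is the
	 * global enable bit (EN, bit 11) and APICBASE_X2APIC is EXTD
	 * (bit 10); x2APIC operation requires both, which is what the
	 * check below tests.
	 */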
	return ((apic_base & (APICBASE_X2APIC | APICBASE_ENABLED)) ==
	    (APICBASE_X2APIC | APICBASE_ENABLED));
}

static void	lapic_enable(void);
static void	lapic_resume(struct pic *pic, bool suspend_cancelled);
static void	lapic_timer_oneshot(struct lapic *);
static void	lapic_timer_oneshot_nointr(struct lapic *, uint32_t);
static void	lapic_timer_periodic(struct lapic *);
static void	lapic_timer_deadline(struct lapic *);
static void	lapic_timer_stop(struct lapic *);
static void	lapic_timer_set_divisor(u_int divisor);
static uint32_t	lvt_mode(struct lapic *la, u_int pin, uint32_t value);
static int	lapic_et_start(struct eventtimer *et,
		    sbintime_t first, sbintime_t period);
static int	lapic_et_stop(struct eventtimer *et);
static u_int	apic_idt_to_irq(u_int apic_id, u_int vector);
static void	lapic_set_tpr(u_int vector);

struct pic lapic_pic = { .pic_resume = lapic_resume };

/* Forward declarations for apic_ops */
static void	native_lapic_create(u_int apic_id, int boot_cpu);
static void	native_lapic_init(vm_paddr_t addr);
static void	native_lapic_xapic_mode(void);
static void	native_lapic_setup(int boot);
static void	native_lapic_dump(const char *str);
static void	native_lapic_disable(void);
static void	native_lapic_eoi(void);
static int	native_lapic_id(void);
static int	native_lapic_intr_pending(u_int vector);
static u_int	native_apic_cpuid(u_int apic_id);
static u_int	native_apic_alloc_vector(u_int apic_id, u_int irq);
static u_int	native_apic_alloc_vectors(u_int apic_id, u_int *irqs,
		    u_int count, u_int align);
static void	native_apic_disable_vector(u_int apic_id, u_int vector);
static void	native_apic_enable_vector(u_int apic_id, u_int vector);
static void	native_apic_free_vector(u_int apic_id, u_int vector, u_int irq);
static void	native_lapic_set_logical_id(u_int apic_id, u_int cluster,
		    u_int cluster_id);
static void	native_lapic_calibrate_timer(void);
static int	native_lapic_enable_pmc(void);
static void	native_lapic_disable_pmc(void);
static void	native_lapic_reenable_pmc(void);
static void	native_lapic_enable_cmc(void);
static int	native_lapic_enable_mca_elvt(void);
static int	native_lapic_set_lvt_mask(u_int apic_id, u_int lvt,
		    u_char masked);
static int	native_lapic_set_lvt_mode(u_int apic_id, u_int lvt,
		    u_int32_t mode);
static int	native_lapic_set_lvt_polarity(u_int apic_id, u_int lvt,
		    enum intr_polarity pol);
static int	native_lapic_set_lvt_triggermode(u_int apic_id, u_int lvt,
		    enum intr_trigger trigger);
#ifdef SMP
static void	native_lapic_ipi_raw(register_t icrlo, u_int dest);
static void	native_lapic_ipi_vectored(u_int vector, int dest);
static int	native_lapic_ipi_wait(int delay);
#endif
static int	native_lapic_ipi_alloc(inthand_t *ipifunc);
static void	native_lapic_ipi_free(int vector);

struct apic_ops apic_ops = {
	.create			= native_lapic_create,
	.init			= native_lapic_init,
	.xapic_mode		= native_lapic_xapic_mode,
	.is_x2apic		= native_lapic_is_x2apic,
	.setup			= native_lapic_setup,
	.dump			= native_lapic_dump,
	.disable		= native_lapic_disable,
	.eoi			= native_lapic_eoi,
	.id			= native_lapic_id,
	.intr_pending		= native_lapic_intr_pending,
	.set_logical_id		= native_lapic_set_logical_id,
	.cpuid			= native_apic_cpuid,
	.alloc_vector		= native_apic_alloc_vector,
	.alloc_vectors		= native_apic_alloc_vectors,
	.enable_vector		= native_apic_enable_vector,
	.disable_vector		= native_apic_disable_vector,
	.free_vector		= native_apic_free_vector,
	.calibrate_timer	= native_lapic_calibrate_timer,
	.enable_pmc		= native_lapic_enable_pmc,
	.disable_pmc		= native_lapic_disable_pmc,
	.reenable_pmc		= native_lapic_reenable_pmc,
	.enable_cmc		= native_lapic_enable_cmc,
	.enable_mca_elvt	= native_lapic_enable_mca_elvt,
#ifdef SMP
	.ipi_raw		= native_lapic_ipi_raw,
	.ipi_vectored		= native_lapic_ipi_vectored,
	.ipi_wait		= native_lapic_ipi_wait,
#endif
	.ipi_alloc		= native_lapic_ipi_alloc,
	.ipi_free		= native_lapic_ipi_free,
	.set_lvt_mask		= native_lapic_set_lvt_mask,
	.set_lvt_mode		= native_lapic_set_lvt_mode,
	.set_lvt_polarity	= native_lapic_set_lvt_polarity,
	.set_lvt_triggermode	= native_lapic_set_lvt_triggermode,
};

static uint32_t
lvt_mode_impl(struct lapic *la, struct lvt *lvt, u_int pin, uint32_t value)
{

	value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
	    APIC_LVT_VECTOR);
	if (lvt->lvt_edgetrigger == 0)
		value |= APIC_LVT_TM;
	if (lvt->lvt_activehi == 0)
		value |= APIC_LVT_IIPP_INTALO;
	if (lvt->lvt_masked)
		value |= APIC_LVT_M;
	value |= lvt->lvt_mode;
	switch (lvt->lvt_mode) {
	case APIC_LVT_DM_NMI:
	case APIC_LVT_DM_SMI:
	case APIC_LVT_DM_INIT:
	case APIC_LVT_DM_EXTINT:
		if (!lvt->lvt_edgetrigger && bootverbose) {
			printf("lapic%u: Forcing LINT%u to edge trigger\n",
			    la->la_id, pin);
			value &= ~APIC_LVT_TM;
		}
		/* Use a vector of 0. */
		break;
	case APIC_LVT_DM_FIXED:
		value |= lvt->lvt_vector;
		break;
	default:
		panic("bad APIC LVT delivery mode: %#x\n", value);
	}
	return (value);
}

static uint32_t
lvt_mode(struct lapic *la, u_int pin, uint32_t value)
{
	struct lvt *lvt;

	KASSERT(pin <= APIC_LVT_MAX,
	    ("%s: pin %u out of range", __func__, pin));
	if (la->la_lvts[pin].lvt_active)
		lvt = &la->la_lvts[pin];
	else
		lvt = &lvts[pin];

	return (lvt_mode_impl(la, lvt, pin, value));
}

static uint32_t
elvt_mode(struct lapic *la, u_int idx, uint32_t value)
{
	struct lvt *elvt;

	KASSERT(idx <= APIC_ELVT_MAX,
	    ("%s: idx %u out of range", __func__, idx));

	elvt = &la->la_elvts[idx];
	KASSERT(elvt->lvt_active, ("%s: ELVT%u is not active", __func__, idx));
	KASSERT(elvt->lvt_edgetrigger,
	    ("%s: ELVT%u is not edge triggered", __func__, idx));
	KASSERT(elvt->lvt_activehi,
	    ("%s: ELVT%u is not active high", __func__, idx));
	return (lvt_mode_impl(la, elvt, idx, value));
}

/*
 * Map the local APIC and setup necessary interrupt vectors.
 */
static void
native_lapic_init(vm_paddr_t addr)
{
#ifdef SMP
	uint64_t r, r1, r2, rx;
#endif
	uint32_t ver;
	int i;
	bool arat;

	/*
	 * Enable x2APIC mode if possible.  Map the local APIC
	 * registers page.
	 *
	 * Keep the LAPIC registers page mapped uncached for x2APIC
	 * mode too, to have direct map page attribute set to
	 * uncached.  This is needed to work around CPU errata present
	 * on all Intel processors.
	 */
	KASSERT(trunc_page(addr) == addr,
	    ("local APIC not aligned on a page boundary"));
	lapic_paddr = addr;
	lapic_map = pmap_mapdev(addr, PAGE_SIZE);
	if (x2apic_mode)
		native_lapic_enable_x2apic();

	/* Setup the spurious interrupt handler. */
	setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
	    GSEL_APIC);

	/* Perform basic initialization of the BSP's local APIC. */
	lapic_enable();

	/* Set BSP's per-CPU local APIC ID. */
	PCPU_SET(apic_id, lapic_id());

	/* Local APIC timer interrupt. */
	setidt(APIC_TIMER_INT, pti ? IDTVEC(timerint_pti) : IDTVEC(timerint),
	    SDT_APIC, SEL_KPL, GSEL_APIC);

	/* Local APIC error interrupt. */
	setidt(APIC_ERROR_INT, pti ? IDTVEC(errorint_pti) : IDTVEC(errorint),
	    SDT_APIC, SEL_KPL, GSEL_APIC);

	/* XXX: Thermal interrupt */

	/* Local APIC CMCI. */
	setidt(APIC_CMC_INT, pti ? IDTVEC(cmcint_pti) : IDTVEC(cmcint),
	    SDT_APIC, SEL_KPL, GSEL_APIC);

	if ((resource_int_value("apic", 0, "clock", &i) != 0 || i != 0)) {
		/* Set if APIC timer runs in C3. */
		arat = (cpu_power_eax & CPUTPM1_ARAT);

		bzero(&lapic_et, sizeof(lapic_et));
		lapic_et.et_name = "LAPIC";
		lapic_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
		    ET_FLAGS_PERCPU;
		lapic_et.et_quality = 600;
		if (!arat) {
			lapic_et.et_flags |= ET_FLAGS_C3STOP;
			lapic_et.et_quality = 100;
		}
		if ((cpu_feature & CPUID_TSC) != 0 &&
		    (cpu_feature2 & CPUID2_TSCDLT) != 0 &&
		    tsc_is_invariant && tsc_freq != 0) {
			lapic_timer_tsc_deadline = 1;
			TUNABLE_INT_FETCH("hw.lapic_tsc_deadline",
			    &lapic_timer_tsc_deadline);
		}
		lapic_et.et_frequency = 0;
		/* We don't know frequency yet, so trying to guess. */
		lapic_et.et_min_period = 0x00001000LL;
		lapic_et.et_max_period = SBT_1S;
		lapic_et.et_start = lapic_et_start;
		lapic_et.et_stop = lapic_et_stop;
		lapic_et.et_priv = NULL;
		et_register(&lapic_et);
	}

	/*
	 * Set lapic_eoi_suppression after lapic_enable(), to not
	 * enable suppression in the hardware prematurely.  Note that
	 * we by default enable suppression even when system only has
	 * one IO-APIC, since EOI is broadcasted to all APIC agents,
	 * including CPUs, otherwise.
	 *
	 * It seems that at least some KVM versions report
	 * EOI_SUPPRESSION bit, but auto-EOI does not work.
	 */
	ver = lapic_read32(LAPIC_VERSION);
	if ((ver & APIC_VER_EOI_SUPPRESSION) != 0) {
		lapic_eoi_suppression = 1;
		if (vm_guest == VM_GUEST_KVM) {
			if (bootverbose)
				printf(
			    "KVM -- disabling lapic eoi suppression\n");
			lapic_eoi_suppression = 0;
		}
		TUNABLE_INT_FETCH("hw.lapic_eoi_suppression",
		    &lapic_eoi_suppression);
	}

#ifdef SMP
	/*
	 * Calibrate the busy loop waiting for IPI ack in xAPIC mode.
	 * lapic_ipi_wait_mult contains the number of iterations which
	 * approximately delay execution for 1 microsecond (the
	 * argument to native_lapic_ipi_wait() is in microseconds).
	 *
	 * We assume that TSC is present and already measured.
	 * Possible TSC frequency jumps are irrelevant to the
	 * calibration loop below, the CPU clock management code is
	 * not yet started, and we do not enter sleep states.
	 */
	KASSERT((cpu_feature & CPUID_TSC) != 0 && tsc_freq != 0,
	    ("TSC not initialized"));
	if (!x2apic_mode) {
		r = rdtsc();
		for (rx = 0; rx < LOOPS; rx++) {
			(void)lapic_read_icr_lo();
			ia32_pause();
		}
		r = rdtsc() - r;
		r1 = tsc_freq * LOOPS;
		r2 = r * 1000000;
		lapic_ipi_wait_mult = r1 >= r2 ? r1 / r2 : 1;
		if (bootverbose) {
			printf("LAPIC: ipi_wait() us multiplier %ju (r %ju "
			    "tsc %ju)\n", (uintmax_t)lapic_ipi_wait_mult,
			    (uintmax_t)r, (uintmax_t)tsc_freq);
		}
	}
#endif /* SMP */
}

/*
 * Create a local APIC instance.
 */
static void
native_lapic_create(u_int apic_id, int boot_cpu)
{
	int i;

	if (apic_id > max_apic_id) {
		printf("APIC: Ignoring local APIC with ID %d\n", apic_id);
		if (boot_cpu)
			panic("Can't ignore BSP");
		return;
	}
	KASSERT(!lapics[apic_id].la_present, ("duplicate local APIC %u",
	    apic_id));

	/*
	 * Assume no local LVT overrides and a cluster of 0 and
	 * intra-cluster ID of 0.
	 */
	lapics[apic_id].la_present = 1;
	lapics[apic_id].la_id = apic_id;
	for (i = 0; i <= APIC_LVT_MAX; i++) {
		lapics[apic_id].la_lvts[i] = lvts[i];
		lapics[apic_id].la_lvts[i].lvt_active = 0;
	}
	for (i = 0; i <= APIC_ELVT_MAX; i++) {
		lapics[apic_id].la_elvts[i] = elvts[i];
		lapics[apic_id].la_elvts[i].lvt_active = 0;
	}
	for (i = 0; i <= APIC_NUM_IOINTS; i++)
		lapics[apic_id].la_ioint_irqs[i] = IRQ_FREE;
	lapics[apic_id].la_ioint_irqs[IDT_SYSCALL - APIC_IO_INTS] = IRQ_SYSCALL;
	lapics[apic_id].la_ioint_irqs[APIC_TIMER_INT - APIC_IO_INTS] =
	    IRQ_TIMER;
#ifdef KDTRACE_HOOKS
	lapics[apic_id].la_ioint_irqs[IDT_DTRACE_RET - APIC_IO_INTS] =
	    IRQ_DTRACE_RET;
#endif
#ifdef XENHVM
	lapics[apic_id].la_ioint_irqs[IDT_EVTCHN - APIC_IO_INTS] = IRQ_EVTCHN;
#endif

#ifdef SMP
	cpu_add(apic_id, boot_cpu);
#endif
}

static inline uint32_t
amd_read_ext_features(void)
{
	uint32_t version;

	if (cpu_vendor_id != CPU_VENDOR_AMD &&
	    cpu_vendor_id != CPU_VENDOR_HYGON)
		return (0);
	version = lapic_read32(LAPIC_VERSION);
	if ((version & APIC_VER_AMD_EXT_SPACE) != 0)
		return (lapic_read32(LAPIC_EXT_FEATURES));
	else
		return (0);
}

static inline uint32_t
amd_read_elvt_count(void)
{
	uint32_t extf;
	uint32_t count;

	extf = amd_read_ext_features();
	count = (extf & APIC_EXTF_ELVT_MASK) >> APIC_EXTF_ELVT_SHIFT;
	count = min(count, APIC_ELVT_MAX + 1);
	return (count);
}

/*
 * Dump contents of local APIC registers
 */
static void
native_lapic_dump(const char* str)
{
	uint32_t version;
	uint32_t maxlvt;
	uint32_t extf;
	int elvt_count;
	int i;

	version = lapic_read32(LAPIC_VERSION);
	maxlvt = (version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
	printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
	printf("     ID: 0x%08x   VER: 0x%08x LDR: 0x%08x DFR: 0x%08x",
	    lapic_read32(LAPIC_ID), version,
	    lapic_read32(LAPIC_LDR), x2apic_mode ? 0 : lapic_read32(LAPIC_DFR));
	if ((cpu_feature2 & CPUID2_X2APIC) != 0)
		printf(" x2APIC: %d", x2apic_mode);
	printf("\n  lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
	    lapic_read32(LAPIC_LVT_LINT0), lapic_read32(LAPIC_LVT_LINT1),
	    lapic_read32(LAPIC_TPR), lapic_read32(LAPIC_SVR));
	printf("  timer: 0x%08x therm: 0x%08x err: 0x%08x",
	    lapic_read32(LAPIC_LVT_TIMER), lapic_read32(LAPIC_LVT_THERMAL),
	    lapic_read32(LAPIC_LVT_ERROR));
	if (maxlvt >= APIC_LVT_PMC)
		printf(" pmc: 0x%08x", lapic_read32(LAPIC_LVT_PCINT));
	printf("\n");
	if (maxlvt >= APIC_LVT_CMCI)
		printf(" cmci: 0x%08x\n", lapic_read32(LAPIC_LVT_CMCI));
	extf = amd_read_ext_features();
	if (extf != 0) {
		printf("   AMD ext features: 0x%08x", extf);
		elvt_count = amd_read_elvt_count();
		for (i = 0; i < elvt_count; i++)
			printf("%s elvt%d: 0x%08x", (i % 4) ? "" : "\n ", i,
			    lapic_read32(LAPIC_EXT_LVT0 + i));
		printf("\n");
	}
}

static void
native_lapic_xapic_mode(void)
{
	register_t saveintr;

	saveintr = intr_disable();
	if (x2apic_mode)
		native_lapic_enable_x2apic();
	intr_restore(saveintr);
}

static void
native_lapic_setup(int boot)
{
	struct lapic *la;
	uint32_t version;
	uint32_t maxlvt;
	register_t saveintr;
	int elvt_count;
	int i;

	saveintr = intr_disable();

	la = &lapics[lapic_id()];
	KASSERT(la->la_present, ("missing APIC structure"));
	version = lapic_read32(LAPIC_VERSION);
	maxlvt = (version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;

	/* Initialize the TPR to allow all interrupts. */
	lapic_set_tpr(0);

	/* Setup spurious vector and enable the local APIC. */
	lapic_enable();

	/* Program LINT[01] LVT entries. */
	lapic_write32(LAPIC_LVT_LINT0, lvt_mode(la, APIC_LVT_LINT0,
	    lapic_read32(LAPIC_LVT_LINT0)));
	lapic_write32(LAPIC_LVT_LINT1, lvt_mode(la, APIC_LVT_LINT1,
	    lapic_read32(LAPIC_LVT_LINT1)));

	/* Program the PMC LVT entry if present. */
	if (maxlvt >= APIC_LVT_PMC) {
		lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
		    lapic_read32(LAPIC_LVT_PCINT)));
	}

	/*
	 * Program the timer LVT.  Calibration is deferred until it is certain
	 * that we have a reliable timecounter.
	 */
	la->lvt_timer_base = lvt_mode(la, APIC_LVT_TIMER,
	    lapic_read32(LAPIC_LVT_TIMER));
	la->lvt_timer_last = la->lvt_timer_base;
	lapic_write32(LAPIC_LVT_TIMER, la->lvt_timer_base);

	if (boot)
		la->la_timer_mode = LAT_MODE_UNDEF;
	else if (la->la_timer_mode != LAT_MODE_UNDEF) {
		KASSERT(la->la_timer_period != 0, ("lapic%u: zero divisor",
		    lapic_id()));
		switch (la->la_timer_mode) {
		case LAT_MODE_PERIODIC:
			lapic_timer_set_divisor(lapic_timer_divisor);
			lapic_timer_periodic(la);
			break;
		case LAT_MODE_ONESHOT:
			lapic_timer_set_divisor(lapic_timer_divisor);
			lapic_timer_oneshot(la);
			break;
		case LAT_MODE_DEADLINE:
			lapic_timer_deadline(la);
			break;
		default:
			panic("corrupted la_timer_mode %p %d", la,
			    la->la_timer_mode);
		}
	}

	/* Program error LVT and clear any existing errors. */
	lapic_write32(LAPIC_LVT_ERROR, lvt_mode(la, APIC_LVT_ERROR,
	    lapic_read32(LAPIC_LVT_ERROR)));
	lapic_write32(LAPIC_ESR, 0);

	/* XXX: Thermal LVT */

	/* Program the CMCI LVT entry if present. */
	if (maxlvt >= APIC_LVT_CMCI) {
		lapic_write32(LAPIC_LVT_CMCI, lvt_mode(la, APIC_LVT_CMCI,
		    lapic_read32(LAPIC_LVT_CMCI)));
	}

	elvt_count = amd_read_elvt_count();
	for (i = 0; i < elvt_count; i++) {
		if (la->la_elvts[i].lvt_active)
			lapic_write32(LAPIC_EXT_LVT0 + i,
			    elvt_mode(la, i, lapic_read32(LAPIC_EXT_LVT0 + i)));
	}

	intr_restore(saveintr);
}

static void
native_lapic_intrcnt(void *dummy __unused)
{
	struct pcpu *pc;
	struct lapic *la;
	char buf[MAXCOMLEN + 1];

	/* If there are no APICs, skip this function. */
	if (lapics == NULL)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		la = &lapics[pc->pc_apic_id];
		if (!la->la_present)
			continue;

		snprintf(buf, sizeof(buf), "cpu%d:timer", pc->pc_cpuid);
		intrcnt_add(buf, &la->la_timer_count);
	}
}
SYSINIT(native_lapic_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, native_lapic_intrcnt,
    NULL);

static void
native_lapic_reenable_pmc(void)
{
#ifdef HWPMC_HOOKS
	uint32_t value;

	value = lapic_read32(LAPIC_LVT_PCINT);
	value &= ~APIC_LVT_M;
	lapic_write32(LAPIC_LVT_PCINT, value);
#endif
}

#ifdef HWPMC_HOOKS
static void
lapic_update_pmc(void *dummy)
{
	struct lapic *la;

	la = &lapics[lapic_id()];
	lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
	    lapic_read32(LAPIC_LVT_PCINT)));
}
#endif

static void
native_lapic_calibrate_timer(void)
{
	struct lapic *la;
	register_t intr;

#ifdef DEV_ATPIC
	/* Fail if the local APIC is not present. */
	if (!x2apic_mode && lapic_map == NULL)
		return;
#endif

	intr = intr_disable();
	la = &lapics[lapic_id()];

	lapic_calibrate_initcount(la);

	intr_restore(intr);

	if (lapic_timer_tsc_deadline && bootverbose) {
		printf("lapic: deadline tsc mode, Frequency %ju Hz\n",
		    (uintmax_t)tsc_freq);
	}
}

static int
native_lapic_enable_pmc(void)
{
#ifdef HWPMC_HOOKS
	u_int32_t maxlvt;

	/* Fail if the local APIC is not present. */
	if (!x2apic_mode && lapic_map == NULL)
		return (0);

	/* Fail if the PMC LVT is not present. */
	maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
	if (maxlvt < APIC_LVT_PMC)
		return (0);

	lvts[APIC_LVT_PMC].lvt_masked = 0;

#ifdef EARLY_AP_STARTUP
	MPASS(mp_ncpus == 1 || smp_started);
	smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
#else
	/*
	 * If hwpmc was loaded at boot time then the APs may not be
	 * started yet.  In that case, don't forward the request to
	 * them as they will program the lvt when they start.
	 */
	if (smp_started)
		smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
	else
		lapic_update_pmc(NULL);
#endif
	return (1);
#else
	return (0);
#endif
}

static void
native_lapic_disable_pmc(void)
{
#ifdef HWPMC_HOOKS
	u_int32_t maxlvt;

	/* Fail if the local APIC is not present. */
	if (!x2apic_mode && lapic_map == NULL)
		return;

	/* Fail if the PMC LVT is not present. */
	maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
	if (maxlvt < APIC_LVT_PMC)
		return;

	lvts[APIC_LVT_PMC].lvt_masked = 1;

#ifdef SMP
	/* The APs should always be started when hwpmc is unloaded. */
	KASSERT(mp_ncpus == 1 || smp_started, ("hwpmc unloaded too early"));
#endif
	smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
#endif
}

static bool
lapic_calibrate_initcount_cpuid_vm(void)
{
	u_int regs[4];
	uint64_t freq;

	/* Get value from CPUID leaf if possible. */
	if (vm_guest == VM_GUEST_NO)
		return (false);
	if (hv_high < 0x40000010)
		return (false);
	do_cpuid(0x40000010, regs);
	freq = (uint64_t)(regs[1]) * 1000;
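	/*
	 * Added note (assumption based on the VMware-style timing
	 * leaf convention): CPUID leaf 0x40000010 reports the APIC
	 * timer ("bus") frequency in kHz in %ebx (regs[1]), hence the
	 * conversion to Hz above.
	 */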

	/* Pick timer divisor. */
	lapic_timer_divisor = 2;
	do {
		if (freq / lapic_timer_divisor < APIC_TIMER_MAX_COUNT)
			break;
		lapic_timer_divisor <<= 1;
	} while (lapic_timer_divisor <= 128);
	if (lapic_timer_divisor > 128)
		return (false);

	/* Record divided frequency. */
	count_freq = freq / lapic_timer_divisor;
	return (true);
}

static uint64_t
cb_lapic_getcount(void)
{

	return (APIC_TIMER_MAX_COUNT - lapic_read32(LAPIC_CCR_TIMER));
}

static void
lapic_calibrate_initcount(struct lapic *la)
{
	uint64_t freq;

	if (lapic_calibrate_initcount_cpuid_vm())
		return;

	/* Calibrate the APIC timer frequency. */
	lapic_timer_set_divisor(2);
	lapic_timer_oneshot_nointr(la, APIC_TIMER_MAX_COUNT);
	fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX);
	freq = clockcalib(cb_lapic_getcount, "lapic");
	fpu_kern_leave(curthread, NULL);
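	/*
	 * Added note: clockcalib() ran the timer with the divide-by-2
	 * programmed above, so "freq * 2" below recovers the undivided
	 * timer frequency before a final divisor is chosen.
	 */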

	/* Pick a different divisor if necessary. */
	lapic_timer_divisor = 2;
	do {
		if (freq * 2 / lapic_timer_divisor < APIC_TIMER_MAX_COUNT)
			break;
		lapic_timer_divisor <<= 1;
	} while (lapic_timer_divisor <= 128);
	if (lapic_timer_divisor > 128)
		panic("lapic: Divisor too big");
	count_freq = freq * 2 / lapic_timer_divisor;

	if (bootverbose) {
		printf("lapic: Divisor %lu, Frequency %lu Hz\n",
		    lapic_timer_divisor, count_freq);
	}
}

static void
lapic_change_mode(struct eventtimer *et, struct lapic *la,
    enum lat_timer_mode newmode)
{

	if (la->la_timer_mode == newmode)
		return;
	switch (newmode) {
	case LAT_MODE_PERIODIC:
		lapic_timer_set_divisor(lapic_timer_divisor);
		et->et_frequency = count_freq;
		break;
	case LAT_MODE_DEADLINE:
		et->et_frequency = tsc_freq;
		break;
	case LAT_MODE_ONESHOT:
		lapic_timer_set_divisor(lapic_timer_divisor);
		et->et_frequency = count_freq;
		break;
	default:
		panic("lapic_change_mode %d", newmode);
	}
	la->la_timer_mode = newmode;
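	/*
	 * Added note: event timer periods are sbintime_t values in
	 * 32.32 fixed-point seconds, so shifting a tick count into the
	 * upper 32 bits and dividing by the frequency yields the
	 * shortest (2 ticks) and longest (2^32 - 2 ticks) representable
	 * periods.
	 */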
	et->et_min_period = (0x00000002LLU << 32) / et->et_frequency;
	et->et_max_period = (0xfffffffeLLU << 32) / et->et_frequency;
}

static int
lapic_et_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
{
	struct lapic *la;

	la = &lapics[PCPU_GET(apic_id)];
	if (period != 0) {
		lapic_change_mode(et, la, LAT_MODE_PERIODIC);
		la->la_timer_period = ((uint32_t)et->et_frequency * period) >>
		    32;
		lapic_timer_periodic(la);
	} else if (lapic_timer_tsc_deadline) {
		lapic_change_mode(et, la, LAT_MODE_DEADLINE);
		la->la_timer_period = (et->et_frequency * first) >> 32;
		lapic_timer_deadline(la);
	} else {
		lapic_change_mode(et, la, LAT_MODE_ONESHOT);
		la->la_timer_period = ((uint32_t)et->et_frequency * first) >>
		    32;
		lapic_timer_oneshot(la);
	}
	return (0);
}

static int
lapic_et_stop(struct eventtimer *et)
{
	struct lapic *la;

	la = &lapics[PCPU_GET(apic_id)];
	lapic_timer_stop(la);
	la->la_timer_mode = LAT_MODE_UNDEF;
	return (0);
}

static void
native_lapic_disable(void)
{
	uint32_t value;

	/* Software disable the local APIC. */
	value = lapic_read32(LAPIC_SVR);
	value &= ~APIC_SVR_SWEN;
	lapic_write32(LAPIC_SVR, value);
}

static void
lapic_enable(void)
{
	uint32_t value;

	/* Program the spurious vector to enable the local APIC. */
	value = lapic_read32(LAPIC_SVR);
	value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
	value |= APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT;
	if (lapic_eoi_suppression)
		value |= APIC_SVR_EOI_SUPPRESSION;
	lapic_write32(LAPIC_SVR, value);
}

/* Reset the local APIC on the BSP during resume. */
static void
lapic_resume(struct pic *pic, bool suspend_cancelled)
{

	lapic_setup(0);
}

static int
native_lapic_id(void)
{
	uint32_t v;

	KASSERT(x2apic_mode || lapic_map != NULL, ("local APIC is not mapped"));
	v = lapic_read32(LAPIC_ID);
	if (!x2apic_mode)
		v >>= APIC_ID_SHIFT;
	return (v);
}

static int
native_lapic_intr_pending(u_int vector)
{
	uint32_t irr;

	/*
	 * The IRR registers are an array of registers each of which
	 * only describes 32 interrupts in the low 32 bits.  Thus, we
	 * divide the vector by 32 to get the register index.
	 * Finally, we modulus the vector by 32 to determine the
	 * individual bit to test.
	 */
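	/*
	 * Added example: vector 0x52 maps to bit 18 (0x52 % 32) of
	 * IRR2 (0x52 / 32 == 2).
	 */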
	irr = lapic_read32(LAPIC_IRR0 + vector / 32);
	return (irr & 1 << (vector % 32));
}

static void
native_lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
{
	struct lapic *la;

	KASSERT(lapics[apic_id].la_present, ("%s: APIC %u doesn't exist",
	    __func__, apic_id));
	KASSERT(cluster <= APIC_MAX_CLUSTER, ("%s: cluster %u too big",
	    __func__, cluster));
	KASSERT(cluster_id <= APIC_MAX_INTRACLUSTER_ID,
	    ("%s: intra cluster id %u too big", __func__, cluster_id));
	la = &lapics[apic_id];
	la->la_cluster = cluster;
	la->la_cluster_id = cluster_id;
}

static int
native_lapic_set_lvt_mask(u_int apic_id, u_int pin, u_char masked)
{

	if (pin > APIC_LVT_MAX)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		lvts[pin].lvt_masked = masked;
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		lapics[apic_id].la_lvts[pin].lvt_masked = masked;
		lapics[apic_id].la_lvts[pin].lvt_active = 1;
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	if (bootverbose)
		printf(" LINT%u %s\n", pin, masked ? "masked" : "unmasked");
	return (0);
}

static int
native_lapic_set_lvt_mode(u_int apic_id, u_int pin, u_int32_t mode)
{
	struct lvt *lvt;

	if (pin > APIC_LVT_MAX)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		lvt = &lvts[pin];
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		lvt = &lapics[apic_id].la_lvts[pin];
		lvt->lvt_active = 1;
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	lvt->lvt_mode = mode;
	switch (mode) {
	case APIC_LVT_DM_NMI:
	case APIC_LVT_DM_SMI:
	case APIC_LVT_DM_INIT:
	case APIC_LVT_DM_EXTINT:
		lvt->lvt_edgetrigger = 1;
		lvt->lvt_activehi = 1;
		if (mode == APIC_LVT_DM_EXTINT)
			lvt->lvt_masked = 1;
		else
			lvt->lvt_masked = 0;
		break;
	default:
		panic("Unsupported delivery mode: 0x%x\n", mode);
	}
	if (bootverbose) {
		printf(" Routing ");
		switch (mode) {
		case APIC_LVT_DM_NMI:
			printf("NMI");
			break;
		case APIC_LVT_DM_SMI:
			printf("SMI");
			break;
		case APIC_LVT_DM_INIT:
			printf("INIT");
			break;
		case APIC_LVT_DM_EXTINT:
			printf("ExtINT");
			break;
		}
		printf(" -> LINT%u\n", pin);
	}
	return (0);
}

static int
native_lapic_set_lvt_polarity(u_int apic_id, u_int pin, enum intr_polarity pol)
{

	if (pin > APIC_LVT_MAX || pol == INTR_POLARITY_CONFORM)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		lvts[pin].lvt_activehi = (pol == INTR_POLARITY_HIGH);
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		lapics[apic_id].la_lvts[pin].lvt_active = 1;
		lapics[apic_id].la_lvts[pin].lvt_activehi =
		    (pol == INTR_POLARITY_HIGH);
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	if (bootverbose)
		printf(" LINT%u polarity: %s\n", pin,
		    pol == INTR_POLARITY_HIGH ? "high" : "low");
	return (0);
}

static int
native_lapic_set_lvt_triggermode(u_int apic_id, u_int pin,
    enum intr_trigger trigger)
{

	if (pin > APIC_LVT_MAX || trigger == INTR_TRIGGER_CONFORM)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		lvts[pin].lvt_edgetrigger = (trigger == INTR_TRIGGER_EDGE);
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		lapics[apic_id].la_lvts[pin].lvt_edgetrigger =
		    (trigger == INTR_TRIGGER_EDGE);
		lapics[apic_id].la_lvts[pin].lvt_active = 1;
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	if (bootverbose)
		printf(" LINT%u trigger: %s\n", pin,
		    trigger == INTR_TRIGGER_EDGE ? "edge" : "level");
	return (0);
}

/*
 * Adjust the TPR of the current CPU so that it blocks all interrupts below
 * the passed in vector.
 */
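/*
 * (Added illustrative note: the local APIC compares priority classes,
 * i.e. the high nibble of a vector, so a TPR of 0x4f blocks delivery of
 * all fixed interrupts with vectors 0x4f and below.)
 */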
static void
lapic_set_tpr(u_int vector)
{
#ifdef CHEAP_TPR
	lapic_write32(LAPIC_TPR, vector);
#else
	uint32_t tpr;

	tpr = lapic_read32(LAPIC_TPR) & ~APIC_TPR_PRIO;
	tpr |= vector;
	lapic_write32(LAPIC_TPR, tpr);
#endif
}

static void
native_lapic_eoi(void)
{

	lapic_write32_nofence(LAPIC_EOI, 0);
}

void
lapic_handle_intr(int vector, struct trapframe *frame)
{
	struct intsrc *isrc;

	/* The frame may have been written into a poisoned region. */
	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
	trap_check_kstack();

	isrc = intr_lookup_source(apic_idt_to_irq(PCPU_GET(apic_id),
	    vector));
	intr_execute_handlers(isrc, frame);
}

void
lapic_handle_timer(struct trapframe *frame)
{
	struct lapic *la;
	struct trapframe *oldframe;
	struct thread *td;

	/* Send EOI first thing. */
	lapic_eoi();

	/* The frame may have been written into a poisoned region. */
	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
	trap_check_kstack();

#if defined(SMP) && !defined(SCHED_ULE)
	/*
	 * Don't do any accounting for the disabled HTT cores, since it
	 * will provide misleading numbers for the userland.
	 *
	 * No locking is necessary here, since even if we lose the race
	 * when hlt_cpus_mask changes it is not a big deal, really.
	 *
	 * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
	 * and unlike other schedulers it actually schedules threads to
	 * those CPUs.
	 */
	if (CPU_ISSET(PCPU_GET(cpuid), &hlt_cpus_mask))
		return;
#endif

	/* Look up our local APIC structure for the tick counters. */
	la = &lapics[PCPU_GET(apic_id)];
	(*la->la_timer_count)++;
	critical_enter();
	if (lapic_et.et_active) {
		td = curthread;
		td->td_intr_nesting_level++;
		oldframe = td->td_intr_frame;
		td->td_intr_frame = frame;
		lapic_et.et_event_cb(&lapic_et, lapic_et.et_arg);
		td->td_intr_frame = oldframe;
		td->td_intr_nesting_level--;
	}
	critical_exit();
}

static void
lapic_timer_set_divisor(u_int divisor)
{

	KASSERT(powerof2(divisor), ("lapic: invalid divisor %u", divisor));
	KASSERT(ffs(divisor) <= nitems(lapic_timer_divisors),
	    ("lapic: invalid divisor %u", divisor));
	lapic_write32(LAPIC_DCR_TIMER, lapic_timer_divisors[ffs(divisor) - 1]);
}

static void
lapic_timer_oneshot(struct lapic *la)
{
	uint32_t value;

	value = la->lvt_timer_base;
	value &= ~(APIC_LVTT_TM | APIC_LVT_M);
	value |= APIC_LVTT_TM_ONE_SHOT;
	la->lvt_timer_last = value;
	lapic_write32(LAPIC_LVT_TIMER, value);
	lapic_write32(LAPIC_ICR_TIMER, la->la_timer_period);
}

static void
lapic_timer_oneshot_nointr(struct lapic *la, uint32_t count)
{
	uint32_t value;

	value = la->lvt_timer_base;
	value &= ~APIC_LVTT_TM;
	value |= APIC_LVTT_TM_ONE_SHOT | APIC_LVT_M;
	la->lvt_timer_last = value;
	lapic_write32(LAPIC_LVT_TIMER, value);
	lapic_write32(LAPIC_ICR_TIMER, count);
}

static void
lapic_timer_periodic(struct lapic *la)
{
	uint32_t value;

	value = la->lvt_timer_base;
	value &= ~(APIC_LVTT_TM | APIC_LVT_M);
	value |= APIC_LVTT_TM_PERIODIC;
	la->lvt_timer_last = value;
	lapic_write32(LAPIC_LVT_TIMER, value);
	lapic_write32(LAPIC_ICR_TIMER, la->la_timer_period);
}

static void
lapic_timer_deadline(struct lapic *la)
{
	uint32_t value;

	value = la->lvt_timer_base;
	value &= ~(APIC_LVTT_TM | APIC_LVT_M);
	value |= APIC_LVTT_TM_TSCDLT;
	if (value != la->lvt_timer_last) {
		la->lvt_timer_last = value;
		lapic_write32_nofence(LAPIC_LVT_TIMER, value);
		if (!x2apic_mode)
			mfence();
	}
	wrmsr(MSR_TSC_DEADLINE, la->la_timer_period + rdtsc());
}

static void
lapic_timer_stop(struct lapic *la)
{
	uint32_t value;

	if (la->la_timer_mode == LAT_MODE_DEADLINE) {
		wrmsr(MSR_TSC_DEADLINE, 0);
		mfence();
	} else {
		value = la->lvt_timer_base;
		value &= ~APIC_LVTT_TM;
		value |= APIC_LVT_M;
		la->lvt_timer_last = value;
		lapic_write32(LAPIC_LVT_TIMER, value);
	}
}

void
lapic_handle_cmc(void)
{

	trap_check_kstack();
	lapic_eoi();
	cmc_intr();
}

/*
 * Called from the mca_init() to activate the CMC interrupt if this CPU is
 * responsible for monitoring any MC banks for CMC events.  Since mca_init()
 * is called prior to lapic_setup() during boot, this just needs to unmask
 * this CPU's LVT_CMCI entry.
 */
static void
native_lapic_enable_cmc(void)
{
	u_int apic_id;

#ifdef DEV_ATPIC
	if (!x2apic_mode && lapic_map == NULL)
		return;
#endif
	apic_id = PCPU_GET(apic_id);
	KASSERT(lapics[apic_id].la_present,
	    ("%s: missing APIC %u", __func__, apic_id));
	lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_masked = 0;
	lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_active = 1;
}

static int
native_lapic_enable_mca_elvt(void)
{
	u_int apic_id;
	uint32_t value;
	int elvt_count;

#ifdef DEV_ATPIC
	if (lapic_map == NULL)
		return (-1);
#endif

	apic_id = PCPU_GET(apic_id);
	KASSERT(lapics[apic_id].la_present,
	    ("%s: missing APIC %u", __func__, apic_id));
	elvt_count = amd_read_elvt_count();
	if (elvt_count <= APIC_ELVT_MCA)
		return (-1);

	value = lapic_read32(LAPIC_EXT_LVT0 + APIC_ELVT_MCA);
	if ((value & APIC_LVT_M) == 0) {
		if (bootverbose)
			printf("AMD MCE Thresholding Extended LVT is already active\n");
		return (APIC_ELVT_MCA);
	}
	lapics[apic_id].la_elvts[APIC_ELVT_MCA].lvt_masked = 0;
	lapics[apic_id].la_elvts[APIC_ELVT_MCA].lvt_active = 1;
	return (APIC_ELVT_MCA);
}

void
lapic_handle_error(void)
{
	uint32_t esr;

	trap_check_kstack();

	/*
	 * Read the contents of the error status register.  Write to
	 * the register first before reading from it to force the APIC
	 * to update its value to indicate any errors that have
	 * occurred since the previous write to the register.
	 */
	lapic_write32(LAPIC_ESR, 0);
	esr = lapic_read32(LAPIC_ESR);
	if (esr != 0)
		printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
	lapic_eoi();
}

static u_int
native_apic_cpuid(u_int apic_id)
{
#ifdef SMP
	return apic_cpuids[apic_id];
#else
	return 0;
#endif
}

/* Request a free IDT vector to be used by the specified IRQ. */
static u_int
native_apic_alloc_vector(u_int apic_id, u_int irq)
{
	u_int vector;

	KASSERT(irq < num_io_irqs, ("Invalid IRQ %u", irq));

	/*
	 * Search for a free vector.  Currently we just use a very simple
	 * algorithm to find the first free vector.
	 */
	mtx_lock_spin(&icu_lock);
	for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
		if (lapics[apic_id].la_ioint_irqs[vector] != IRQ_FREE)
			continue;
		lapics[apic_id].la_ioint_irqs[vector] = irq;
		mtx_unlock_spin(&icu_lock);
		return (vector + APIC_IO_INTS);
	}
	mtx_unlock_spin(&icu_lock);
	return (0);
}

/*
 * Request 'count' free contiguous IDT vectors to be used by 'count'
 * IRQs.  'count' must be a power of two and the vectors will be
 * aligned on a boundary of 'align'.  If the request cannot be
 * satisfied, 0 is returned.
 */
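/*
 * (Added illustrative example, not from the original source: a device
 * using 4 MSI messages needs 4 consecutive vectors starting on a
 * 4-vector boundary, so such an allocation would arrive here as
 * apic_alloc_vectors(apic_id, irqs, 4, 4).)
 */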
static u_int
native_apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align)
{
	u_int first, run, vector;

	KASSERT(powerof2(count), ("bad count"));
	KASSERT(powerof2(align), ("bad align"));
	KASSERT(align >= count, ("align < count"));

	for (run = 0; run < count; run++)
		KASSERT(irqs[run] < num_io_irqs, ("Invalid IRQ %u at index %u",
		    irqs[run], run));

	/*
	 * Search for 'count' free vectors.  As with apic_alloc_vector(),
	 * this just uses a simple first fit algorithm.
	 */
	run = 0;
	first = 0;
	mtx_lock_spin(&icu_lock);
	for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
		/* Vector is in use, end run. */
		if (lapics[apic_id].la_ioint_irqs[vector] != IRQ_FREE) {
			run = 0;
			first = 0;
			continue;
		}

		/* Start a new run if run == 0 and vector is aligned. */
		if (run == 0) {
			if ((vector & (align - 1)) != 0)
				continue;
			first = vector;
		}
		run++;

		/* Keep looping if the run isn't long enough yet. */
		if (run < count)
			continue;

		/* Found a run, assign IRQs and return the first vector. */
		for (vector = 0; vector < count; vector++)
			lapics[apic_id].la_ioint_irqs[first + vector] =
			    irqs[vector];
		mtx_unlock_spin(&icu_lock);
		return (first + APIC_IO_INTS);
	}
	mtx_unlock_spin(&icu_lock);
	printf("APIC: Couldn't find APIC vectors for %u IRQs\n", count);
	return (0);
}

/*
 * Enable a vector for a particular apic_id.  Since all lapics share idt
 * entries and ioint_handlers this enables the vector on all lapics.  lapics
 * which do not have the vector configured would report spurious interrupts
 * should it fire.
 */
static void
native_apic_enable_vector(u_int apic_id, u_int vector)
{

	KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
	KASSERT(ioint_handlers[vector / 32] != NULL,
	    ("No ISR handler for vector %u", vector));
#ifdef KDTRACE_HOOKS
	KASSERT(vector != IDT_DTRACE_RET,
	    ("Attempt to overwrite DTrace entry"));
#endif
	setidt(vector, (pti ? ioint_pti_handlers : ioint_handlers)[vector / 32],
	    SDT_APIC, SEL_KPL, GSEL_APIC);
}

static void
native_apic_disable_vector(u_int apic_id, u_int vector)
{

	KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
#ifdef KDTRACE_HOOKS
	KASSERT(vector != IDT_DTRACE_RET,
	    ("Attempt to overwrite DTrace entry"));
#endif
	KASSERT(ioint_handlers[vector / 32] != NULL,
	    ("No ISR handler for vector %u", vector));
#ifdef notyet
	/*
	 * We can not currently clear the idt entry because other cpus
	 * may have a valid vector at this offset.
	 */
	setidt(vector, pti ? &IDTVEC(rsvd_pti) : &IDTVEC(rsvd), SDT_APIC,
	    SEL_KPL, GSEL_APIC);
#endif
}

/* Release an APIC vector when it's no longer in use. */
static void
native_apic_free_vector(u_int apic_id, u_int vector, u_int irq)
{
	struct thread *td;

	KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
	    vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
	    ("Vector %u does not map to an IRQ line", vector));
	KASSERT(irq < num_io_irqs, ("Invalid IRQ %u", irq));
	KASSERT(lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] ==
	    irq, ("IRQ mismatch"));
#ifdef KDTRACE_HOOKS
	KASSERT(vector != IDT_DTRACE_RET,
	    ("Attempt to overwrite DTrace entry"));
#endif

	/*
	 * Bind us to the cpu that owned the vector before freeing it so
	 * we don't lose an interrupt delivery race.
	 */
	td = curthread;
	if (!rebooting) {
		thread_lock(td);
		if (sched_is_bound(td))
			panic("apic_free_vector: Thread already bound.\n");
		sched_bind(td, apic_cpuid(apic_id));
		thread_unlock(td);
	}
	mtx_lock_spin(&icu_lock);
	lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] = IRQ_FREE;
	mtx_unlock_spin(&icu_lock);
	if (!rebooting) {
		thread_lock(td);
		sched_unbind(td);
		thread_unlock(td);
	}
}

/* Map an IDT vector (APIC) to an IRQ (interrupt source). */
static u_int
apic_idt_to_irq(u_int apic_id, u_int vector)
{
	int irq;

	KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
	    vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
	    ("Vector %u does not map to an IRQ line", vector));
#ifdef KDTRACE_HOOKS
	KASSERT(vector != IDT_DTRACE_RET,
	    ("Attempt to overwrite DTrace entry"));
#endif
	irq = lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS];
	if (irq < 0)
		irq = 0;
	return (irq);
}

#ifdef DDB
/*
 * Dump data about APIC IDT vector mappings.
 */
DB_SHOW_COMMAND(apic, db_show_apic)
{
	struct intsrc *isrc;
	int i, verbose;
	u_int apic_id;
	u_int irq;

	if (strcmp(modif, "vv") == 0)
		verbose = 2;
	else if (strcmp(modif, "v") == 0)
		verbose = 1;
	else
		verbose = 0;
	for (apic_id = 0; apic_id <= max_apic_id; apic_id++) {
		if (lapics[apic_id].la_present == 0)
			continue;
		db_printf("Interrupts bound to lapic %u\n", apic_id);
		for (i = 0; i < APIC_NUM_IOINTS + 1 && !db_pager_quit; i++) {
			irq = lapics[apic_id].la_ioint_irqs[i];
			if (irq == IRQ_FREE || irq == IRQ_SYSCALL)
				continue;
#ifdef KDTRACE_HOOKS
			if (irq == IRQ_DTRACE_RET)
				continue;
#endif
#ifdef XENHVM
			if (irq == IRQ_EVTCHN)
				continue;
#endif
			db_printf("vec 0x%2x -> ", i + APIC_IO_INTS);
			if (irq == IRQ_TIMER)
				db_printf("lapic timer\n");
			else if (irq < num_io_irqs) {
				isrc = intr_lookup_source(irq);
				if (isrc == NULL || verbose == 0)
					db_printf("IRQ %u\n", irq);
				else
					db_dump_intr_event(isrc->is_event,
					    verbose == 2);
			} else
				db_printf("IRQ %u ???\n", irq);
		}
	}
}

static void
dump_mask(const char *prefix, uint32_t v, int base)
{
	int i, first;

	first = 1;
	for (i = 0; i < 32; i++)
		if (v & (1 << i)) {
			if (first) {
				db_printf("%s:", prefix);
				first = 0;
			}
			db_printf(" %02x", base + i);
		}
	if (!first)
		db_printf("\n");
}

/* Show info from the lapic regs for this CPU. */
DB_SHOW_COMMAND(lapic, db_show_lapic)
{
	uint32_t v;

	db_printf("lapic ID = %d\n", lapic_id());
	v = lapic_read32(LAPIC_VERSION);
	db_printf("version  = %d.%d\n", (v & APIC_VER_VERSION) >> 4,
	    v & 0xf);
	db_printf("max LVT  = %d\n", (v & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
	v = lapic_read32(LAPIC_SVR);
	db_printf("SVR      = %02x (%s)\n", v & APIC_SVR_VECTOR,
	    v & APIC_SVR_ENABLE ? "enabled" : "disabled");
	db_printf("TPR      = %02x\n", lapic_read32(LAPIC_TPR));

#define	dump_field(prefix, regn, index)					\
	dump_mask(__XSTRING(prefix ## index),				\
	    lapic_read32(LAPIC_ ## regn ## index),			\
	    index * 32)

	db_printf("In-service Interrupts:\n");
	dump_field(isr, ISR, 0);
	dump_field(isr, ISR, 1);
	dump_field(isr, ISR, 2);
	dump_field(isr, ISR, 3);
	dump_field(isr, ISR, 4);
	dump_field(isr, ISR, 5);
	dump_field(isr, ISR, 6);
	dump_field(isr, ISR, 7);

	db_printf("TMR Interrupts:\n");
	dump_field(tmr, TMR, 0);
	dump_field(tmr, TMR, 1);
	dump_field(tmr, TMR, 2);
	dump_field(tmr, TMR, 3);
	dump_field(tmr, TMR, 4);
	dump_field(tmr, TMR, 5);
	dump_field(tmr, TMR, 6);
	dump_field(tmr, TMR, 7);

	db_printf("IRR Interrupts:\n");
	dump_field(irr, IRR, 0);
	dump_field(irr, IRR, 1);
	dump_field(irr, IRR, 2);
	dump_field(irr, IRR, 3);
	dump_field(irr, IRR, 4);
	dump_field(irr, IRR, 5);
	dump_field(irr, IRR, 6);
	dump_field(irr, IRR, 7);

#undef dump_field
}
#endif /* DDB */

/*
 * APIC probing support code.  This includes code to manage enumerators.
 */

static SLIST_HEAD(, apic_enumerator) enumerators =
	SLIST_HEAD_INITIALIZER(enumerators);
static struct apic_enumerator *best_enum;

void
apic_register_enumerator(struct apic_enumerator *enumerator)
{
#ifdef INVARIANTS
	struct apic_enumerator *apic_enum;

	SLIST_FOREACH(apic_enum, &enumerators, apic_next) {
		if (apic_enum == enumerator)
			panic("%s: Duplicate register of %s", __func__,
			    enumerator->apic_name);
	}
#endif
	SLIST_INSERT_HEAD(&enumerators, enumerator, apic_next);
}

/*
 * We have to look for CPU's very, very early because certain subsystems
 * want to know how many CPU's we have extremely early on in the boot
 * process.
 */
static void
apic_init(void *dummy __unused)
{
	struct apic_enumerator *enumerator;
	int retval, best;

	/* We only support built in local APICs. */
	if (!(cpu_feature & CPUID_APIC))
		return;

	/* Don't probe if APIC mode is disabled. */
	if (resource_disabled("apic", 0))
		return;

	/* Probe all the enumerators to find the best match. */
	best = 0;
	SLIST_FOREACH(enumerator, &enumerators, apic_next) {
		retval = enumerator->apic_probe();
		if (retval > 0)
			continue;
		if (best_enum == NULL || best < retval) {
			best_enum = enumerator;
			best = retval;
		}
	}
	if (best_enum == NULL) {
		if (bootverbose)
			printf("APIC: Could not find any APICs.\n");
#ifndef DEV_ATPIC
		panic("running without device atpic requires a local APIC");
#endif
		return;
	}

	if (bootverbose)
		printf("APIC: Using the %s enumerator.\n",
		    best_enum->apic_name);

#ifdef I686_CPU
	/*
	 * To work around an errata, we disable the local APIC on some
	 * CPUs during early startup.  We need to turn the local APIC back
	 * on on such CPUs now.
	 */
	ppro_reenable_apic();
#endif

	/* Probe the CPU's in the system. */
	retval = best_enum->apic_probe_cpus();
	if (retval != 0)
		printf("%s: Failed to probe CPUs: returned %d\n",
		    best_enum->apic_name, retval);
}
SYSINIT(apic_init, SI_SUB_TUNABLES - 1, SI_ORDER_SECOND, apic_init, NULL);

/*
 * Setup the local APIC.  We have to do this prior to starting up the APs
 * in the SMP case.
 */
static void
apic_setup_local(void *dummy __unused)
{
	int retval;

	if (best_enum == NULL)
		return;

	lapics = malloc(sizeof(*lapics) * (max_apic_id + 1), M_LAPIC,
	    M_WAITOK | M_ZERO);

	/* Initialize the local APIC. */
	retval = best_enum->apic_setup_local();
	if (retval != 0)
		printf("%s: Failed to setup the local APIC: returned %d\n",
		    best_enum->apic_name, retval);
}
SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_SECOND, apic_setup_local, NULL);

/*
 * Setup the I/O APICs.
 */
static void
apic_setup_io(void *dummy __unused)
{
	int retval;

	if (best_enum == NULL)
		return;

	/*
	 * Local APIC must be registered before other PICs and pseudo PICs
	 * for proper suspend/resume order.
	 */
	intr_register_pic(&lapic_pic);

	retval = best_enum->apic_setup_io();
	if (retval != 0)
		printf("%s: Failed to setup I/O APICs: returned %d\n",
		    best_enum->apic_name, retval);

	/*
	 * Finish setting up the local APIC on the BSP once we know
	 * how to properly program the LINT pins.  In particular, this
	 * enables the EOI suppression mode, if LAPIC supports it and
	 * user did not disable the mode.
	 */
	lapic_setup(1);
	if (bootverbose)
		lapic_dump("BSP");

	/* Enable the MSI "pic". */
	init_ops.msi_init();

#ifdef XENHVM
	xen_intr_alloc_irqs();
#endif
}
SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_THIRD, apic_setup_io, NULL);

#ifdef SMP
/*
 * Inter Processor Interrupt functions.  The lapic_ipi_*() functions are
 * private to the MD code.  The public interface for the rest of the
 * kernel is defined in mp_machdep.c.
 */

/*
 * Wait delay microseconds for IPI to be sent.  If delay is -1, we
 * wait forever.
 */
static int
native_lapic_ipi_wait(int delay)
{
	uint64_t rx;

	/* LAPIC_ICR.APIC_DELSTAT_MASK is undefined in x2APIC mode */
	if (x2apic_mode)
		return (1);

	for (rx = 0; delay == -1 || rx < lapic_ipi_wait_mult * delay; rx++) {
		if ((lapic_read_icr_lo() & APIC_DELSTAT_MASK) ==
		    APIC_DELSTAT_IDLE)
			return (1);
		ia32_pause();
	}
	return (0);
}

static void
native_lapic_ipi_raw(register_t icrlo, u_int dest)
{
	uint32_t icrhi;

	/* XXX: Need more sanity checking of icrlo? */
	KASSERT(x2apic_mode || lapic_map != NULL,
	    ("%s called too early", __func__));
	KASSERT(x2apic_mode ||
	    (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
	    ("%s: invalid dest field", __func__));
	KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
	    ("%s: reserved bits set in ICR LO register", __func__));

	if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
		if (x2apic_mode)
			icrhi = dest;
		else
			icrhi = dest << APIC_ID_SHIFT;
		lapic_write_icr(icrhi, icrlo);
	} else {
		lapic_write_icr_lo(icrlo);
	}
}

#ifdef DETECT_DEADLOCK
#define	AFTER_SPIN	50
#endif

static void
native_lapic_ipi_vectored(u_int vector, int dest)
{
	register_t icrlo, destfield;

	KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
	    ("%s: invalid vector %d", __func__, vector));

	destfield = 0;
	switch (dest) {
	case APIC_IPI_DEST_SELF:
		if (x2apic_mode && vector < IPI_NMI_FIRST) {
			lapic_write_self_ipi(vector);
			return;
		}
		icrlo = APIC_DEST_SELF;
		break;
	case APIC_IPI_DEST_ALL:
		icrlo = APIC_DEST_ALLISELF;
		break;
	case APIC_IPI_DEST_OTHERS:
		icrlo = APIC_DEST_ALLESELF;
		break;
	default:
		KASSERT(x2apic_mode ||
		    (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
		    ("%s: invalid destination 0x%x", __func__, dest));
		icrlo = APIC_DEST_DESTFLD;
		destfield = dest;
	}

	/*
	 * NMI IPIs are just fake vectors used to send an NMI.  Use special
	 * rules regarding NMIs if passed, otherwise specify the vector.
	 */
	if (vector >= IPI_NMI_FIRST)
		icrlo |= APIC_DELMODE_NMI;
	else
		icrlo |= vector | APIC_DELMODE_FIXED;
	icrlo |= APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT;

	/* Wait for an earlier IPI to finish. */
	if (!lapic_ipi_wait(lapic_ds_idle_timeout)) {
		if (KERNEL_PANICKED())
			return;
		else
			panic("APIC: Previous IPI is stuck");
	}

	lapic_ipi_raw(icrlo, destfield);

#ifdef DETECT_DEADLOCK
	/* Wait for IPI to be delivered. */
	if (!lapic_ipi_wait(AFTER_SPIN)) {
#ifdef needsattention
		/*
		 * XXX FIXME:
		 *
		 * The above function waits for the message to actually be
		 * delivered.  It breaks out after an arbitrary timeout
		 * since the message should eventually be delivered (at
		 * least in theory) and that if it wasn't we would catch
		 * the failure with the check above when the next IPI is
		 * sent.
		 *
		 * We could skip this wait entirely, EXCEPT it probably
		 * protects us from other routines that assume that the
		 * message was delivered and acted upon when this function
		 * returns.
		 */
		printf("APIC: IPI might be stuck\n");
#else /* !needsattention */
		/* Wait until message is sent without a timeout. */
		while (lapic_read_icr_lo() & APIC_DELSTAT_PEND)
			ia32_pause();
#endif /* needsattention */
	}
#endif /* DETECT_DEADLOCK */
}
#endif /* SMP */

/*
 * Since the IDT is shared by all CPUs the IPI slot update needs to be globally
 * visible.
 *
 * Consider the case where an IPI is generated immediately after allocation:
 *     vector = lapic_ipi_alloc(ipifunc);
 *     ipi_selected(other_cpus, vector);
 *
 * In xAPIC mode a write to ICR_LO has serializing semantics because the
 * APIC page is mapped as an uncached region.  In x2APIC mode there is an
 * explicit 'mfence' before the ICR MSR is written.  Therefore in both cases
 * the IDT slot update is globally visible before the IPI is delivered.
 */
static int
native_lapic_ipi_alloc(inthand_t *ipifunc)
{
	struct gate_descriptor *ip;
	long func;
	int idx, vector;

	KASSERT(ipifunc != &IDTVEC(rsvd) && ipifunc != &IDTVEC(rsvd_pti),
	    ("invalid ipifunc %p", ipifunc));

	vector = -1;
	mtx_lock_spin(&icu_lock);
	for (idx = IPI_DYN_FIRST; idx <= IPI_DYN_LAST; idx++) {
		ip = &idt[idx];
		/* Reassemble the handler address from the gate descriptor. */
		func = (ip->gd_hioffset << 16) | ip->gd_looffset;
#ifdef __i386__
		func -= setidt_disp;
#endif
		if ((!pti && func == (uintptr_t)&IDTVEC(rsvd)) ||
		    (pti && func == (uintptr_t)&IDTVEC(rsvd_pti))) {
			vector = idx;
			setidt(vector, ipifunc, SDT_APIC, SEL_KPL, GSEL_APIC);
			break;
		}
	}
	mtx_unlock_spin(&icu_lock);
	return (vector);
}

static void
native_lapic_ipi_free(int vector)
{
	struct gate_descriptor *ip;
	long func;

	KASSERT(vector >= IPI_DYN_FIRST && vector <= IPI_DYN_LAST,
	    ("%s: invalid vector %d", __func__, vector));

	mtx_lock_spin(&icu_lock);
	ip = &idt[vector];
	func = (ip->gd_hioffset << 16) | ip->gd_looffset;
#ifdef __i386__
	func -= setidt_disp;
#endif
	KASSERT(func != (uintptr_t)&IDTVEC(rsvd) &&
	    func != (uintptr_t)&IDTVEC(rsvd_pti),
	    ("invalid idtfunc %#lx", func));
	setidt(vector, pti ? &IDTVEC(rsvd_pti) : &IDTVEC(rsvd), SDT_APIC,
	    SEL_KPL, GSEL_APIC);
	mtx_unlock_spin(&icu_lock);
}