2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1996, by Steve Passe
6 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. The name of the developer may NOT be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 * 3. Neither the name of the author nor the names of any co-contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * Local APIC support on Pentium and later processors.
36 #include <sys/cdefs.h>
37 #include "opt_atpic.h"
38 #include "opt_hwpmc_hooks.h"
42 #include <sys/param.h>
43 #include <sys/systm.h>
46 #include <sys/kernel.h>
48 #include <sys/malloc.h>
49 #include <sys/mutex.h>
52 #include <sys/sched.h>
54 #include <sys/sysctl.h>
55 #include <sys/timeet.h>
56 #include <sys/timetc.h>
61 #include <x86/apicreg.h>
62 #include <machine/clock.h>
63 #include <machine/cpufunc.h>
64 #include <machine/cputypes.h>
65 #include <machine/fpu.h>
66 #include <machine/frame.h>
67 #include <machine/intr_machdep.h>
68 #include <x86/apicvar.h>
70 #include <machine/md_var.h>
71 #include <machine/smp.h>
72 #include <machine/specialreg.h>
76 #include <sys/interrupt.h>
81 #define SDT_APIC SDT_SYSIGT
84 #define SDT_APIC SDT_SYS386IGT
85 #define GSEL_APIC GSEL(GCODE_SEL, SEL_KPL)
88 static MALLOC_DEFINE(M_LAPIC, "local_apic", "Local APIC items");
90 /* Sanity checks on IDT vectors. */
91 CTASSERT(APIC_IO_INTS + APIC_NUM_IOINTS == APIC_TIMER_INT);
92 CTASSERT(APIC_TIMER_INT < APIC_LOCAL_INTS);
93 CTASSERT(APIC_LOCAL_INTS == 240);
94 CTASSERT(IPI_STOP < APIC_SPURIOUS_INT);
97 * I/O interrupts use non-negative IRQ values. These values are used
98 * to mark unused IDT entries or IDT entries reserved for a non-I/O interrupt.
103 #define IRQ_SYSCALL -3
104 #define IRQ_DTRACE_RET -4
105 #define IRQ_EVTCHN -5
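/*
 * The per-LAPIC la_ioint_irqs[] array below maps an IDT vector offset
 * (vector - APIC_IO_INTS) either to a real IRQ number (>= 0) or to one of
 * the negative sentinel values defined above.
 */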
107 enum lat_timer_mode {
LAT_MODE_UNDEF = 0,
109 LAT_MODE_PERIODIC = 1,
110 LAT_MODE_ONESHOT = 2,
111 LAT_MODE_DEADLINE = 3,
115 * Support for local APICs. Local APICs manage interrupts on each
116 * individual processor as opposed to I/O APICs which receive interrupts
117 * from I/O devices and then forward them on to the local APICs.
119 * Local APICs can also send interrupts to each other thus providing the
120 * mechanism for IPIs.
124 u_int lvt_edgetrigger:1;
125 u_int lvt_activehi:1;
133 struct lvt la_lvts[APIC_LVT_MAX + 1];
134 struct lvt la_elvts[APIC_ELVT_MAX + 1];
137 u_int la_cluster_id:2;
139 u_long *la_timer_count;
140 uint64_t la_timer_period;
141 enum lat_timer_mode la_timer_mode;
142 uint32_t lvt_timer_base;
143 uint32_t lvt_timer_last;
144 /* Include IDT_SYSCALL to make indexing easier. */
145 int la_ioint_irqs[APIC_NUM_IOINTS + 1];
148 /* Global defaults for local APIC LVT entries. */
149 static struct lvt lvts[APIC_LVT_MAX + 1] = {
150 { 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 }, /* LINT0: masked ExtINT */
151 { 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 }, /* LINT1: NMI */
152 { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT }, /* Timer */
153 { 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT }, /* Error */
154 { 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 }, /* PMC */
155 { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT }, /* Thermal */
156 { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_CMC_INT }, /* CMCI */
159 /* Global defaults for AMD local APIC ELVT entries. */
160 static struct lvt elvts[APIC_ELVT_MAX + 1] = {
161 { 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
162 { 1, 1, 1, 0, APIC_LVT_DM_FIXED, APIC_CMC_INT },
163 { 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
164 { 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
167 static inthand_t *ioint_handlers[] = {
169 IDTVEC(apic_isr1), /* 32 - 63 */
170 IDTVEC(apic_isr2), /* 64 - 95 */
171 IDTVEC(apic_isr3), /* 96 - 127 */
172 IDTVEC(apic_isr4), /* 128 - 159 */
173 IDTVEC(apic_isr5), /* 160 - 191 */
174 IDTVEC(apic_isr6), /* 192 - 223 */
175 IDTVEC(apic_isr7), /* 224 - 255 */
178 static inthand_t *ioint_pti_handlers[] = {
180 IDTVEC(apic_isr1_pti), /* 32 - 63 */
181 IDTVEC(apic_isr2_pti), /* 64 - 95 */
182 IDTVEC(apic_isr3_pti), /* 96 - 127 */
183 IDTVEC(apic_isr4_pti), /* 128 - 159 */
184 IDTVEC(apic_isr5_pti), /* 160 - 191 */
185 IDTVEC(apic_isr6_pti), /* 192 - 223 */
186 IDTVEC(apic_isr7_pti), /* 224 - 255 */
189 static u_int32_t lapic_timer_divisors[] = {
190 APIC_TDCR_1, APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
191 APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128
194 extern inthand_t IDTVEC(rsvd_pti), IDTVEC(rsvd);
196 volatile char *lapic_map;
197 vm_paddr_t lapic_paddr = DEFAULT_APIC_BASE;
199 int lapic_eoi_suppression;
200 static int lapic_timer_tsc_deadline;
201 static u_long lapic_timer_divisor, count_freq;
202 static struct eventtimer lapic_et;
204 static uint64_t lapic_ipi_wait_mult;
205 static int __read_mostly lapic_ds_idle_timeout = 1000000;
207 unsigned int max_apic_id;
209 SYSCTL_NODE(_hw, OID_AUTO, apic, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
211 SYSCTL_INT(_hw_apic, OID_AUTO, x2apic_mode, CTLFLAG_RD, &x2apic_mode, 0, "");
212 SYSCTL_INT(_hw_apic, OID_AUTO, eoi_suppression, CTLFLAG_RD,
213 &lapic_eoi_suppression, 0, "");
214 SYSCTL_INT(_hw_apic, OID_AUTO, timer_tsc_deadline, CTLFLAG_RD,
215 &lapic_timer_tsc_deadline, 0, "");
217 SYSCTL_INT(_hw_apic, OID_AUTO, ds_idle_timeout, CTLFLAG_RWTUN,
218 &lapic_ds_idle_timeout, 0,
219 "timeout (in us) for APIC Delivery Status to become Idle (xAPIC only)");
222 static void lapic_calibrate_initcount(struct lapic *la);
225 * Use __nosanitizethread to exempt the LAPIC I/O accessors from KCSan
226 * instrumentation. Otherwise, if x2APIC is not available, use of the global
227 * lapic_map will generate a KCSan false positive. While the mapping is
228 * shared among all CPUs, the physical access will always take place on the
229 * local CPU's APIC, so there isn't in fact a race here. Furthermore, the
230 * KCSan warning printf can cause a panic if issued during LAPIC access,
231 * due to attempted recursive use of event timer resources.
234 static uint32_t __nosanitizethread
235 lapic_read32(enum LAPIC_REGISTERS reg)
240 res = rdmsr32(MSR_APIC_000 + reg);
242 res = *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL);
247 static void __nosanitizethread
248 lapic_write32(enum LAPIC_REGISTERS reg, uint32_t val)
254 wrmsr(MSR_APIC_000 + reg, val);
256 *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
260 static void __nosanitizethread
261 lapic_write32_nofence(enum LAPIC_REGISTERS reg, uint32_t val)
265 wrmsr(MSR_APIC_000 + reg, val);
267 *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
273 lapic_read_icr_lo(void)
276 return (lapic_read32(LAPIC_ICR_LO));
280 lapic_write_icr(uint32_t vhi, uint32_t vlo)
286 v = ((uint64_t)vhi << 32) | vlo;
288 wrmsr(MSR_APIC_000 + LAPIC_ICR_LO, v);
290 saveintr = intr_disable();
291 lapic_write32(LAPIC_ICR_HI, vhi);
292 lapic_write32(LAPIC_ICR_LO, vlo);
293 intr_restore(saveintr);
298 lapic_write_icr_lo(uint32_t vlo)
303 wrmsr(MSR_APIC_000 + LAPIC_ICR_LO, vlo);
305 lapic_write32(LAPIC_ICR_LO, vlo);
310 lapic_write_self_ipi(uint32_t vector)
313 KASSERT(x2apic_mode, ("SELF IPI write in xAPIC mode"));
314 wrmsr(MSR_APIC_000 + LAPIC_SELF_IPI, vector);
319 native_lapic_enable_x2apic(void)
323 apic_base = rdmsr(MSR_APICBASE);
324 apic_base |= APICBASE_X2APIC | APICBASE_ENABLED;
325 wrmsr(MSR_APICBASE, apic_base);
329 native_lapic_is_x2apic(void)
333 apic_base = rdmsr(MSR_APICBASE);
334 return ((apic_base & (APICBASE_X2APIC | APICBASE_ENABLED)) ==
335 (APICBASE_X2APIC | APICBASE_ENABLED));
338 static void lapic_enable(void);
339 static void lapic_resume(struct pic *pic, bool suspend_cancelled);
340 static void lapic_timer_oneshot(struct lapic *);
341 static void lapic_timer_oneshot_nointr(struct lapic *, uint32_t);
342 static void lapic_timer_periodic(struct lapic *);
343 static void lapic_timer_deadline(struct lapic *);
344 static void lapic_timer_stop(struct lapic *);
345 static void lapic_timer_set_divisor(u_int divisor);
346 static uint32_t lvt_mode(struct lapic *la, u_int pin, uint32_t value);
347 static int lapic_et_start(struct eventtimer *et,
348 sbintime_t first, sbintime_t period);
349 static int lapic_et_stop(struct eventtimer *et);
350 static u_int apic_idt_to_irq(u_int apic_id, u_int vector);
351 static void lapic_set_tpr(u_int vector);
353 struct pic lapic_pic = { .pic_resume = lapic_resume };
355 /* Forward declarations for apic_ops */
356 static void native_lapic_create(u_int apic_id, int boot_cpu);
357 static void native_lapic_init(vm_paddr_t addr);
358 static void native_lapic_xapic_mode(void);
359 static void native_lapic_setup(int boot);
360 static void native_lapic_dump(const char *str);
361 static void native_lapic_disable(void);
362 static void native_lapic_eoi(void);
363 static int native_lapic_id(void);
364 static int native_lapic_intr_pending(u_int vector);
365 static u_int native_apic_cpuid(u_int apic_id);
366 static u_int native_apic_alloc_vector(u_int apic_id, u_int irq);
367 static u_int native_apic_alloc_vectors(u_int apic_id, u_int *irqs,
368 u_int count, u_int align);
369 static void native_apic_disable_vector(u_int apic_id, u_int vector);
370 static void native_apic_enable_vector(u_int apic_id, u_int vector);
371 static void native_apic_free_vector(u_int apic_id, u_int vector, u_int irq);
372 static void native_lapic_set_logical_id(u_int apic_id, u_int cluster,
374 static void native_lapic_calibrate_timer(void);
375 static int native_lapic_enable_pmc(void);
376 static void native_lapic_disable_pmc(void);
377 static void native_lapic_reenable_pmc(void);
378 static void native_lapic_enable_cmc(void);
379 static int native_lapic_enable_mca_elvt(void);
380 static int native_lapic_set_lvt_mask(u_int apic_id, u_int lvt,
382 static int native_lapic_set_lvt_mode(u_int apic_id, u_int lvt,
384 static int native_lapic_set_lvt_polarity(u_int apic_id, u_int lvt,
385 enum intr_polarity pol);
386 static int native_lapic_set_lvt_triggermode(u_int apic_id, u_int lvt,
387 enum intr_trigger trigger);
389 static void native_lapic_ipi_raw(register_t icrlo, u_int dest);
390 static void native_lapic_ipi_vectored(u_int vector, int dest);
391 static int native_lapic_ipi_wait(int delay);
393 static int native_lapic_ipi_alloc(inthand_t *ipifunc);
394 static void native_lapic_ipi_free(int vector);
396 struct apic_ops apic_ops = {
397 .create = native_lapic_create,
398 .init = native_lapic_init,
399 .xapic_mode = native_lapic_xapic_mode,
400 .is_x2apic = native_lapic_is_x2apic,
401 .setup = native_lapic_setup,
402 .dump = native_lapic_dump,
403 .disable = native_lapic_disable,
404 .eoi = native_lapic_eoi,
405 .id = native_lapic_id,
406 .intr_pending = native_lapic_intr_pending,
407 .set_logical_id = native_lapic_set_logical_id,
408 .cpuid = native_apic_cpuid,
409 .alloc_vector = native_apic_alloc_vector,
410 .alloc_vectors = native_apic_alloc_vectors,
411 .enable_vector = native_apic_enable_vector,
412 .disable_vector = native_apic_disable_vector,
413 .free_vector = native_apic_free_vector,
414 .calibrate_timer = native_lapic_calibrate_timer,
415 .enable_pmc = native_lapic_enable_pmc,
416 .disable_pmc = native_lapic_disable_pmc,
417 .reenable_pmc = native_lapic_reenable_pmc,
418 .enable_cmc = native_lapic_enable_cmc,
419 .enable_mca_elvt = native_lapic_enable_mca_elvt,
421 .ipi_raw = native_lapic_ipi_raw,
422 .ipi_vectored = native_lapic_ipi_vectored,
423 .ipi_wait = native_lapic_ipi_wait,
425 .ipi_alloc = native_lapic_ipi_alloc,
426 .ipi_free = native_lapic_ipi_free,
427 .set_lvt_mask = native_lapic_set_lvt_mask,
428 .set_lvt_mode = native_lapic_set_lvt_mode,
429 .set_lvt_polarity = native_lapic_set_lvt_polarity,
430 .set_lvt_triggermode = native_lapic_set_lvt_triggermode,
434 lvt_mode_impl(struct lapic *la, struct lvt *lvt, u_int pin, uint32_t value)
437 value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
439 if (lvt->lvt_edgetrigger == 0)
440 value |= APIC_LVT_TM;
441 if (lvt->lvt_activehi == 0)
442 value |= APIC_LVT_IIPP_INTALO;
445 value |= lvt->lvt_mode;
446 switch (lvt->lvt_mode) {
447 case APIC_LVT_DM_NMI:
448 case APIC_LVT_DM_SMI:
449 case APIC_LVT_DM_INIT:
450 case APIC_LVT_DM_EXTINT:
451 if (!lvt->lvt_edgetrigger && bootverbose) {
452 printf("lapic%u: Forcing LINT%u to edge trigger\n",
454 value &= ~APIC_LVT_TM;
456 /* Use a vector of 0. */
458 case APIC_LVT_DM_FIXED:
459 value |= lvt->lvt_vector;
462 panic("bad APIC LVT delivery mode: %#x\n", value);
468 lvt_mode(struct lapic *la, u_int pin, uint32_t value)
472 KASSERT(pin <= APIC_LVT_MAX,
473 ("%s: pin %u out of range", __func__, pin));
474 if (la->la_lvts[pin].lvt_active)
475 lvt = &la->la_lvts[pin];
479 return (lvt_mode_impl(la, lvt, pin, value));
483 elvt_mode(struct lapic *la, u_int idx, uint32_t value)
487 KASSERT(idx <= APIC_ELVT_MAX,
488 ("%s: idx %u out of range", __func__, idx));
490 elvt = &la->la_elvts[idx];
491 KASSERT(elvt->lvt_active, ("%s: ELVT%u is not active", __func__, idx));
492 KASSERT(elvt->lvt_edgetrigger,
493 ("%s: ELVT%u is not edge triggered", __func__, idx));
494 KASSERT(elvt->lvt_activehi,
495 ("%s: ELVT%u is not active high", __func__, idx));
496 return (lvt_mode_impl(la, elvt, idx, value));
500 * Map the local APIC and set up the necessary interrupt vectors.
503 native_lapic_init(vm_paddr_t addr)
506 uint64_t r, r1, r2, rx;
513 * Enable x2APIC mode if possible. Map the local APIC registers page.
516 * Keep the LAPIC registers page mapped uncached for x2APIC
517 * mode too, so that the direct map page attribute is set to
518 * uncached. This is needed to work around CPU errata present
519 * on all Intel processors.
521 KASSERT(trunc_page(addr) == addr,
522 ("local APIC not aligned on a page boundary"));
524 lapic_map = pmap_mapdev(addr, PAGE_SIZE);
526 native_lapic_enable_x2apic();
530 /* Setup the spurious interrupt handler. */
531 setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
534 /* Perform basic initialization of the BSP's local APIC. */
537 /* Set BSP's per-CPU local APIC ID. */
538 PCPU_SET(apic_id, lapic_id());
540 /* Local APIC timer interrupt. */
541 setidt(APIC_TIMER_INT, pti ? IDTVEC(timerint_pti) : IDTVEC(timerint),
542 SDT_APIC, SEL_KPL, GSEL_APIC);
544 /* Local APIC error interrupt. */
545 setidt(APIC_ERROR_INT, pti ? IDTVEC(errorint_pti) : IDTVEC(errorint),
546 SDT_APIC, SEL_KPL, GSEL_APIC);
548 /* XXX: Thermal interrupt */
550 /* Local APIC CMCI. */
551 setidt(APIC_CMC_INT, pti ? IDTVEC(cmcint_pti) : IDTVEC(cmcint),
552 SDT_APIC, SEL_KPL, GSEL_APIC);
554 if ((resource_int_value("apic", 0, "clock", &i) != 0 || i != 0)) {
555 /* Set if APIC timer runs in C3. */
556 arat = (cpu_power_eax & CPUTPM1_ARAT);
558 bzero(&lapic_et, sizeof(lapic_et));
559 lapic_et.et_name = "LAPIC";
560 lapic_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
562 lapic_et.et_quality = 600;
564 lapic_et.et_flags |= ET_FLAGS_C3STOP;
565 lapic_et.et_quality = 100;
567 if ((cpu_feature & CPUID_TSC) != 0 &&
568 (cpu_feature2 & CPUID2_TSCDLT) != 0 &&
569 tsc_is_invariant && tsc_freq != 0) {
570 lapic_timer_tsc_deadline = 1;
571 TUNABLE_INT_FETCH("hw.lapic_tsc_deadline",
572 &lapic_timer_tsc_deadline);
575 lapic_et.et_frequency = 0;
576 /* We don't know the frequency yet, so start with a guess. */
577 lapic_et.et_min_period = 0x00001000LL;
578 lapic_et.et_max_period = SBT_1S;
579 lapic_et.et_start = lapic_et_start;
580 lapic_et.et_stop = lapic_et_stop;
581 lapic_et.et_priv = NULL;
582 et_register(&lapic_et);
586 * Set lapic_eoi_suppression after lapic_enable(), so as not to
587 * enable suppression in the hardware prematurely. Note that
588 * by default we enable suppression even when the system has only
589 * one I/O APIC, since otherwise the EOI is broadcast to all APIC
590 * agents, including CPUs.
592 * It seems that at least some KVM versions report the
593 * EOI_SUPPRESSION bit, but auto-EOI does not work.
595 ver = lapic_read32(LAPIC_VERSION);
596 if ((ver & APIC_VER_EOI_SUPPRESSION) != 0) {
597 lapic_eoi_suppression = 1;
598 if (vm_guest == VM_GUEST_KVM) {
601 "KVM -- disabling lapic eoi suppression\n");
602 lapic_eoi_suppression = 0;
604 TUNABLE_INT_FETCH("hw.lapic_eoi_suppression",
605 &lapic_eoi_suppression);
611 * Calibrate the busy loop waiting for IPI ack in xAPIC mode.
612 * lapic_ipi_wait_mult contains the number of iterations which
613 * approximately delay execution for 1 microsecond (the
614 * argument to native_lapic_ipi_wait() is in microseconds).
616 * We assume that TSC is present and already measured.
617 * Possible TSC frequency jumps are irrelevant to the
618 * calibration loop below, the CPU clock management code is
619 * not yet started, and we do not enter sleep states.
621 KASSERT((cpu_feature & CPUID_TSC) != 0 && tsc_freq != 0,
622 ("TSC not initialized"));
625 for (rx = 0; rx < LOOPS; rx++) {
626 (void)lapic_read_icr_lo();
630 r1 = tsc_freq * LOOPS;
632 lapic_ipi_wait_mult = r1 >= r2 ? r1 / r2 : 1;
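/*
 * That is, roughly the number of ICR_LO reads that complete in one
 * microsecond, clamped to a minimum of 1.
 */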
634 printf("LAPIC: ipi_wait() us multiplier %ju (r %ju "
635 "tsc %ju)\n", (uintmax_t)lapic_ipi_wait_mult,
636 (uintmax_t)r, (uintmax_t)tsc_freq);
644 * Create a local APIC instance.
647 native_lapic_create(u_int apic_id, int boot_cpu)
651 if (apic_id > max_apic_id) {
652 printf("APIC: Ignoring local APIC with ID %d\n", apic_id);
654 panic("Can't ignore BSP");
657 KASSERT(!lapics[apic_id].la_present, ("duplicate local APIC %u",
661 * Assume no local LVT overrides and a cluster of 0 and
662 * intra-cluster ID of 0.
664 lapics[apic_id].la_present = 1;
665 lapics[apic_id].la_id = apic_id;
666 for (i = 0; i <= APIC_LVT_MAX; i++) {
667 lapics[apic_id].la_lvts[i] = lvts[i];
668 lapics[apic_id].la_lvts[i].lvt_active = 0;
670 for (i = 0; i <= APIC_ELVT_MAX; i++) {
671 lapics[apic_id].la_elvts[i] = elvts[i];
672 lapics[apic_id].la_elvts[i].lvt_active = 0;
674 for (i = 0; i <= APIC_NUM_IOINTS; i++)
675 lapics[apic_id].la_ioint_irqs[i] = IRQ_FREE;
676 lapics[apic_id].la_ioint_irqs[IDT_SYSCALL - APIC_IO_INTS] = IRQ_SYSCALL;
677 lapics[apic_id].la_ioint_irqs[APIC_TIMER_INT - APIC_IO_INTS] =
680 lapics[apic_id].la_ioint_irqs[IDT_DTRACE_RET - APIC_IO_INTS] =
684 lapics[apic_id].la_ioint_irqs[IDT_EVTCHN - APIC_IO_INTS] = IRQ_EVTCHN;
688 cpu_add(apic_id, boot_cpu);
692 static inline uint32_t
693 amd_read_ext_features(void)
697 if (cpu_vendor_id != CPU_VENDOR_AMD &&
698 cpu_vendor_id != CPU_VENDOR_HYGON)
700 version = lapic_read32(LAPIC_VERSION);
701 if ((version & APIC_VER_AMD_EXT_SPACE) != 0)
702 return (lapic_read32(LAPIC_EXT_FEATURES));
707 static inline uint32_t
708 amd_read_elvt_count(void)
713 extf = amd_read_ext_features();
714 count = (extf & APIC_EXTF_ELVT_MASK) >> APIC_EXTF_ELVT_SHIFT;
715 count = min(count, APIC_ELVT_MAX + 1);
720 * Dump contents of local APIC registers
723 native_lapic_dump(const char* str)
731 version = lapic_read32(LAPIC_VERSION);
732 maxlvt = (version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
733 printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
734 printf(" ID: 0x%08x VER: 0x%08x LDR: 0x%08x DFR: 0x%08x",
735 lapic_read32(LAPIC_ID), version,
736 lapic_read32(LAPIC_LDR), x2apic_mode ? 0 : lapic_read32(LAPIC_DFR));
737 if ((cpu_feature2 & CPUID2_X2APIC) != 0)
738 printf(" x2APIC: %d", x2apic_mode);
739 printf("\n lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
740 lapic_read32(LAPIC_LVT_LINT0), lapic_read32(LAPIC_LVT_LINT1),
741 lapic_read32(LAPIC_TPR), lapic_read32(LAPIC_SVR));
742 printf(" timer: 0x%08x therm: 0x%08x err: 0x%08x",
743 lapic_read32(LAPIC_LVT_TIMER), lapic_read32(LAPIC_LVT_THERMAL),
744 lapic_read32(LAPIC_LVT_ERROR));
745 if (maxlvt >= APIC_LVT_PMC)
746 printf(" pmc: 0x%08x", lapic_read32(LAPIC_LVT_PCINT));
748 if (maxlvt >= APIC_LVT_CMCI)
749 printf(" cmci: 0x%08x\n", lapic_read32(LAPIC_LVT_CMCI));
750 extf = amd_read_ext_features();
752 printf(" AMD ext features: 0x%08x", extf);
753 elvt_count = amd_read_elvt_count();
754 for (i = 0; i < elvt_count; i++)
755 printf("%s elvt%d: 0x%08x", (i % 4) ? "" : "\n ", i,
756 lapic_read32(LAPIC_EXT_LVT0 + i));
762 native_lapic_xapic_mode(void)
766 saveintr = intr_disable();
768 native_lapic_enable_x2apic();
769 intr_restore(saveintr);
773 native_lapic_setup(int boot)
782 saveintr = intr_disable();
784 la = &lapics[lapic_id()];
785 KASSERT(la->la_present, ("missing APIC structure"));
786 version = lapic_read32(LAPIC_VERSION);
787 maxlvt = (version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
789 /* Initialize the TPR to allow all interrupts. */
792 /* Set up the spurious vector and enable the local APIC. */
795 /* Program LINT[01] LVT entries. */
796 lapic_write32(LAPIC_LVT_LINT0, lvt_mode(la, APIC_LVT_LINT0,
797 lapic_read32(LAPIC_LVT_LINT0)));
798 lapic_write32(LAPIC_LVT_LINT1, lvt_mode(la, APIC_LVT_LINT1,
799 lapic_read32(LAPIC_LVT_LINT1)));
801 /* Program the PMC LVT entry if present. */
802 if (maxlvt >= APIC_LVT_PMC) {
803 lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
808 * Program the timer LVT. Calibration is deferred until it is certain
809 * that we have a reliable timecounter.
811 la->lvt_timer_base = lvt_mode(la, APIC_LVT_TIMER,
812 lapic_read32(LAPIC_LVT_TIMER));
813 la->lvt_timer_last = la->lvt_timer_base;
814 lapic_write32(LAPIC_LVT_TIMER, la->lvt_timer_base);
817 la->la_timer_mode = LAT_MODE_UNDEF;
818 else if (la->la_timer_mode != LAT_MODE_UNDEF) {
819 KASSERT(la->la_timer_period != 0, ("lapic%u: zero divisor",
821 switch (la->la_timer_mode) {
822 case LAT_MODE_PERIODIC:
823 lapic_timer_set_divisor(lapic_timer_divisor);
824 lapic_timer_periodic(la);
826 case LAT_MODE_ONESHOT:
827 lapic_timer_set_divisor(lapic_timer_divisor);
828 lapic_timer_oneshot(la);
830 case LAT_MODE_DEADLINE:
831 lapic_timer_deadline(la);
834 panic("corrupted la_timer_mode %p %d", la,
839 /* Program error LVT and clear any existing errors. */
840 lapic_write32(LAPIC_LVT_ERROR, lvt_mode(la, APIC_LVT_ERROR,
841 lapic_read32(LAPIC_LVT_ERROR)));
842 lapic_write32(LAPIC_ESR, 0);
844 /* XXX: Thermal LVT */
846 /* Program the CMCI LVT entry if present. */
847 if (maxlvt >= APIC_LVT_CMCI) {
848 lapic_write32(LAPIC_LVT_CMCI, lvt_mode(la, APIC_LVT_CMCI,
849 lapic_read32(LAPIC_LVT_CMCI)));
852 elvt_count = amd_read_elvt_count();
853 for (i = 0; i < elvt_count; i++) {
854 if (la->la_elvts[i].lvt_active)
855 lapic_write32(LAPIC_EXT_LVT0 + i,
856 elvt_mode(la, i, lapic_read32(LAPIC_EXT_LVT0 + i)));
859 intr_restore(saveintr);
863 native_lapic_intrcnt(void *dummy __unused)
867 char buf[MAXCOMLEN + 1];
869 /* If there are no APICs, skip this function. */
873 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
874 la = &lapics[pc->pc_apic_id];
878 snprintf(buf, sizeof(buf), "cpu%d:timer", pc->pc_cpuid);
879 intrcnt_add(buf, &la->la_timer_count);
882 SYSINIT(native_lapic_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, native_lapic_intrcnt,
886 native_lapic_reenable_pmc(void)
891 value = lapic_read32(LAPIC_LVT_PCINT);
892 value &= ~APIC_LVT_M;
893 lapic_write32(LAPIC_LVT_PCINT, value);
899 lapic_update_pmc(void *dummy)
903 la = &lapics[lapic_id()];
904 lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
905 lapic_read32(LAPIC_LVT_PCINT)));
910 native_lapic_calibrate_timer(void)
916 /* Fail if the local APIC is not present. */
917 if (!x2apic_mode && lapic_map == NULL)
921 intr = intr_disable();
922 la = &lapics[lapic_id()];
924 lapic_calibrate_initcount(la);
928 if (lapic_timer_tsc_deadline && bootverbose) {
929 printf("lapic: deadline tsc mode, Frequency %ju Hz\n",
930 (uintmax_t)tsc_freq);
935 native_lapic_enable_pmc(void)
941 /* Fail if the local APIC is not present. */
942 if (!x2apic_mode && lapic_map == NULL)
946 /* Fail if the PMC LVT is not present. */
947 maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
948 if (maxlvt < APIC_LVT_PMC)
951 lvts[APIC_LVT_PMC].lvt_masked = 0;
953 #ifdef EARLY_AP_STARTUP
954 MPASS(mp_ncpus == 1 || smp_started);
955 smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
959 * If hwpmc was loaded at boot time then the APs may not be
960 * started yet. In that case, don't forward the request to
961 * them as they will program the lvt when they start.
964 smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
967 lapic_update_pmc(NULL);
976 native_lapic_disable_pmc(void)
982 /* Fail if the local APIC is not present. */
983 if (!x2apic_mode && lapic_map == NULL)
987 /* Fail if the PMC LVT is not present. */
988 maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
989 if (maxlvt < APIC_LVT_PMC)
992 lvts[APIC_LVT_PMC].lvt_masked = 1;
995 /* The APs should always be started when hwpmc is unloaded. */
996 KASSERT(mp_ncpus == 1 || smp_started, ("hwpmc unloaded too early"));
998 smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
1003 lapic_calibrate_initcount_cpuid_vm(void)
1008 /* Get value from CPUID leaf if possible. */
1009 if (vm_guest == VM_GUEST_NO)
1011 if (hv_high < 0x40000010)
1013 do_cpuid(0x40000010, regs);
1014 freq = (uint64_t)(regs[1]) * 1000;
1016 /* Pick timer divisor. */
1017 lapic_timer_divisor = 2;
1019 if (freq / lapic_timer_divisor < APIC_TIMER_MAX_COUNT)
1021 lapic_timer_divisor <<= 1;
1022 } while (lapic_timer_divisor <= 128);
1023 if (lapic_timer_divisor > 128)
1026 /* Record divided frequency. */
1027 count_freq = freq / lapic_timer_divisor;
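/*
 * For example, a hypervisor reporting a 1 GHz timer frequency yields
 * lapic_timer_divisor = 2 and count_freq = 500 MHz, since the divided
 * frequency already fits in the 32-bit initial-count register.
 */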
1028 return (count_freq != 0);
1032 cb_lapic_getcount(void)
1035 return (APIC_TIMER_MAX_COUNT - lapic_read32(LAPIC_CCR_TIMER));
1039 lapic_calibrate_initcount(struct lapic *la)
1043 if (lapic_calibrate_initcount_cpuid_vm())
1046 /* Calibrate the APIC timer frequency. */
1047 lapic_timer_set_divisor(2);
1048 lapic_timer_oneshot_nointr(la, APIC_TIMER_MAX_COUNT);
1049 fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX);
1050 freq = clockcalib(cb_lapic_getcount, "lapic");
1051 fpu_kern_leave(curthread, NULL);
1053 /* Pick a different divisor if necessary. */
1054 lapic_timer_divisor = 2;
1056 if (freq * 2 / lapic_timer_divisor < APIC_TIMER_MAX_COUNT)
1058 lapic_timer_divisor <<= 1;
1059 } while (lapic_timer_divisor <= 128);
1060 if (lapic_timer_divisor > 128)
1061 panic("lapic: Divisor too big");
1062 count_freq = freq * 2 / lapic_timer_divisor;
1065 printf("lapic: Divisor %lu, Frequency %lu Hz\n",
1066 lapic_timer_divisor, count_freq);
1071 lapic_change_mode(struct eventtimer *et, struct lapic *la,
1072 enum lat_timer_mode newmode)
1074 if (la->la_timer_mode == newmode)
1077 case LAT_MODE_PERIODIC:
1078 lapic_timer_set_divisor(lapic_timer_divisor);
1079 et->et_frequency = count_freq;
1081 case LAT_MODE_DEADLINE:
1082 et->et_frequency = tsc_freq;
1084 case LAT_MODE_ONESHOT:
1085 lapic_timer_set_divisor(lapic_timer_divisor);
1086 et->et_frequency = count_freq;
1089 panic("lapic_change_mode %d", newmode);
1091 la->la_timer_mode = newmode;
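/*
 * et_min_period and et_max_period are sbintime_t values (32.32
 * fixed-point seconds), so (N << 32) / et_frequency below expresses
 * a duration of N timer ticks at the selected mode's frequency.
 */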
1092 et->et_min_period = (0x00000002LLU << 32) / et->et_frequency;
1093 et->et_max_period = (0xfffffffeLLU << 32) / et->et_frequency;
1097 lapic_et_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
1101 la = &lapics[PCPU_GET(apic_id)];
1103 lapic_change_mode(et, la, LAT_MODE_PERIODIC);
1104 la->la_timer_period = ((uint32_t)et->et_frequency * period) >>
1106 lapic_timer_periodic(la);
1107 } else if (lapic_timer_tsc_deadline) {
1108 lapic_change_mode(et, la, LAT_MODE_DEADLINE);
1109 la->la_timer_period = (et->et_frequency * first) >> 32;
1110 lapic_timer_deadline(la);
1112 lapic_change_mode(et, la, LAT_MODE_ONESHOT);
1113 la->la_timer_period = ((uint32_t)et->et_frequency * first) >>
1115 lapic_timer_oneshot(la);
1121 lapic_et_stop(struct eventtimer *et)
1125 la = &lapics[PCPU_GET(apic_id)];
1126 lapic_timer_stop(la);
1127 la->la_timer_mode = LAT_MODE_UNDEF;
1132 native_lapic_disable(void)
1136 /* Software disable the local APIC. */
1137 value = lapic_read32(LAPIC_SVR);
1138 value &= ~APIC_SVR_SWEN;
1139 lapic_write32(LAPIC_SVR, value);
1147 /* Program the spurious vector to enable the local APIC. */
1148 value = lapic_read32(LAPIC_SVR);
1149 value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
1150 value |= APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT;
1151 if (lapic_eoi_suppression)
1152 value |= APIC_SVR_EOI_SUPPRESSION;
1153 lapic_write32(LAPIC_SVR, value);
1156 /* Reset the local APIC on the BSP during resume. */
1158 lapic_resume(struct pic *pic, bool suspend_cancelled)
1165 native_lapic_id(void)
1169 KASSERT(x2apic_mode || lapic_map != NULL, ("local APIC is not mapped"));
1170 v = lapic_read32(LAPIC_ID);
1172 v >>= APIC_ID_SHIFT;
1177 native_lapic_intr_pending(u_int vector)
1182 * The IRR registers are an array of registers each of which
1183 * only describes 32 interrupts in the low 32 bits. Thus, we
1184 * divide the vector by 32 to get the register index.
1185 * Finally, we take the vector modulo 32 to determine the
1186 * individual bit to test.
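 * For example, vector 65 selects register LAPIC_IRR0 + 65 / 32 = LAPIC_IRR2
 * and bit 65 % 32 = 1 within it.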
1188 irr = lapic_read32(LAPIC_IRR0 + vector / 32);
1189 return (irr & 1 << (vector % 32));
1193 native_lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
1197 KASSERT(lapics[apic_id].la_present, ("%s: APIC %u doesn't exist",
1198 __func__, apic_id));
1199 KASSERT(cluster <= APIC_MAX_CLUSTER, ("%s: cluster %u too big",
1200 __func__, cluster));
1201 KASSERT(cluster_id <= APIC_MAX_INTRACLUSTER_ID,
1202 ("%s: intra cluster id %u too big", __func__, cluster_id));
1203 la = &lapics[apic_id];
1204 la->la_cluster = cluster;
1205 la->la_cluster_id = cluster_id;
1209 native_lapic_set_lvt_mask(u_int apic_id, u_int pin, u_char masked)
1212 if (pin > APIC_LVT_MAX)
1214 if (apic_id == APIC_ID_ALL) {
1215 lvts[pin].lvt_masked = masked;
1219 KASSERT(lapics[apic_id].la_present,
1220 ("%s: missing APIC %u", __func__, apic_id));
1221 lapics[apic_id].la_lvts[pin].lvt_masked = masked;
1222 lapics[apic_id].la_lvts[pin].lvt_active = 1;
1224 printf("lapic%u:", apic_id);
1227 printf(" LINT%u %s\n", pin, masked ? "masked" : "unmasked");
1232 native_lapic_set_lvt_mode(u_int apic_id, u_int pin, u_int32_t mode)
1236 if (pin > APIC_LVT_MAX)
1238 if (apic_id == APIC_ID_ALL) {
1243 KASSERT(lapics[apic_id].la_present,
1244 ("%s: missing APIC %u", __func__, apic_id));
1245 lvt = &lapics[apic_id].la_lvts[pin];
1246 lvt->lvt_active = 1;
1248 printf("lapic%u:", apic_id);
1250 lvt->lvt_mode = mode;
1252 case APIC_LVT_DM_NMI:
1253 case APIC_LVT_DM_SMI:
1254 case APIC_LVT_DM_INIT:
1255 case APIC_LVT_DM_EXTINT:
1256 lvt->lvt_edgetrigger = 1;
1257 lvt->lvt_activehi = 1;
1258 if (mode == APIC_LVT_DM_EXTINT)
1259 lvt->lvt_masked = 1;
1261 lvt->lvt_masked = 0;
1264 panic("Unsupported delivery mode: 0x%x\n", mode);
1267 printf(" Routing ");
1269 case APIC_LVT_DM_NMI:
1272 case APIC_LVT_DM_SMI:
1275 case APIC_LVT_DM_INIT:
1278 case APIC_LVT_DM_EXTINT:
1282 printf(" -> LINT%u\n", pin);
1288 native_lapic_set_lvt_polarity(u_int apic_id, u_int pin, enum intr_polarity pol)
1291 if (pin > APIC_LVT_MAX || pol == INTR_POLARITY_CONFORM)
1293 if (apic_id == APIC_ID_ALL) {
1294 lvts[pin].lvt_activehi = (pol == INTR_POLARITY_HIGH);
1298 KASSERT(lapics[apic_id].la_present,
1299 ("%s: missing APIC %u", __func__, apic_id));
1300 lapics[apic_id].la_lvts[pin].lvt_active = 1;
1301 lapics[apic_id].la_lvts[pin].lvt_activehi =
1302 (pol == INTR_POLARITY_HIGH);
1304 printf("lapic%u:", apic_id);
1307 printf(" LINT%u polarity: %s\n", pin,
1308 pol == INTR_POLARITY_HIGH ? "high" : "low");
1313 native_lapic_set_lvt_triggermode(u_int apic_id, u_int pin,
1314 enum intr_trigger trigger)
1317 if (pin > APIC_LVT_MAX || trigger == INTR_TRIGGER_CONFORM)
1319 if (apic_id == APIC_ID_ALL) {
1320 lvts[pin].lvt_edgetrigger = (trigger == INTR_TRIGGER_EDGE);
1324 KASSERT(lapics[apic_id].la_present,
1325 ("%s: missing APIC %u", __func__, apic_id));
1326 lapics[apic_id].la_lvts[pin].lvt_edgetrigger =
1327 (trigger == INTR_TRIGGER_EDGE);
1328 lapics[apic_id].la_lvts[pin].lvt_active = 1;
1330 printf("lapic%u:", apic_id);
1333 printf(" LINT%u trigger: %s\n", pin,
1334 trigger == INTR_TRIGGER_EDGE ? "edge" : "level");
1339 * Adjust the TPR of the current CPU so that it blocks all interrupts below
1340 * the passed in vector.
1343 lapic_set_tpr(u_int vector)
1346 lapic_write32(LAPIC_TPR, vector);
1350 tpr = lapic_read32(LAPIC_TPR) & ~APIC_TPR_PRIO;
1352 lapic_write32(LAPIC_TPR, tpr);
1357 native_lapic_eoi(void)
1360 lapic_write32_nofence(LAPIC_EOI, 0);
1364 lapic_handle_intr(int vector, struct trapframe *frame)
1366 struct intsrc *isrc;
1368 /* The frame may have been written into a poisoned region. */
1369 kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
1370 trap_check_kstack();
1372 isrc = intr_lookup_source(apic_idt_to_irq(PCPU_GET(apic_id),
1374 intr_execute_handlers(isrc, frame);
1378 lapic_handle_timer(struct trapframe *frame)
1381 struct trapframe *oldframe;
1384 /* Send EOI first thing. */
1387 /* The frame may have been written into a poisoned region. */
1388 kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
1389 trap_check_kstack();
1391 #if defined(SMP) && !defined(SCHED_ULE)
1393 * Don't do any accounting for the disabled HTT cores, since it
1394 * will provide misleading numbers for the userland.
1396 * No locking is necessary here, since even if we lose the race
1397 * when hlt_cpus_mask changes it is not a big deal, really.
1399 * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
1400 * and unlike other schedulers it actually schedules threads to those CPUs.
1403 if (CPU_ISSET(PCPU_GET(cpuid), &hlt_cpus_mask))
1407 /* Look up our local APIC structure for the tick counters. */
1408 la = &lapics[PCPU_GET(apic_id)];
1409 (*la->la_timer_count)++;
1411 if (lapic_et.et_active) {
1413 td->td_intr_nesting_level++;
1414 oldframe = td->td_intr_frame;
1415 td->td_intr_frame = frame;
1416 lapic_et.et_event_cb(&lapic_et, lapic_et.et_arg);
1417 td->td_intr_frame = oldframe;
1418 td->td_intr_nesting_level--;
1424 lapic_timer_set_divisor(u_int divisor)
1427 KASSERT(powerof2(divisor), ("lapic: invalid divisor %u", divisor));
1428 KASSERT(ffs(divisor) <= nitems(lapic_timer_divisors),
1429 ("lapic: invalid divisor %u", divisor));
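/*
 * divisor is a power of two in [1, 128]; ffs(divisor) - 1 indexes the
 * matching APIC_TDCR_* encoding above (e.g., divisor 8 selects
 * lapic_timer_divisors[3] == APIC_TDCR_8).
 */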
1430 lapic_write32(LAPIC_DCR_TIMER, lapic_timer_divisors[ffs(divisor) - 1]);
1434 lapic_timer_oneshot(struct lapic *la)
1438 value = la->lvt_timer_base;
1439 value &= ~(APIC_LVTT_TM | APIC_LVT_M);
1440 value |= APIC_LVTT_TM_ONE_SHOT;
1441 la->lvt_timer_last = value;
1442 lapic_write32(LAPIC_LVT_TIMER, value);
1443 lapic_write32(LAPIC_ICR_TIMER, la->la_timer_period);
1447 lapic_timer_oneshot_nointr(struct lapic *la, uint32_t count)
1451 value = la->lvt_timer_base;
1452 value &= ~APIC_LVTT_TM;
1453 value |= APIC_LVTT_TM_ONE_SHOT | APIC_LVT_M;
1454 la->lvt_timer_last = value;
1455 lapic_write32(LAPIC_LVT_TIMER, value);
1456 lapic_write32(LAPIC_ICR_TIMER, count);
1460 lapic_timer_periodic(struct lapic *la)
1464 value = la->lvt_timer_base;
1465 value &= ~(APIC_LVTT_TM | APIC_LVT_M);
1466 value |= APIC_LVTT_TM_PERIODIC;
1467 la->lvt_timer_last = value;
1468 lapic_write32(LAPIC_LVT_TIMER, value);
1469 lapic_write32(LAPIC_ICR_TIMER, la->la_timer_period);
1473 lapic_timer_deadline(struct lapic *la)
1477 value = la->lvt_timer_base;
1478 value &= ~(APIC_LVTT_TM | APIC_LVT_M);
1479 value |= APIC_LVTT_TM_TSCDLT;
1480 if (value != la->lvt_timer_last) {
1481 la->lvt_timer_last = value;
1482 lapic_write32_nofence(LAPIC_LVT_TIMER, value);
1486 wrmsr(MSR_TSC_DEADLINE, la->la_timer_period + rdtsc());
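/*
 * The TSC-deadline MSR takes an absolute TSC value, which is why the
 * relative period is added to the current rdtsc() reading above.
 */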
1490 lapic_timer_stop(struct lapic *la)
1494 if (la->la_timer_mode == LAT_MODE_DEADLINE) {
1495 wrmsr(MSR_TSC_DEADLINE, 0);
1498 value = la->lvt_timer_base;
1499 value &= ~APIC_LVTT_TM;
1500 value |= APIC_LVT_M;
1501 la->lvt_timer_last = value;
1502 lapic_write32(LAPIC_LVT_TIMER, value);
1507 lapic_handle_cmc(void)
1509 trap_check_kstack();
1516 * Called from the mca_init() to activate the CMC interrupt if this CPU is
1517 * responsible for monitoring any MC banks for CMC events. Since mca_init()
1518 * is called prior to lapic_setup() during boot, this just needs to unmask
1519 * this CPU's LVT_CMCI entry.
1522 native_lapic_enable_cmc(void)
1527 if (!x2apic_mode && lapic_map == NULL)
1530 apic_id = PCPU_GET(apic_id);
1531 KASSERT(lapics[apic_id].la_present,
1532 ("%s: missing APIC %u", __func__, apic_id));
1533 lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_masked = 0;
1534 lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_active = 1;
1538 native_lapic_enable_mca_elvt(void)
1545 if (lapic_map == NULL)
1549 apic_id = PCPU_GET(apic_id);
1550 KASSERT(lapics[apic_id].la_present,
1551 ("%s: missing APIC %u", __func__, apic_id));
1552 elvt_count = amd_read_elvt_count();
1553 if (elvt_count <= APIC_ELVT_MCA)
1556 value = lapic_read32(LAPIC_EXT_LVT0 + APIC_ELVT_MCA);
1557 if ((value & APIC_LVT_M) == 0) {
1559 printf("AMD MCE Thresholding Extended LVT is already active\n");
1560 return (APIC_ELVT_MCA);
1562 lapics[apic_id].la_elvts[APIC_ELVT_MCA].lvt_masked = 0;
1563 lapics[apic_id].la_elvts[APIC_ELVT_MCA].lvt_active = 1;
1564 return (APIC_ELVT_MCA);
1568 lapic_handle_error(void)
1572 trap_check_kstack();
1575 * Read the contents of the error status register. Write to
1576 * the register first before reading from it to force the APIC
1577 * to update its value to indicate any errors that have
1578 * occurred since the previous write to the register.
1580 lapic_write32(LAPIC_ESR, 0);
1581 esr = lapic_read32(LAPIC_ESR);
1583 printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
1588 native_apic_cpuid(u_int apic_id)
1591 return apic_cpuids[apic_id];
1597 /* Request a free IDT vector to be used by the specified IRQ. */
1599 native_apic_alloc_vector(u_int apic_id, u_int irq)
1603 KASSERT(irq < num_io_irqs, ("Invalid IRQ %u", irq));
1606 * Search for a free vector. Currently we just use a very simple
1607 * algorithm to find the first free vector.
1609 mtx_lock_spin(&icu_lock);
1610 for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
1611 if (lapics[apic_id].la_ioint_irqs[vector] != IRQ_FREE)
1613 lapics[apic_id].la_ioint_irqs[vector] = irq;
1614 mtx_unlock_spin(&icu_lock);
1615 return (vector + APIC_IO_INTS);
1617 mtx_unlock_spin(&icu_lock);
1622 * Request 'count' free contiguous IDT vectors to be used by 'count'
1623 * IRQs. 'count' must be a power of two and the vectors will be
1624 * aligned on a boundary of 'align'. If the request cannot be
1625 * satisfied, 0 is returned.
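 * For example, a request with count = 4 and align = 4 (the typical shape of
 * a multi-message MSI allocation) returns four consecutive vectors whose
 * starting offset within the I/O interrupt vector range is a multiple of 4.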
1628 native_apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align)
1630 u_int first, run, vector;
1632 KASSERT(powerof2(count), ("bad count"));
1633 KASSERT(powerof2(align), ("bad align"));
1634 KASSERT(align >= count, ("align < count"));
1636 for (run = 0; run < count; run++)
1637 KASSERT(irqs[run] < num_io_irqs, ("Invalid IRQ %u at index %u",
1642 * Search for 'count' free vectors. As with apic_alloc_vector(),
1643 * this just uses a simple first fit algorithm.
1647 mtx_lock_spin(&icu_lock);
1648 for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
1649 /* Vector is in use, end run. */
1650 if (lapics[apic_id].la_ioint_irqs[vector] != IRQ_FREE) {
1656 /* Start a new run if run == 0 and vector is aligned. */
1658 if ((vector & (align - 1)) != 0)
1664 /* Keep looping if the run isn't long enough yet. */
1668 /* Found a run, assign IRQs and return the first vector. */
1669 for (vector = 0; vector < count; vector++)
1670 lapics[apic_id].la_ioint_irqs[first + vector] =
1672 mtx_unlock_spin(&icu_lock);
1673 return (first + APIC_IO_INTS);
1675 mtx_unlock_spin(&icu_lock);
1676 printf("APIC: Couldn't find APIC vectors for %u IRQs\n", count);
1681 * Enable a vector for a particular apic_id. Since all lapics share idt
1682 * entries and ioint_handlers this enables the vector on all lapics. lapics
1683 * which do not have the vector configured would report spurious interrupts should it fire.
1687 native_apic_enable_vector(u_int apic_id, u_int vector)
1690 KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1691 KASSERT(ioint_handlers[vector / 32] != NULL,
1692 ("No ISR handler for vector %u", vector));
1693 #ifdef KDTRACE_HOOKS
1694 KASSERT(vector != IDT_DTRACE_RET,
1695 ("Attempt to overwrite DTrace entry"));
1697 setidt(vector, (pti ? ioint_pti_handlers : ioint_handlers)[vector / 32],
1698 SDT_APIC, SEL_KPL, GSEL_APIC);
1702 native_apic_disable_vector(u_int apic_id, u_int vector)
1705 KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1706 #ifdef KDTRACE_HOOKS
1707 KASSERT(vector != IDT_DTRACE_RET,
1708 ("Attempt to overwrite DTrace entry"));
1710 KASSERT(ioint_handlers[vector / 32] != NULL,
1711 ("No ISR handler for vector %u", vector));
1714 * We cannot currently clear the IDT entry because other CPUs
1715 * may have a valid vector at this offset.
1717 setidt(vector, pti ? &IDTVEC(rsvd_pti) : &IDTVEC(rsvd), SDT_APIC,
1718 SEL_KPL, GSEL_APIC);
1722 /* Release an APIC vector when it's no longer in use. */
1724 native_apic_free_vector(u_int apic_id, u_int vector, u_int irq)
1728 KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1729 vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1730 ("Vector %u does not map to an IRQ line", vector));
1731 KASSERT(irq < num_io_irqs, ("Invalid IRQ %u", irq));
1732 KASSERT(lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] ==
1733 irq, ("IRQ mismatch"));
1734 #ifdef KDTRACE_HOOKS
1735 KASSERT(vector != IDT_DTRACE_RET,
1736 ("Attempt to overwrite DTrace entry"));
1740 * Bind us to the CPU that owned the vector before freeing it so
1741 * we don't lose an interrupt delivery race.
1746 if (sched_is_bound(td))
1747 panic("apic_free_vector: Thread already bound.\n");
1748 sched_bind(td, apic_cpuid(apic_id));
1751 mtx_lock_spin(&icu_lock);
1752 lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] = IRQ_FREE;
1753 mtx_unlock_spin(&icu_lock);
1761 /* Map an IDT vector (APIC) to an IRQ (interrupt source). */
1763 apic_idt_to_irq(u_int apic_id, u_int vector)
1767 KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1768 vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1769 ("Vector %u does not map to an IRQ line", vector));
1770 #ifdef KDTRACE_HOOKS
1771 KASSERT(vector != IDT_DTRACE_RET,
1772 ("Attempt to overwrite DTrace entry"));
1774 irq = lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS];
1782 * Dump data about APIC IDT vector mappings.
1784 DB_SHOW_COMMAND(apic, db_show_apic)
1786 struct intsrc *isrc;
1791 if (strcmp(modif, "vv") == 0)
1793 else if (strcmp(modif, "v") == 0)
1797 for (apic_id = 0; apic_id <= max_apic_id; apic_id++) {
1798 if (lapics[apic_id].la_present == 0)
1800 db_printf("Interrupts bound to lapic %u\n", apic_id);
1801 for (i = 0; i < APIC_NUM_IOINTS + 1 && !db_pager_quit; i++) {
1802 irq = lapics[apic_id].la_ioint_irqs[i];
1803 if (irq == IRQ_FREE || irq == IRQ_SYSCALL)
1805 #ifdef KDTRACE_HOOKS
1806 if (irq == IRQ_DTRACE_RET)
1810 if (irq == IRQ_EVTCHN)
1813 db_printf("vec 0x%2x -> ", i + APIC_IO_INTS);
1814 if (irq == IRQ_TIMER)
1815 db_printf("lapic timer\n");
1816 else if (irq < num_io_irqs) {
1817 isrc = intr_lookup_source(irq);
1818 if (isrc == NULL || verbose == 0)
1819 db_printf("IRQ %u\n", irq);
1821 db_dump_intr_event(isrc->is_event,
1824 db_printf("IRQ %u ???\n", irq);
1830 dump_mask(const char *prefix, uint32_t v, int base)
1835 for (i = 0; i < 32; i++)
1838 db_printf("%s:", prefix);
1841 db_printf(" %02x", base + i);
1847 /* Show info from the lapic regs for this CPU. */
1848 DB_SHOW_COMMAND(lapic, db_show_lapic)
1852 db_printf("lapic ID = %d\n", lapic_id());
1853 v = lapic_read32(LAPIC_VERSION);
1854 db_printf("version = %d.%d\n", (v & APIC_VER_VERSION) >> 4,
1856 db_printf("max LVT = %d\n", (v & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
1857 v = lapic_read32(LAPIC_SVR);
1858 db_printf("SVR = %02x (%s)\n", v & APIC_SVR_VECTOR,
1859 v & APIC_SVR_ENABLE ? "enabled" : "disabled");
1860 db_printf("TPR = %02x\n", lapic_read32(LAPIC_TPR));
1862 #define dump_field(prefix, regn, index) \
1863 dump_mask(__XSTRING(prefix ## index), \
1864 lapic_read32(LAPIC_ ## regn ## index), \
1867 db_printf("In-service Interrupts:\n");
1868 dump_field(isr, ISR, 0);
1869 dump_field(isr, ISR, 1);
1870 dump_field(isr, ISR, 2);
1871 dump_field(isr, ISR, 3);
1872 dump_field(isr, ISR, 4);
1873 dump_field(isr, ISR, 5);
1874 dump_field(isr, ISR, 6);
1875 dump_field(isr, ISR, 7);
1877 db_printf("TMR Interrupts:\n");
1878 dump_field(tmr, TMR, 0);
1879 dump_field(tmr, TMR, 1);
1880 dump_field(tmr, TMR, 2);
1881 dump_field(tmr, TMR, 3);
1882 dump_field(tmr, TMR, 4);
1883 dump_field(tmr, TMR, 5);
1884 dump_field(tmr, TMR, 6);
1885 dump_field(tmr, TMR, 7);
1887 db_printf("IRR Interrupts:\n");
1888 dump_field(irr, IRR, 0);
1889 dump_field(irr, IRR, 1);
1890 dump_field(irr, IRR, 2);
1891 dump_field(irr, IRR, 3);
1892 dump_field(irr, IRR, 4);
1893 dump_field(irr, IRR, 5);
1894 dump_field(irr, IRR, 6);
1895 dump_field(irr, IRR, 7);
1902 * APIC probing support code. This includes code to manage enumerators.
1905 static SLIST_HEAD(, apic_enumerator) enumerators =
1906 SLIST_HEAD_INITIALIZER(enumerators);
1907 static struct apic_enumerator *best_enum;
1910 apic_register_enumerator(struct apic_enumerator *enumerator)
1913 struct apic_enumerator *apic_enum;
1915 SLIST_FOREACH(apic_enum, &enumerators, apic_next) {
1916 if (apic_enum == enumerator)
1917 panic("%s: Duplicate register of %s", __func__,
1918 enumerator->apic_name);
1921 SLIST_INSERT_HEAD(&enumerators, enumerator, apic_next);
1925 * We have to look for CPUs very, very early because certain subsystems
1926 * want to know how many CPUs we have extremely early on in the boot process.
1930 apic_init(void *dummy __unused)
1932 struct apic_enumerator *enumerator;
1935 /* We only support built-in local APICs. */
1936 if (!(cpu_feature & CPUID_APIC))
1939 /* Don't probe if APIC mode is disabled. */
1940 if (resource_disabled("apic", 0))
1943 /* Probe all the enumerators to find the best match. */
1946 SLIST_FOREACH(enumerator, &enumerators, apic_next) {
1947 retval = enumerator->apic_probe();
1950 if (best_enum == NULL || best < retval) {
1951 best_enum = enumerator;
1955 if (best_enum == NULL) {
1957 printf("APIC: Could not find any APICs.\n");
1959 panic("running without device atpic requires a local APIC");
1965 printf("APIC: Using the %s enumerator.\n",
1966 best_enum->apic_name);
1970 * To work around an erratum, we disable the local APIC on some
1971 * CPUs during early startup. We need to turn the local APIC back
1972 * on for such CPUs now.
1974 ppro_reenable_apic();
1977 /* Probe the CPUs in the system. */
1978 retval = best_enum->apic_probe_cpus();
1980 printf("%s: Failed to probe CPUs: returned %d\n",
1981 best_enum->apic_name, retval);
1984 SYSINIT(apic_init, SI_SUB_TUNABLES - 1, SI_ORDER_SECOND, apic_init, NULL);
1987 * Set up the local APIC. We have to do this prior to starting up the APs
1991 apic_setup_local(void *dummy __unused)
1995 if (best_enum == NULL)
1998 lapics = malloc(sizeof(*lapics) * (max_apic_id + 1), M_LAPIC,
2001 /* Initialize the local APIC. */
2002 retval = best_enum->apic_setup_local();
2004 printf("%s: Failed to setup the local APIC: returned %d\n",
2005 best_enum->apic_name, retval);
2007 SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_SECOND, apic_setup_local, NULL);
2010 * Set up the I/O APICs.
2013 apic_setup_io(void *dummy __unused)
2017 if (best_enum == NULL)
2021 * Local APIC must be registered before other PICs and pseudo PICs
2022 * for proper suspend/resume order.
2024 intr_register_pic(&lapic_pic);
2026 retval = best_enum->apic_setup_io();
2028 printf("%s: Failed to setup I/O APICs: returned %d\n",
2029 best_enum->apic_name, retval);
2032 * Finish setting up the local APIC on the BSP once we know
2033 * how to properly program the LINT pins. In particular, this
2034 * enables the EOI suppression mode, if the LAPIC supports it and
2035 * the user did not disable the mode.
2041 /* Enable the MSI "pic". */
2042 init_ops.msi_init();
2045 xen_intr_alloc_irqs();
2048 SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_THIRD, apic_setup_io, NULL);
2052 * Inter Processor Interrupt functions. The lapic_ipi_*() functions are
2053 * private to the MD code. The public interface for the rest of the
2054 * kernel is defined in mp_machdep.c.
2058 * Wait delay microseconds for the IPI to be sent. If delay is -1, we wait forever.
2062 native_lapic_ipi_wait(int delay)
2066 /* LAPIC_ICR.APIC_DELSTAT_MASK is undefined in x2APIC mode */
2070 for (rx = 0; delay == -1 || rx < lapic_ipi_wait_mult * delay; rx++) {
2071 if ((lapic_read_icr_lo() & APIC_DELSTAT_MASK) ==
2080 native_lapic_ipi_raw(register_t icrlo, u_int dest)
2084 /* XXX: Need more sanity checking of icrlo? */
2085 KASSERT(x2apic_mode || lapic_map != NULL,
2086 ("%s called too early", __func__));
2087 KASSERT(x2apic_mode ||
2088 (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
2089 ("%s: invalid dest field", __func__));
2090 KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
2091 ("%s: reserved bits set in ICR LO register", __func__));
2093 if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
2097 icrhi = dest << APIC_ID_SHIFT;
2098 lapic_write_icr(icrhi, icrlo);
2100 lapic_write_icr_lo(icrlo);
2104 #ifdef DETECT_DEADLOCK
2105 #define AFTER_SPIN 50
2109 native_lapic_ipi_vectored(u_int vector, int dest)
2111 register_t icrlo, destfield;
2113 KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
2114 ("%s: invalid vector %d", __func__, vector));
2118 case APIC_IPI_DEST_SELF:
2119 if (x2apic_mode && vector < IPI_NMI_FIRST) {
2120 lapic_write_self_ipi(vector);
2123 icrlo = APIC_DEST_SELF;
2125 case APIC_IPI_DEST_ALL:
2126 icrlo = APIC_DEST_ALLISELF;
2128 case APIC_IPI_DEST_OTHERS:
2129 icrlo = APIC_DEST_ALLESELF;
2133 KASSERT(x2apic_mode ||
2134 (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
2135 ("%s: invalid destination 0x%x", __func__, dest));
2140 * NMI IPIs are just fake vectors used to send an NMI. Use special rules
2141 * regarding NMIs if passed, otherwise specify the vector.
2143 if (vector >= IPI_NMI_FIRST)
2144 icrlo |= APIC_DELMODE_NMI;
2146 icrlo |= vector | APIC_DELMODE_FIXED;
2147 icrlo |= APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT;
2149 /* Wait for an earlier IPI to finish. */
2150 if (!lapic_ipi_wait(lapic_ds_idle_timeout)) {
2151 if (KERNEL_PANICKED())
2154 panic("APIC: Previous IPI is stuck");
2157 lapic_ipi_raw(icrlo, destfield);
2159 #ifdef DETECT_DEADLOCK
2160 /* Wait for IPI to be delivered. */
2161 if (!lapic_ipi_wait(AFTER_SPIN)) {
2162 #ifdef needsattention
2166 * The above function waits for the message to actually be
2167 * delivered. It breaks out after an arbitrary timeout
2168 * since the message should eventually be delivered (at
2169 * least in theory) and if it wasn't we would catch
2170 * the failure with the check above when the next IPI is sent.
2173 * We could skip this wait entirely, EXCEPT it probably
2174 * protects us from other routines that assume that the
2175 * message was delivered and acted upon when this function returns.
2178 printf("APIC: IPI might be stuck\n");
2179 #else /* !needsattention */
2180 /* Wait until the message is sent, without a timeout. */
2181 while (lapic_read_icr_lo() & APIC_DELSTAT_PEND)
2183 #endif /* needsattention */
2185 #endif /* DETECT_DEADLOCK */
2191 * Since the IDT is shared by all CPUs, the IPI slot update needs to be globally visible.
2194 * Consider the case where an IPI is generated immediately after allocation:
2195 * vector = lapic_ipi_alloc(ipifunc);
2196 * ipi_selected(other_cpus, vector);
2198 * In xAPIC mode a write to ICR_LO has serializing semantics because the
2199 * APIC page is mapped as an uncached region. In x2APIC mode there is an
2200 * explicit 'mfence' before the ICR MSR is written. Therefore in both cases
2201 * the IDT slot update is globally visible before the IPI is delivered.
2204 native_lapic_ipi_alloc(inthand_t *ipifunc)
2206 struct gate_descriptor *ip;
2210 KASSERT(ipifunc != &IDTVEC(rsvd) && ipifunc != &IDTVEC(rsvd_pti),
2211 ("invalid ipifunc %p", ipifunc));
2214 mtx_lock_spin(&icu_lock);
2215 for (idx = IPI_DYN_FIRST; idx <= IPI_DYN_LAST; idx++) {
2217 func = (ip->gd_hioffset << 16) | ip->gd_looffset;
2219 func -= setidt_disp;
2221 if ((!pti && func == (uintptr_t)&IDTVEC(rsvd)) ||
2222 (pti && func == (uintptr_t)&IDTVEC(rsvd_pti))) {
2224 setidt(vector, ipifunc, SDT_APIC, SEL_KPL, GSEL_APIC);
2228 mtx_unlock_spin(&icu_lock);
2233 native_lapic_ipi_free(int vector)
2235 struct gate_descriptor *ip;
2238 KASSERT(vector >= IPI_DYN_FIRST && vector <= IPI_DYN_LAST,
2239 ("%s: invalid vector %d", __func__, vector));
2241 mtx_lock_spin(&icu_lock);
2243 func = (ip->gd_hioffset << 16) | ip->gd_looffset;
2245 func -= setidt_disp;
2247 KASSERT(func != (uintptr_t)&IDTVEC(rsvd) &&
2248 func != (uintptr_t)&IDTVEC(rsvd_pti),
2249 ("invalid idtfunc %#lx", func));
2250 setidt(vector, pti ? &IDTVEC(rsvd_pti) : &IDTVEC(rsvd), SDT_APIC,
2251 SEL_KPL, GSEL_APIC);
2252 mtx_unlock_spin(&icu_lock);