1 /*-
2  * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
3  * Copyright (c) 1996, by Steve Passe
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. The name of the developer may NOT be used to endorse or promote products
12  *    derived from this software without specific prior written permission.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29
30 /*
31  * Local APIC support on Pentium and later processors.
32  */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_atpic.h"
38 #include "opt_hwpmc_hooks.h"
39
40 #include "opt_ddb.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/pcpu.h>
49 #include <sys/proc.h>
50 #include <sys/sched.h>
51 #include <sys/smp.h>
52 #include <sys/sysctl.h>
53 #include <sys/timeet.h>
54
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57
58 #include <x86/apicreg.h>
59 #include <machine/clock.h>
60 #include <machine/cpufunc.h>
61 #include <machine/cputypes.h>
62 #include <machine/frame.h>
63 #include <machine/intr_machdep.h>
64 #include <x86/apicvar.h>
65 #include <x86/mca.h>
66 #include <machine/md_var.h>
67 #include <machine/smp.h>
68 #include <machine/specialreg.h>
69 #include <x86/init.h>
70
71 #ifdef DDB
72 #include <sys/interrupt.h>
73 #include <ddb/ddb.h>
74 #endif
75
76 #ifdef __amd64__
77 #define SDT_APIC        SDT_SYSIGT
78 #define SDT_APICT       SDT_SYSIGT
79 #define GSEL_APIC       0
80 #else
81 #define SDT_APIC        SDT_SYS386IGT
82 #define SDT_APICT       SDT_SYS386TGT
83 #define GSEL_APIC       GSEL(GCODE_SEL, SEL_KPL)
84 #endif
85
86 /* Sanity checks on IDT vectors. */
87 CTASSERT(APIC_IO_INTS + APIC_NUM_IOINTS == APIC_TIMER_INT);
88 CTASSERT(APIC_TIMER_INT < APIC_LOCAL_INTS);
89 CTASSERT(APIC_LOCAL_INTS == 240);
90 CTASSERT(IPI_STOP < APIC_SPURIOUS_INT);
91
92 /* Magic IRQ values for the timer and syscalls. */
93 #define IRQ_TIMER       (NUM_IO_INTS + 1)
94 #define IRQ_SYSCALL     (NUM_IO_INTS + 2)
95 #define IRQ_DTRACE_RET  (NUM_IO_INTS + 3)
96 #define IRQ_EVTCHN      (NUM_IO_INTS + 4)
97
98 enum lat_timer_mode {
99         LAT_MODE_UNDEF =        0,
100         LAT_MODE_PERIODIC =     1,
101         LAT_MODE_ONESHOT =      2,
102         LAT_MODE_DEADLINE =     3,
103 };
104
105 /*
106  * Support for local APICs.  Local APICs manage interrupts on each
107  * individual processor, as opposed to I/O APICs, which receive interrupts
108  * from I/O devices and forward them on to the local APICs.
109  *
110  * Local APICs can also send interrupts to each other, providing the
111  * mechanism for IPIs (interprocessor interrupts).
112  */
113
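#if 0
/*
 * Illustrative sketch only, not part of this file: one way a caller
 * elsewhere in the kernel might use the IPI hooks installed below (through
 * the lapic_ipi_vectored()/lapic_ipi_wait() wrappers in <x86/apicvar.h>)
 * to interrupt another CPU.  The function name and the 50 microsecond
 * timeout are made-up placeholder values for the example.
 */
static void
example_kick_cpu(u_int target_apic_id)
{

        /* Post an AST IPI to the CPU owning the given local APIC ID. */
        lapic_ipi_vectored(IPI_AST, target_apic_id);

        /*
         * In xAPIC mode the sender can poll the ICR delivery status;
         * lapic_ipi_wait() spins for at most the given number of
         * microseconds (see the calibration in native_lapic_init()).
         */
        if (!lapic_ipi_wait(50))
                printf("lapic: IPI to APIC ID %u not acknowledged\n",
                    target_apic_id);
}
#endif
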
114 struct lvt {
115         u_int lvt_edgetrigger:1;
116         u_int lvt_activehi:1;
117         u_int lvt_masked:1;
118         u_int lvt_active:1;
119         u_int lvt_mode:16;
120         u_int lvt_vector:8;
121 };
122
123 struct lapic {
124         struct lvt la_lvts[APIC_LVT_MAX + 1];
125         u_int la_id:8;
126         u_int la_cluster:4;
127         u_int la_cluster_id:2;
128         u_int la_present:1;
129         u_long *la_timer_count;
130         uint64_t la_timer_period;
131         enum lat_timer_mode la_timer_mode;
132         uint32_t lvt_timer_base;
133         uint32_t lvt_timer_last;
134         /* Include IDT_SYSCALL to make indexing easier. */
135         int la_ioint_irqs[APIC_NUM_IOINTS + 1];
136 } static lapics[MAX_APIC_ID + 1];
137
138 /* Global defaults for local APIC LVT entries. */
139 static struct lvt lvts[APIC_LVT_MAX + 1] = {
140         { 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 },  /* LINT0: masked ExtINT */
141         { 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 },     /* LINT1: NMI */
142         { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT },      /* Timer */
143         { 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT },      /* Error */
144         { 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 },     /* PMC */
145         { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT },    /* Thermal */
146         { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_CMC_INT },        /* CMCI */
147 };
148
149 static inthand_t *ioint_handlers[] = {
150         NULL,                   /* 0 - 31 */
151         IDTVEC(apic_isr1),      /* 32 - 63 */
152         IDTVEC(apic_isr2),      /* 64 - 95 */
153         IDTVEC(apic_isr3),      /* 96 - 127 */
154         IDTVEC(apic_isr4),      /* 128 - 159 */
155         IDTVEC(apic_isr5),      /* 160 - 191 */
156         IDTVEC(apic_isr6),      /* 192 - 223 */
157         IDTVEC(apic_isr7),      /* 224 - 255 */
158 };
159
160
161 static u_int32_t lapic_timer_divisors[] = {
162         APIC_TDCR_1, APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
163         APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128
164 };
165
166 extern inthand_t IDTVEC(rsvd);
167
168 volatile char *lapic_map;
169 vm_paddr_t lapic_paddr;
170 int x2apic_mode;
171 int lapic_eoi_suppression;
172 static int lapic_timer_tsc_deadline;
173 static u_long lapic_timer_divisor;
174 static struct eventtimer lapic_et;
175 static uint64_t lapic_ipi_wait_mult;
176
177 SYSCTL_NODE(_hw, OID_AUTO, apic, CTLFLAG_RD, 0, "APIC options");
178 SYSCTL_INT(_hw_apic, OID_AUTO, x2apic_mode, CTLFLAG_RD, &x2apic_mode, 0, "");
179 SYSCTL_INT(_hw_apic, OID_AUTO, eoi_suppression, CTLFLAG_RD,
180     &lapic_eoi_suppression, 0, "");
181 SYSCTL_INT(_hw_apic, OID_AUTO, timer_tsc_deadline, CTLFLAG_RD,
182     &lapic_timer_tsc_deadline, 0, "");
183
184 static uint32_t
185 lapic_read32(enum LAPIC_REGISTERS reg)
186 {
187         uint32_t res;
188
189         if (x2apic_mode) {
190                 res = rdmsr32(MSR_APIC_000 + reg);
191         } else {
192                 res = *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL);
193         }
194         return (res);
195 }
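
/*
 * Worked example (illustrative values): LAPIC_ID is register offset 0x2,
 * so lapic_read32(LAPIC_ID) reads MSR 0x802 (MSR_APIC_000 + 0x2) in x2APIC
 * mode and the 32-bit word at lapic_map + 0x20 (0x2 * LAPIC_MEM_MUL) in
 * xAPIC mode; lapic_write32() below addresses registers the same way.
 */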
196
197 static void
198 lapic_write32(enum LAPIC_REGISTERS reg, uint32_t val)
199 {
200
201         if (x2apic_mode) {
202                 mfence();
203                 wrmsr(MSR_APIC_000 + reg, val);
204         } else {
205                 *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
206         }
207 }
208
209 static void
210 lapic_write32_nofence(enum LAPIC_REGISTERS reg, uint32_t val)
211 {
212
213         if (x2apic_mode) {
214                 wrmsr(MSR_APIC_000 + reg, val);
215         } else {
216                 *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
217         }
218 }
219
220 #ifdef SMP
221 static uint64_t
222 lapic_read_icr(void)
223 {
224         uint64_t v;
225         uint32_t vhi, vlo;
226
227         if (x2apic_mode) {
228                 v = rdmsr(MSR_APIC_000 + LAPIC_ICR_LO);
229         } else {
230                 vhi = lapic_read32(LAPIC_ICR_HI);
231                 vlo = lapic_read32(LAPIC_ICR_LO);
232                 v = ((uint64_t)vhi << 32) | vlo;
233         }
234         return (v);
235 }
236
237 static uint64_t
238 lapic_read_icr_lo(void)
239 {
240
241         return (lapic_read32(LAPIC_ICR_LO));
242 }
243
244 static void
245 lapic_write_icr(uint32_t vhi, uint32_t vlo)
246 {
247         uint64_t v;
248
249         if (x2apic_mode) {
250                 v = ((uint64_t)vhi << 32) | vlo;
251                 mfence();
252                 wrmsr(MSR_APIC_000 + LAPIC_ICR_LO, v);
253         } else {
254                 lapic_write32(LAPIC_ICR_HI, vhi);
255                 lapic_write32(LAPIC_ICR_LO, vlo);
256         }
257 }
258 #endif /* SMP */
259
260 static void
261 native_lapic_enable_x2apic(void)
262 {
263         uint64_t apic_base;
264
265         apic_base = rdmsr(MSR_APICBASE);
266         apic_base |= APICBASE_X2APIC | APICBASE_ENABLED;
267         wrmsr(MSR_APICBASE, apic_base);
268 }
269
270 static void     lapic_enable(void);
271 static void     lapic_resume(struct pic *pic, bool suspend_cancelled);
272 static void     lapic_timer_oneshot(struct lapic *);
273 static void     lapic_timer_oneshot_nointr(struct lapic *, uint32_t);
274 static void     lapic_timer_periodic(struct lapic *);
275 static void     lapic_timer_deadline(struct lapic *);
276 static void     lapic_timer_stop(struct lapic *);
277 static void     lapic_timer_set_divisor(u_int divisor);
278 static uint32_t lvt_mode(struct lapic *la, u_int pin, uint32_t value);
279 static int      lapic_et_start(struct eventtimer *et,
280                     sbintime_t first, sbintime_t period);
281 static int      lapic_et_stop(struct eventtimer *et);
282 static u_int    apic_idt_to_irq(u_int apic_id, u_int vector);
283 static void     lapic_set_tpr(u_int vector);
284
285 struct pic lapic_pic = { .pic_resume = lapic_resume };
286
287 /* Forward declarations for apic_ops */
288 static void     native_lapic_create(u_int apic_id, int boot_cpu);
289 static void     native_lapic_init(vm_paddr_t addr);
290 static void     native_lapic_xapic_mode(void);
291 static void     native_lapic_setup(int boot);
292 static void     native_lapic_dump(const char *str);
293 static void     native_lapic_disable(void);
294 static void     native_lapic_eoi(void);
295 static int      native_lapic_id(void);
296 static int      native_lapic_intr_pending(u_int vector);
297 static u_int    native_apic_cpuid(u_int apic_id);
298 static u_int    native_apic_alloc_vector(u_int apic_id, u_int irq);
299 static u_int    native_apic_alloc_vectors(u_int apic_id, u_int *irqs,
300                     u_int count, u_int align);
301 static void     native_apic_disable_vector(u_int apic_id, u_int vector);
302 static void     native_apic_enable_vector(u_int apic_id, u_int vector);
303 static void     native_apic_free_vector(u_int apic_id, u_int vector, u_int irq);
304 static void     native_lapic_set_logical_id(u_int apic_id, u_int cluster,
305                     u_int cluster_id);
306 static int      native_lapic_enable_pmc(void);
307 static void     native_lapic_disable_pmc(void);
308 static void     native_lapic_reenable_pmc(void);
309 static void     native_lapic_enable_cmc(void);
310 static int      native_lapic_set_lvt_mask(u_int apic_id, u_int lvt,
311                     u_char masked);
312 static int      native_lapic_set_lvt_mode(u_int apic_id, u_int lvt,
313                     uint32_t mode);
314 static int      native_lapic_set_lvt_polarity(u_int apic_id, u_int lvt,
315                     enum intr_polarity pol);
316 static int      native_lapic_set_lvt_triggermode(u_int apic_id, u_int lvt,
317                     enum intr_trigger trigger);
318 #ifdef SMP
319 static void     native_lapic_ipi_raw(register_t icrlo, u_int dest);
320 static void     native_lapic_ipi_vectored(u_int vector, int dest);
321 static int      native_lapic_ipi_wait(int delay);
322 static int      native_lapic_ipi_alloc(inthand_t *ipifunc);
323 static void     native_lapic_ipi_free(int vector);
324 #endif /* SMP */
325
326 struct apic_ops apic_ops = {
327         .create                 = native_lapic_create,
328         .init                   = native_lapic_init,
329         .xapic_mode             = native_lapic_xapic_mode,
330         .setup                  = native_lapic_setup,
331         .dump                   = native_lapic_dump,
332         .disable                = native_lapic_disable,
333         .eoi                    = native_lapic_eoi,
334         .id                     = native_lapic_id,
335         .intr_pending           = native_lapic_intr_pending,
336         .set_logical_id         = native_lapic_set_logical_id,
337         .cpuid                  = native_apic_cpuid,
338         .alloc_vector           = native_apic_alloc_vector,
339         .alloc_vectors          = native_apic_alloc_vectors,
340         .enable_vector          = native_apic_enable_vector,
341         .disable_vector         = native_apic_disable_vector,
342         .free_vector            = native_apic_free_vector,
343         .enable_pmc             = native_lapic_enable_pmc,
344         .disable_pmc            = native_lapic_disable_pmc,
345         .reenable_pmc           = native_lapic_reenable_pmc,
346         .enable_cmc             = native_lapic_enable_cmc,
347 #ifdef SMP
348         .ipi_raw                = native_lapic_ipi_raw,
349         .ipi_vectored           = native_lapic_ipi_vectored,
350         .ipi_wait               = native_lapic_ipi_wait,
351         .ipi_alloc              = native_lapic_ipi_alloc,
352         .ipi_free               = native_lapic_ipi_free,
353 #endif
354         .set_lvt_mask           = native_lapic_set_lvt_mask,
355         .set_lvt_mode           = native_lapic_set_lvt_mode,
356         .set_lvt_polarity       = native_lapic_set_lvt_polarity,
357         .set_lvt_triggermode    = native_lapic_set_lvt_triggermode,
358 };
359
360 static uint32_t
361 lvt_mode(struct lapic *la, u_int pin, uint32_t value)
362 {
363         struct lvt *lvt;
364
365         KASSERT(pin <= APIC_LVT_MAX, ("%s: pin %u out of range", __func__, pin));
366         if (la->la_lvts[pin].lvt_active)
367                 lvt = &la->la_lvts[pin];
368         else
369                 lvt = &lvts[pin];
370
371         value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
372             APIC_LVT_VECTOR);
373         if (lvt->lvt_edgetrigger == 0)
374                 value |= APIC_LVT_TM;
375         if (lvt->lvt_activehi == 0)
376                 value |= APIC_LVT_IIPP_INTALO;
377         if (lvt->lvt_masked)
378                 value |= APIC_LVT_M;
379         value |= lvt->lvt_mode;
380         switch (lvt->lvt_mode) {
381         case APIC_LVT_DM_NMI:
382         case APIC_LVT_DM_SMI:
383         case APIC_LVT_DM_INIT:
384         case APIC_LVT_DM_EXTINT:
385                 if (!lvt->lvt_edgetrigger && bootverbose) {
386                         printf("lapic%u: Forcing LINT%u to edge trigger\n",
387                             la->la_id, pin);
388                         value |= APIC_LVT_TM;
389                 }
390                 /* Use a vector of 0. */
391                 break;
392         case APIC_LVT_DM_FIXED:
393                 value |= lvt->lvt_vector;
394                 break;
395         default:
396                 panic("bad APIC LVT delivery mode: %#x\n", value);
397         }
398         return (value);
399 }
400
401 /*
402  * Map the local APIC and setup necessary interrupt vectors.
403  */
404 static void
405 native_lapic_init(vm_paddr_t addr)
406 {
407         uint64_t r, rx;
408         uint32_t ver;
409         u_int regs[4];
410         int i, arat;
411
412         /*
413          * Enable x2APIC mode if possible. Map the local APIC
414          * registers page.
415          *
416          * Keep the LAPIC registers page mapped uncached for x2APIC
417          * mode too, so that the direct map attribute for the page is
418          * set to uncached.  This is needed to work around CPU errata
419          * present on all Intel processors.
420          */
421         KASSERT(trunc_page(addr) == addr,
422             ("local APIC not aligned on a page boundary"));
423         lapic_paddr = addr;
424         lapic_map = pmap_mapdev(addr, PAGE_SIZE);
425         if (x2apic_mode) {
426                 native_lapic_enable_x2apic();
427                 lapic_map = NULL;
428         }
429
430         /* Setup the spurious interrupt handler. */
431         setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
432             GSEL_APIC);
433
434         /* Perform basic initialization of the BSP's local APIC. */
435         lapic_enable();
436
437         /* Set BSP's per-CPU local APIC ID. */
438         PCPU_SET(apic_id, lapic_id());
439
440         /* Local APIC timer interrupt. */
441         setidt(APIC_TIMER_INT, IDTVEC(timerint), SDT_APIC, SEL_KPL, GSEL_APIC);
442
443         /* Local APIC error interrupt. */
444         setidt(APIC_ERROR_INT, IDTVEC(errorint), SDT_APIC, SEL_KPL, GSEL_APIC);
445
446         /* XXX: Thermal interrupt */
447
448         /* Local APIC CMCI. */
449         setidt(APIC_CMC_INT, IDTVEC(cmcint), SDT_APICT, SEL_KPL, GSEL_APIC);
450
451         if ((resource_int_value("apic", 0, "clock", &i) != 0 || i != 0)) {
452                 arat = 0;
453                 /* Intel CPUID 0x06 EAX[2] set if APIC timer runs in C3. */
454                 if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_high >= 6) {
455                         do_cpuid(0x06, regs);
456                         if ((regs[0] & CPUTPM1_ARAT) != 0)
457                                 arat = 1;
458                 }
459                 bzero(&lapic_et, sizeof(lapic_et));
460                 lapic_et.et_name = "LAPIC";
461                 lapic_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
462                     ET_FLAGS_PERCPU;
463                 lapic_et.et_quality = 600;
464                 if (!arat) {
465                         lapic_et.et_flags |= ET_FLAGS_C3STOP;
466                         lapic_et.et_quality -= 200;
467                 } else if ((cpu_feature & CPUID_TSC) != 0 &&
468                     (cpu_feature2 & CPUID2_TSCDLT) != 0 &&
469                     tsc_is_invariant && tsc_freq != 0) {
470                         lapic_timer_tsc_deadline = 1;
471                         TUNABLE_INT_FETCH("hw.lapic_tsc_deadline",
472                             &lapic_timer_tsc_deadline);
473                 }
474
475                 lapic_et.et_frequency = 0;
476                 /* We don't know the frequency yet, so guess conservatively. */
477                 lapic_et.et_min_period = 0x00001000LL;
478                 lapic_et.et_max_period = SBT_1S;
479                 lapic_et.et_start = lapic_et_start;
480                 lapic_et.et_stop = lapic_et_stop;
481                 lapic_et.et_priv = NULL;
482                 et_register(&lapic_et);
483         }
484
485         /*
486          * Set lapic_eoi_suppression after lapic_enable(), so that
487          * suppression is not enabled in the hardware prematurely.  Note
488          * that we enable suppression by default even when the system
489          * has only one I/O APIC, since otherwise EOIs are broadcast to
490          * all APIC agents, including CPUs.
491          *
492          * It seems that at least some KVM versions report the
493          * EOI_SUPPRESSION bit even though auto-EOI does not work.
494          */
495         ver = lapic_read32(LAPIC_VERSION);
496         if ((ver & APIC_VER_EOI_SUPPRESSION) != 0) {
497                 lapic_eoi_suppression = 1;
498                 if (vm_guest == VM_GUEST_VM &&
499                     !strcmp(hv_vendor, "KVMKVMKVM")) {
500                         if (bootverbose)
501                                 printf(
502                        "KVM -- disabling lapic eoi suppression\n");
503                         lapic_eoi_suppression = 0;
504                 }
505                 TUNABLE_INT_FETCH("hw.lapic_eoi_suppression",
506                     &lapic_eoi_suppression);
507         }
508
509 #define LOOPS 1000000
510         /*
511          * Calibrate the busy loop waiting for IPI ack in xAPIC mode.
512          * lapic_ipi_wait_mult contains the number of iterations which
513          * approximately delay execution for 1 microsecond (the
514          * argument to native_lapic_ipi_wait() is in microseconds).
515          *
516          * We assume that TSC is present and already measured.
517          * Possible TSC frequency jumps are irrelevant to the
518          * calibration loop below, the CPU clock management code is
519          * not yet started, and we do not enter sleep states.
520          */
521         KASSERT((cpu_feature & CPUID_TSC) != 0 && tsc_freq != 0,
522             ("TSC not initialized"));
523         r = rdtsc();
524         for (rx = 0; rx < LOOPS; rx++) {
525                 (void)lapic_read_icr_lo();
526                 ia32_pause();
527         }
528         r = rdtsc() - r;
529         lapic_ipi_wait_mult = MAX(tsc_freq * LOOPS / (r * 1000000), 1);
530         if (bootverbose) {
531                 printf("LAPIC: ipi_wait() us multiplier %ju (r %ju tsc %ju)\n",
532                     (uintmax_t)lapic_ipi_wait_mult, (uintmax_t)r,
533                     (uintmax_t)tsc_freq);
534         }
535 #undef LOOPS
536 }
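
#if 0
/*
 * Illustrative sketch only, not part of this file: roughly how the
 * multiplier computed above is consumed.  The real consumer is
 * native_lapic_ipi_wait(), defined further down in this file; 'delay' is
 * in microseconds and bounds the number of polling iterations, with -1
 * meaning "wait forever".
 */
static int
example_ipi_wait(int delay)
{
        uint64_t rx;

        for (rx = 0; delay == -1 || rx < lapic_ipi_wait_mult * delay; rx++) {
                /* The delivery status bit goes idle once the IPI is accepted. */
                if ((lapic_read_icr_lo() & APIC_DELSTAT_MASK) ==
                    APIC_DELSTAT_IDLE)
                        return (1);
                ia32_pause();
        }
        return (0);
}
#endif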
537
538 /*
539  * Create a local APIC instance.
540  */
541 static void
542 native_lapic_create(u_int apic_id, int boot_cpu)
543 {
544         int i;
545
546         if (apic_id > MAX_APIC_ID) {
547                 printf("APIC: Ignoring local APIC with ID %d\n", apic_id);
548                 if (boot_cpu)
549                         panic("Can't ignore BSP");
550                 return;
551         }
552         KASSERT(!lapics[apic_id].la_present, ("duplicate local APIC %u",
553             apic_id));
554
555         /*
556          * Assume no local LVT overrides and a cluster of 0 and
557          * intra-cluster ID of 0.
558          */
559         lapics[apic_id].la_present = 1;
560         lapics[apic_id].la_id = apic_id;
561         for (i = 0; i <= APIC_LVT_MAX; i++) {
562                 lapics[apic_id].la_lvts[i] = lvts[i];
563                 lapics[apic_id].la_lvts[i].lvt_active = 0;
564         }
565         for (i = 0; i <= APIC_NUM_IOINTS; i++)
566             lapics[apic_id].la_ioint_irqs[i] = -1;
567         lapics[apic_id].la_ioint_irqs[IDT_SYSCALL - APIC_IO_INTS] = IRQ_SYSCALL;
568         lapics[apic_id].la_ioint_irqs[APIC_TIMER_INT - APIC_IO_INTS] =
569             IRQ_TIMER;
570 #ifdef KDTRACE_HOOKS
571         lapics[apic_id].la_ioint_irqs[IDT_DTRACE_RET - APIC_IO_INTS] =
572             IRQ_DTRACE_RET;
573 #endif
574 #ifdef XENHVM
575         lapics[apic_id].la_ioint_irqs[IDT_EVTCHN - APIC_IO_INTS] = IRQ_EVTCHN;
576 #endif
577
578
579 #ifdef SMP
580         cpu_add(apic_id, boot_cpu);
581 #endif
582 }
583
584 /*
585  * Dump contents of local APIC registers
586  */
587 static void
588 native_lapic_dump(const char* str)
589 {
590         uint32_t maxlvt;
591
592         maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
593         printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
594         printf("     ID: 0x%08x   VER: 0x%08x LDR: 0x%08x DFR: 0x%08x",
595             lapic_read32(LAPIC_ID), lapic_read32(LAPIC_VERSION),
596             lapic_read32(LAPIC_LDR), x2apic_mode ? 0 : lapic_read32(LAPIC_DFR));
597         if ((cpu_feature2 & CPUID2_X2APIC) != 0)
598                 printf(" x2APIC: %d", x2apic_mode);
599         printf("\n  lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
600             lapic_read32(LAPIC_LVT_LINT0), lapic_read32(LAPIC_LVT_LINT1),
601             lapic_read32(LAPIC_TPR), lapic_read32(LAPIC_SVR));
602         printf("  timer: 0x%08x therm: 0x%08x err: 0x%08x",
603             lapic_read32(LAPIC_LVT_TIMER), lapic_read32(LAPIC_LVT_THERMAL),
604             lapic_read32(LAPIC_LVT_ERROR));
605         if (maxlvt >= APIC_LVT_PMC)
606                 printf(" pmc: 0x%08x", lapic_read32(LAPIC_LVT_PCINT));
607         printf("\n");
608         if (maxlvt >= APIC_LVT_CMCI)
609                 printf("   cmci: 0x%08x\n", lapic_read32(LAPIC_LVT_CMCI));
610 }
611
612 static void
613 native_lapic_xapic_mode(void)
614 {
615         register_t saveintr;
616
617         saveintr = intr_disable();
618         if (x2apic_mode)
619                 native_lapic_enable_x2apic();
620         intr_restore(saveintr);
621 }
622
623 static void
624 native_lapic_setup(int boot)
625 {
626         struct lapic *la;
627         uint32_t maxlvt;
628         register_t saveintr;
629         char buf[MAXCOMLEN + 1];
630
631         saveintr = intr_disable();
632
633         la = &lapics[lapic_id()];
634         KASSERT(la->la_present, ("missing APIC structure"));
635         maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
636
637         /* Initialize the TPR to allow all interrupts. */
638         lapic_set_tpr(0);
639
640         /* Setup spurious vector and enable the local APIC. */
641         lapic_enable();
642
643         /* Program LINT[01] LVT entries. */
644         lapic_write32(LAPIC_LVT_LINT0, lvt_mode(la, APIC_LVT_LINT0,
645             lapic_read32(LAPIC_LVT_LINT0)));
646         lapic_write32(LAPIC_LVT_LINT1, lvt_mode(la, APIC_LVT_LINT1,
647             lapic_read32(LAPIC_LVT_LINT1)));
648
649         /* Program the PMC LVT entry if present. */
650         if (maxlvt >= APIC_LVT_PMC) {
651                 lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
652                     lapic_read32(LAPIC_LVT_PCINT)));
653         }
654
655         /* Program timer LVT and setup handler. */
656         la->lvt_timer_base = lvt_mode(la, APIC_LVT_TIMER,
657             lapic_read32(LAPIC_LVT_TIMER));
658         la->lvt_timer_last = la->lvt_timer_base;
659         lapic_write32(LAPIC_LVT_TIMER, la->lvt_timer_base);
660         if (boot) {
661                 snprintf(buf, sizeof(buf), "cpu%d:timer", PCPU_GET(cpuid));
662                 intrcnt_add(buf, &la->la_timer_count);
663         }
664
665         /* Setup the timer if configured. */
666         if (la->la_timer_mode != LAT_MODE_UNDEF) {
667                 KASSERT(la->la_timer_period != 0, ("lapic%u: zero divisor",
668                     lapic_id()));
669                 switch (la->la_timer_mode) {
670                 case LAT_MODE_PERIODIC:
671                         lapic_timer_set_divisor(lapic_timer_divisor);
672                         lapic_timer_periodic(la);
673                         break;
674                 case LAT_MODE_ONESHOT:
675                         lapic_timer_set_divisor(lapic_timer_divisor);
676                         lapic_timer_oneshot(la);
677                         break;
678                 case LAT_MODE_DEADLINE:
679                         lapic_timer_deadline(la);
680                         break;
681                 default:
682                         panic("corrupted la_timer_mode %p %d", la,
683                             la->la_timer_mode);
684                 }
685         }
686
687         /* Program error LVT and clear any existing errors. */
688         lapic_write32(LAPIC_LVT_ERROR, lvt_mode(la, APIC_LVT_ERROR,
689             lapic_read32(LAPIC_LVT_ERROR)));
690         lapic_write32(LAPIC_ESR, 0);
691
692         /* XXX: Thermal LVT */
693
694         /* Program the CMCI LVT entry if present. */
695         if (maxlvt >= APIC_LVT_CMCI) {
696                 lapic_write32(LAPIC_LVT_CMCI, lvt_mode(la, APIC_LVT_CMCI,
697                     lapic_read32(LAPIC_LVT_CMCI)));
698         }
699             
700         intr_restore(saveintr);
701 }
702
703 static void
704 native_lapic_reenable_pmc(void)
705 {
706 #ifdef HWPMC_HOOKS
707         uint32_t value;
708
709         value = lapic_read32(LAPIC_LVT_PCINT);
710         value &= ~APIC_LVT_M;
711         lapic_write32(LAPIC_LVT_PCINT, value);
712 #endif
713 }
714
715 #ifdef HWPMC_HOOKS
716 static void
717 lapic_update_pmc(void *dummy)
718 {
719         struct lapic *la;
720
721         la = &lapics[lapic_id()];
722         lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
723             lapic_read32(LAPIC_LVT_PCINT)));
724 }
725 #endif
726
727 static int
728 native_lapic_enable_pmc(void)
729 {
730 #ifdef HWPMC_HOOKS
731         u_int32_t maxlvt;
732
733         /* Fail if the local APIC is not present. */
734         if (!x2apic_mode && lapic_map == NULL)
735                 return (0);
736
737         /* Fail if the PMC LVT is not present. */
738         maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
739         if (maxlvt < APIC_LVT_PMC)
740                 return (0);
741
742         lvts[APIC_LVT_PMC].lvt_masked = 0;
743
744 #ifdef SMP
745         /*
746          * If hwpmc was loaded at boot time then the APs may not be
747          * started yet.  In that case, don't forward the request to
748          * them as they will program the lvt when they start.
749          */
750         if (smp_started)
751                 smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
752         else
753 #endif
754                 lapic_update_pmc(NULL);
755         return (1);
756 #else
757         return (0);
758 #endif
759 }
760
761 static void
762 native_lapic_disable_pmc(void)
763 {
764 #ifdef HWPMC_HOOKS
765         u_int32_t maxlvt;
766
767         /* Fail if the local APIC is not present. */
768         if (!x2apic_mode && lapic_map == NULL)
769                 return;
770
771         /* Fail if the PMC LVT is not present. */
772         maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
773         if (maxlvt < APIC_LVT_PMC)
774                 return;
775
776         lvts[APIC_LVT_PMC].lvt_masked = 1;
777
778 #ifdef SMP
779         /* The APs should always be started when hwpmc is unloaded. */
780         KASSERT(mp_ncpus == 1 || smp_started, ("hwpmc unloaded too early"));
781 #endif
782         smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
783 #endif
784 }
785
786 static void
787 lapic_calibrate_initcount(struct eventtimer *et, struct lapic *la)
788 {
789         u_long value;
790
791         /* Start off with a divisor of 2 (power on reset default). */
792         lapic_timer_divisor = 2;
793         /* Try to calibrate the local APIC timer. */
794         do {
795                 lapic_timer_set_divisor(lapic_timer_divisor);
796                 lapic_timer_oneshot_nointr(la, APIC_TIMER_MAX_COUNT);
797                 DELAY(1000000);
798                 value = APIC_TIMER_MAX_COUNT - lapic_read32(LAPIC_CCR_TIMER);
799                 if (value != APIC_TIMER_MAX_COUNT)
800                         break;
801                 lapic_timer_divisor <<= 1;
802         } while (lapic_timer_divisor <= 128);
803         if (lapic_timer_divisor > 128)
804                 panic("lapic: Divisor too big");
805         if (bootverbose) {
806                 printf("lapic: Divisor %lu, Frequency %lu Hz\n",
807                     lapic_timer_divisor, value);
808         }
809         et->et_frequency = value;
810 }
811
812 static void
813 lapic_calibrate_deadline(struct eventtimer *et, struct lapic *la __unused)
814 {
815
816         et->et_frequency = tsc_freq;
817         if (bootverbose) {
818                 printf("lapic: deadline tsc mode, Frequency %ju Hz\n",
819                     (uintmax_t)et->et_frequency);
820         }
821 }
822
823 static int
824 lapic_et_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
825 {
826         struct lapic *la;
827
828         la = &lapics[PCPU_GET(apic_id)];
829         if (et->et_frequency == 0) {
830                 if (lapic_timer_tsc_deadline)
831                         lapic_calibrate_deadline(et, la);
832                 else
833                         lapic_calibrate_initcount(et, la);
834                 et->et_min_period = (0x00000002LLU << 32) / et->et_frequency;
835                 et->et_max_period = (0xfffffffeLLU << 32) / et->et_frequency;
836         }
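        /*
         * Note on units (illustrative numbers): 'first' and 'period' are
         * sbintime_t values, i.e. seconds in 64.32 fixed point, so
         * multiplying by et_frequency and shifting right by 32 converts
         * them to timer ticks.  For example, an et_frequency of 100000000
         * Hz and a 1 ms period give roughly 100000 ticks.
         */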
837         if (period != 0) {
838                 if (la->la_timer_mode == LAT_MODE_UNDEF)
839                         lapic_timer_set_divisor(lapic_timer_divisor);
840                 la->la_timer_mode = LAT_MODE_PERIODIC;
841                 la->la_timer_period = ((uint32_t)et->et_frequency * period) >>
842                     32;
843                 lapic_timer_periodic(la);
844         } else if (lapic_timer_tsc_deadline) {
845                 la->la_timer_mode = LAT_MODE_DEADLINE;
846                 la->la_timer_period = (et->et_frequency * first) >> 32;
847                 lapic_timer_deadline(la);
848         } else {
849                 if (la->la_timer_mode == LAT_MODE_UNDEF)
850                         lapic_timer_set_divisor(lapic_timer_divisor);
851                 la->la_timer_mode = LAT_MODE_ONESHOT;
852                 la->la_timer_period = ((uint32_t)et->et_frequency * first) >>
853                     32;
854                 lapic_timer_oneshot(la);
855         }
856         return (0);
857 }
858
859 static int
860 lapic_et_stop(struct eventtimer *et)
861 {
862         struct lapic *la;
863
864         la = &lapics[PCPU_GET(apic_id)];
865         lapic_timer_stop(la);
866         la->la_timer_mode = LAT_MODE_UNDEF;
867         return (0);
868 }
869
870 static void
871 native_lapic_disable(void)
872 {
873         uint32_t value;
874
875         /* Software disable the local APIC. */
876         value = lapic_read32(LAPIC_SVR);
877         value &= ~APIC_SVR_SWEN;
878         lapic_write32(LAPIC_SVR, value);
879 }
880
881 static void
882 lapic_enable(void)
883 {
884         uint32_t value;
885
886         /* Program the spurious vector to enable the local APIC. */
887         value = lapic_read32(LAPIC_SVR);
888         value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
889         value |= APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT;
890         if (lapic_eoi_suppression)
891                 value |= APIC_SVR_EOI_SUPPRESSION;
892         lapic_write32(LAPIC_SVR, value);
893 }
894
895 /* Reset the local APIC on the BSP during resume. */
896 static void
897 lapic_resume(struct pic *pic, bool suspend_cancelled)
898 {
899
900         lapic_setup(0);
901 }
902
903 static int
904 native_lapic_id(void)
905 {
906         uint32_t v;
907
908         KASSERT(x2apic_mode || lapic_map != NULL, ("local APIC is not mapped"));
909         v = lapic_read32(LAPIC_ID);
910         if (!x2apic_mode)
911                 v >>= APIC_ID_SHIFT;
912         return (v);
913 }
914
915 static int
916 native_lapic_intr_pending(u_int vector)
917 {
918         uint32_t irr;
919
920         /*
921          * The IRR registers are an array of registers, each of which
922          * only describes 32 interrupts in its low 32 bits.  Thus, we
923          * divide the vector by 32 to get the register index, and take
924          * the vector modulo 32 to determine the individual bit to test
925          * within that register.
926          */
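        /*
         * Worked example (illustrative value): vector 0x4a (74) is tracked
         * by the register at LAPIC_IRR0 + 74 / 32 == LAPIC_IRR0 + 2, in
         * bit 74 % 32 == 10 of that register.
         */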
927         irr = lapic_read32(LAPIC_IRR0 + vector / 32);
928         return (irr & 1 << (vector % 32));
929 }
930
931 static void
932 native_lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
933 {
934         struct lapic *la;
935
936         KASSERT(lapics[apic_id].la_present, ("%s: APIC %u doesn't exist",
937             __func__, apic_id));
938         KASSERT(cluster <= APIC_MAX_CLUSTER, ("%s: cluster %u too big",
939             __func__, cluster));
940         KASSERT(cluster_id <= APIC_MAX_INTRACLUSTER_ID,
941             ("%s: intra cluster id %u too big", __func__, cluster_id));
942         la = &lapics[apic_id];
943         la->la_cluster = cluster;
944         la->la_cluster_id = cluster_id;
945 }
946
947 static int
948 native_lapic_set_lvt_mask(u_int apic_id, u_int pin, u_char masked)
949 {
950
951         if (pin > APIC_LVT_MAX)
952                 return (EINVAL);
953         if (apic_id == APIC_ID_ALL) {
954                 lvts[pin].lvt_masked = masked;
955                 if (bootverbose)
956                         printf("lapic:");
957         } else {
958                 KASSERT(lapics[apic_id].la_present,
959                     ("%s: missing APIC %u", __func__, apic_id));
960                 lapics[apic_id].la_lvts[pin].lvt_masked = masked;
961                 lapics[apic_id].la_lvts[pin].lvt_active = 1;
962                 if (bootverbose)
963                         printf("lapic%u:", apic_id);
964         }
965         if (bootverbose)
966                 printf(" LINT%u %s\n", pin, masked ? "masked" : "unmasked");
967         return (0);
968 }
969
970 static int
971 native_lapic_set_lvt_mode(u_int apic_id, u_int pin, u_int32_t mode)
972 {
973         struct lvt *lvt;
974
975         if (pin > APIC_LVT_MAX)
976                 return (EINVAL);
977         if (apic_id == APIC_ID_ALL) {
978                 lvt = &lvts[pin];
979                 if (bootverbose)
980                         printf("lapic:");
981         } else {
982                 KASSERT(lapics[apic_id].la_present,
983                     ("%s: missing APIC %u", __func__, apic_id));
984                 lvt = &lapics[apic_id].la_lvts[pin];
985                 lvt->lvt_active = 1;
986                 if (bootverbose)
987                         printf("lapic%u:", apic_id);
988         }
989         lvt->lvt_mode = mode;
990         switch (mode) {
991         case APIC_LVT_DM_NMI:
992         case APIC_LVT_DM_SMI:
993         case APIC_LVT_DM_INIT:
994         case APIC_LVT_DM_EXTINT:
995                 lvt->lvt_edgetrigger = 1;
996                 lvt->lvt_activehi = 1;
997                 if (mode == APIC_LVT_DM_EXTINT)
998                         lvt->lvt_masked = 1;
999                 else
1000                         lvt->lvt_masked = 0;
1001                 break;
1002         default:
1003                 panic("Unsupported delivery mode: 0x%x\n", mode);
1004         }
1005         if (bootverbose) {
1006                 printf(" Routing ");
1007                 switch (mode) {
1008                 case APIC_LVT_DM_NMI:
1009                         printf("NMI");
1010                         break;
1011                 case APIC_LVT_DM_SMI:
1012                         printf("SMI");
1013                         break;
1014                 case APIC_LVT_DM_INIT:
1015                         printf("INIT");
1016                         break;
1017                 case APIC_LVT_DM_EXTINT:
1018                         printf("ExtINT");
1019                         break;
1020                 }
1021                 printf(" -> LINT%u\n", pin);
1022         }
1023         return (0);
1024 }
1025
1026 static int
1027 native_lapic_set_lvt_polarity(u_int apic_id, u_int pin, enum intr_polarity pol)
1028 {
1029
1030         if (pin > APIC_LVT_MAX || pol == INTR_POLARITY_CONFORM)
1031                 return (EINVAL);
1032         if (apic_id == APIC_ID_ALL) {
1033                 lvts[pin].lvt_activehi = (pol == INTR_POLARITY_HIGH);
1034                 if (bootverbose)
1035                         printf("lapic:");
1036         } else {
1037                 KASSERT(lapics[apic_id].la_present,
1038                     ("%s: missing APIC %u", __func__, apic_id));
1039                 lapics[apic_id].la_lvts[pin].lvt_active = 1;
1040                 lapics[apic_id].la_lvts[pin].lvt_activehi =
1041                     (pol == INTR_POLARITY_HIGH);
1042                 if (bootverbose)
1043                         printf("lapic%u:", apic_id);
1044         }
1045         if (bootverbose)
1046                 printf(" LINT%u polarity: %s\n", pin,
1047                     pol == INTR_POLARITY_HIGH ? "high" : "low");
1048         return (0);
1049 }
1050
1051 static int
1052 native_lapic_set_lvt_triggermode(u_int apic_id, u_int pin,
1053      enum intr_trigger trigger)
1054 {
1055
1056         if (pin > APIC_LVT_MAX || trigger == INTR_TRIGGER_CONFORM)
1057                 return (EINVAL);
1058         if (apic_id == APIC_ID_ALL) {
1059                 lvts[pin].lvt_edgetrigger = (trigger == INTR_TRIGGER_EDGE);
1060                 if (bootverbose)
1061                         printf("lapic:");
1062         } else {
1063                 KASSERT(lapics[apic_id].la_present,
1064                     ("%s: missing APIC %u", __func__, apic_id));
1065                 lapics[apic_id].la_lvts[pin].lvt_edgetrigger =
1066                     (trigger == INTR_TRIGGER_EDGE);
1067                 lapics[apic_id].la_lvts[pin].lvt_active = 1;
1068                 if (bootverbose)
1069                         printf("lapic%u:", apic_id);
1070         }
1071         if (bootverbose)
1072                 printf(" LINT%u trigger: %s\n", pin,
1073                     trigger == INTR_TRIGGER_EDGE ? "edge" : "level");
1074         return (0);
1075 }
1076
1077 /*
1078  * Adjust the TPR of the current CPU so that it blocks all interrupts below
1079  * the passed in vector.
1080  */
1081 static void
1082 lapic_set_tpr(u_int vector)
1083 {
1084 #ifdef CHEAP_TPR
1085         lapic_write32(LAPIC_TPR, vector);
1086 #else
1087         uint32_t tpr;
1088
1089         tpr = lapic_read32(LAPIC_TPR) & ~APIC_TPR_PRIO;
1090         tpr |= vector;
1091         lapic_write32(LAPIC_TPR, tpr);
1092 #endif
1093 }
1094
1095 static void
1096 native_lapic_eoi(void)
1097 {
1098
1099         lapic_write32_nofence(LAPIC_EOI, 0);
1100 }
1101
1102 void
1103 lapic_handle_intr(int vector, struct trapframe *frame)
1104 {
1105         struct intsrc *isrc;
1106
1107         isrc = intr_lookup_source(apic_idt_to_irq(PCPU_GET(apic_id),
1108             vector));
1109         intr_execute_handlers(isrc, frame);
1110 }
1111
1112 void
1113 lapic_handle_timer(struct trapframe *frame)
1114 {
1115         struct lapic *la;
1116         struct trapframe *oldframe;
1117         struct thread *td;
1118
1119         /* Send EOI first thing. */
1120         lapic_eoi();
1121
1122 #if defined(SMP) && !defined(SCHED_ULE)
1123         /*
1124          * Don't do any accounting for the disabled HTT cores, since it
1125          * would provide misleading numbers to userland.
1126          *
1127          * No locking is necessary here, since even if we lose the race
1128          * when hlt_cpus_mask changes, it is not a big deal.
1129          *
1130          * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
1131          * and unlike other schedulers it actually schedules threads to
1132          * those CPUs.
1133          */
1134         if (CPU_ISSET(PCPU_GET(cpuid), &hlt_cpus_mask))
1135                 return;
1136 #endif
1137
1138         /* Look up our local APIC structure for the tick counters. */
1139         la = &lapics[PCPU_GET(apic_id)];
1140         (*la->la_timer_count)++;
1141         critical_enter();
1142         if (lapic_et.et_active) {
1143                 td = curthread;
1144                 td->td_intr_nesting_level++;
1145                 oldframe = td->td_intr_frame;
1146                 td->td_intr_frame = frame;
1147                 lapic_et.et_event_cb(&lapic_et, lapic_et.et_arg);
1148                 td->td_intr_frame = oldframe;
1149                 td->td_intr_nesting_level--;
1150         }
1151         critical_exit();
1152 }
1153
1154 static void
1155 lapic_timer_set_divisor(u_int divisor)
1156 {
1157
1158         KASSERT(powerof2(divisor), ("lapic: invalid divisor %u", divisor));
1159         KASSERT(ffs(divisor) <= sizeof(lapic_timer_divisors) /
1160             sizeof(u_int32_t), ("lapic: invalid divisor %u", divisor));
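        /*
         * Worked example (illustrative value): a divisor of 8 has
         * ffs(8) == 4, selecting lapic_timer_divisors[3], i.e. APIC_TDCR_8.
         */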
1161         lapic_write32(LAPIC_DCR_TIMER, lapic_timer_divisors[ffs(divisor) - 1]);
1162 }
1163
1164 static void
1165 lapic_timer_oneshot(struct lapic *la)
1166 {
1167         uint32_t value;
1168
1169         value = la->lvt_timer_base;
1170         value &= ~(APIC_LVTT_TM | APIC_LVT_M);
1171         value |= APIC_LVTT_TM_ONE_SHOT;
1172         la->lvt_timer_last = value;
1173         lapic_write32(LAPIC_LVT_TIMER, value);
1174         lapic_write32(LAPIC_ICR_TIMER, la->la_timer_period);
1175 }
1176
1177 static void
1178 lapic_timer_oneshot_nointr(struct lapic *la, uint32_t count)
1179 {
1180         uint32_t value;
1181
1182         value = la->lvt_timer_base;
1183         value &= ~APIC_LVTT_TM;
1184         value |= APIC_LVTT_TM_ONE_SHOT | APIC_LVT_M;
1185         la->lvt_timer_last = value;
1186         lapic_write32(LAPIC_LVT_TIMER, value);
1187         lapic_write32(LAPIC_ICR_TIMER, count);
1188 }
1189
1190 static void
1191 lapic_timer_periodic(struct lapic *la)
1192 {
1193         uint32_t value;
1194
1195         value = la->lvt_timer_base;
1196         value &= ~(APIC_LVTT_TM | APIC_LVT_M);
1197         value |= APIC_LVTT_TM_PERIODIC;
1198         la->lvt_timer_last = value;
1199         lapic_write32(LAPIC_LVT_TIMER, value);
1200         lapic_write32(LAPIC_ICR_TIMER, la->la_timer_period);
1201 }
1202
1203 static void
1204 lapic_timer_deadline(struct lapic *la)
1205 {
1206         uint32_t value;
1207
1208         value = la->lvt_timer_base;
1209         value &= ~(APIC_LVTT_TM | APIC_LVT_M);
1210         value |= APIC_LVTT_TM_TSCDLT;
1211         if (value != la->lvt_timer_last) {
1212                 la->lvt_timer_last = value;
1213                 lapic_write32_nofence(LAPIC_LVT_TIMER, value);
1214                 if (!x2apic_mode)
1215                         mfence();
1216         }
1217         wrmsr(MSR_TSC_DEADLINE, la->la_timer_period + rdtsc());
1218 }
1219
1220 static void
1221 lapic_timer_stop(struct lapic *la)
1222 {
1223         uint32_t value;
1224
1225         if (la->la_timer_mode == LAT_MODE_DEADLINE) {
1226                 wrmsr(MSR_TSC_DEADLINE, 0);
1227                 mfence();
1228         } else {
1229                 value = la->lvt_timer_base;
1230                 value &= ~APIC_LVTT_TM;
1231                 value |= APIC_LVT_M;
1232                 la->lvt_timer_last = value;
1233                 lapic_write32(LAPIC_LVT_TIMER, value);
1234         }
1235 }
1236
1237 void
1238 lapic_handle_cmc(void)
1239 {
1240
1241         lapic_eoi();
1242         cmc_intr();
1243 }
1244
1245 /*
1246  * Called from mca_init() to activate the CMC interrupt if this CPU is
1247  * responsible for monitoring any MC banks for CMC events.  Since mca_init()
1248  * is called prior to lapic_setup() during boot, this just needs to unmask
1249  * this CPU's LVT_CMCI entry.
1250  */
1251 static void
1252 native_lapic_enable_cmc(void)
1253 {
1254         u_int apic_id;
1255
1256 #ifdef DEV_ATPIC
1257         if (!x2apic_mode && lapic_map == NULL)
1258                 return;
1259 #endif
1260         apic_id = PCPU_GET(apic_id);
1261         KASSERT(lapics[apic_id].la_present,
1262             ("%s: missing APIC %u", __func__, apic_id));
1263         lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_masked = 0;
1264         lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_active = 1;
1265         if (bootverbose)
1266                 printf("lapic%u: CMCI unmasked\n", apic_id);
1267 }
1268
1269 void
1270 lapic_handle_error(void)
1271 {
1272         uint32_t esr;
1273
1274         /*
1275          * Read the contents of the error status register.  Write to
1276          * the register first before reading from it to force the APIC
1277          * to update its value to indicate any errors that have
1278          * occurred since the previous write to the register.
1279          */
1280         lapic_write32(LAPIC_ESR, 0);
1281         esr = lapic_read32(LAPIC_ESR);
1282
1283         printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
1284         lapic_eoi();
1285 }
1286
1287 static u_int
1288 native_apic_cpuid(u_int apic_id)
1289 {
1290 #ifdef SMP
1291         return apic_cpuids[apic_id];
1292 #else
1293         return 0;
1294 #endif
1295 }
1296
1297 /* Request a free IDT vector to be used by the specified IRQ. */
1298 static u_int
1299 native_apic_alloc_vector(u_int apic_id, u_int irq)
1300 {
1301         u_int vector;
1302
1303         KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
1304
1305         /*
1306          * Search for a free vector.  Currently we just use a very simple
1307          * algorithm to find the first free vector.
1308          */
1309         mtx_lock_spin(&icu_lock);
1310         for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
1311                 if (lapics[apic_id].la_ioint_irqs[vector] != -1)
1312                         continue;
1313                 lapics[apic_id].la_ioint_irqs[vector] = irq;
1314                 mtx_unlock_spin(&icu_lock);
1315                 return (vector + APIC_IO_INTS);
1316         }
1317         mtx_unlock_spin(&icu_lock);
1318         return (0);
1319 }
1320
1321 /*
1322  * Request 'count' free contiguous IDT vectors to be used by 'count'
1323  * IRQs.  'count' must be a power of two and the vectors will be
1324  * aligned on a boundary of 'align'.  If the request cannot be
1325  * satisfied, 0 is returned.
1326  */
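/*
 * For example (illustrative values): a request with count == 4 and
 * align == 4 scans for four consecutive free slots whose first index is a
 * multiple of four and, on success, returns APIC_IO_INTS plus that index,
 * binding the caller's four IRQs to those consecutive vectors.
 */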
1327 static u_int
1328 native_apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align)
1329 {
1330         u_int first, run, vector;
1331
1332         KASSERT(powerof2(count), ("bad count"));
1333         KASSERT(powerof2(align), ("bad align"));
1334         KASSERT(align >= count, ("align < count"));
1335 #ifdef INVARIANTS
1336         for (run = 0; run < count; run++)
1337                 KASSERT(irqs[run] < NUM_IO_INTS, ("Invalid IRQ %u at index %u",
1338                     irqs[run], run));
1339 #endif
1340
1341         /*
1342          * Search for 'count' free vectors.  As with apic_alloc_vector(),
1343          * this just uses a simple first fit algorithm.
1344          */
1345         run = 0;
1346         first = 0;
1347         mtx_lock_spin(&icu_lock);
1348         for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
1349
1350                 /* Vector is in use, end run. */
1351                 if (lapics[apic_id].la_ioint_irqs[vector] != -1) {
1352                         run = 0;
1353                         first = 0;
1354                         continue;
1355                 }
1356
1357                 /* Start a new run if run == 0 and vector is aligned. */
1358                 if (run == 0) {
1359                         if ((vector & (align - 1)) != 0)
1360                                 continue;
1361                         first = vector;
1362                 }
1363                 run++;
1364
1365                 /* Keep looping if the run isn't long enough yet. */
1366                 if (run < count)
1367                         continue;
1368
1369                 /* Found a run, assign IRQs and return the first vector. */
1370                 for (vector = 0; vector < count; vector++)
1371                         lapics[apic_id].la_ioint_irqs[first + vector] =
1372                             irqs[vector];
1373                 mtx_unlock_spin(&icu_lock);
1374                 return (first + APIC_IO_INTS);
1375         }
1376         mtx_unlock_spin(&icu_lock);
1377         printf("APIC: Couldn't find APIC vectors for %u IRQs\n", count);
1378         return (0);
1379 }
1380
1381 /*
1382  * Enable a vector for a particular apic_id.  Since all lapics share IDT
1383  * entries and ioint_handlers, this enables the vector on all lapics.
1384  * Lapics which do not have the vector configured would report spurious
1385  * interrupts should it fire.
1386  */
1387 static void
1388 native_apic_enable_vector(u_int apic_id, u_int vector)
1389 {
1390
1391         KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1392         KASSERT(ioint_handlers[vector / 32] != NULL,
1393             ("No ISR handler for vector %u", vector));
1394 #ifdef KDTRACE_HOOKS
1395         KASSERT(vector != IDT_DTRACE_RET,
1396             ("Attempt to overwrite DTrace entry"));
1397 #endif
1398         setidt(vector, ioint_handlers[vector / 32], SDT_APIC, SEL_KPL,
1399             GSEL_APIC);
1400 }
1401
1402 static void
1403 native_apic_disable_vector(u_int apic_id, u_int vector)
1404 {
1405
1406         KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1407 #ifdef KDTRACE_HOOKS
1408         KASSERT(vector != IDT_DTRACE_RET,
1409             ("Attempt to overwrite DTrace entry"));
1410 #endif
1411         KASSERT(ioint_handlers[vector / 32] != NULL,
1412             ("No ISR handler for vector %u", vector));
1413 #ifdef notyet
1414         /*
1415          * We cannot currently clear the IDT entry because other CPUs
1416          * may have a valid vector at this offset.
1417          */
1418         setidt(vector, &IDTVEC(rsvd), SDT_APICT, SEL_KPL, GSEL_APIC);
1419 #endif
1420 }
1421
1422 /* Release an APIC vector when it's no longer in use. */
1423 static void
1424 native_apic_free_vector(u_int apic_id, u_int vector, u_int irq)
1425 {
1426         struct thread *td;
1427
1428         KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1429             vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1430             ("Vector %u does not map to an IRQ line", vector));
1431         KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
1432         KASSERT(lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] ==
1433             irq, ("IRQ mismatch"));
1434 #ifdef KDTRACE_HOOKS
1435         KASSERT(vector != IDT_DTRACE_RET,
1436             ("Attempt to overwrite DTrace entry"));
1437 #endif
1438
1439         /*
1440          * Bind us to the cpu that owned the vector before freeing it so
1441          * we don't lose an interrupt delivery race.
1442          */
1443         td = curthread;
1444         if (!rebooting) {
1445                 thread_lock(td);
1446                 if (sched_is_bound(td))
1447                         panic("apic_free_vector: Thread already bound.\n");
1448                 sched_bind(td, apic_cpuid(apic_id));
1449                 thread_unlock(td);
1450         }
1451         mtx_lock_spin(&icu_lock);
1452         lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] = -1;
1453         mtx_unlock_spin(&icu_lock);
1454         if (!rebooting) {
1455                 thread_lock(td);
1456                 sched_unbind(td);
1457                 thread_unlock(td);
1458         }
1459 }
1460
1461 /* Map an IDT vector (APIC) to an IRQ (interrupt source). */
1462 static u_int
1463 apic_idt_to_irq(u_int apic_id, u_int vector)
1464 {
1465         int irq;
1466
1467         KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1468             vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1469             ("Vector %u does not map to an IRQ line", vector));
1470 #ifdef KDTRACE_HOOKS
1471         KASSERT(vector != IDT_DTRACE_RET,
1472             ("Attempt to overwrite DTrace entry"));
1473 #endif
1474         irq = lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS];
1475         if (irq < 0)
1476                 irq = 0;
1477         return (irq);
1478 }
1479
1480 #ifdef DDB
1481 /*
1482  * Dump data about APIC IDT vector mappings.
1483  */
1484 DB_SHOW_COMMAND(apic, db_show_apic)
1485 {
1486         struct intsrc *isrc;
1487         int i, verbose;
1488         u_int apic_id;
1489         u_int irq;
1490
1491         if (strcmp(modif, "vv") == 0)
1492                 verbose = 2;
1493         else if (strcmp(modif, "v") == 0)
1494                 verbose = 1;
1495         else
1496                 verbose = 0;
1497         for (apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
1498                 if (lapics[apic_id].la_present == 0)
1499                         continue;
1500                 db_printf("Interrupts bound to lapic %u\n", apic_id);
1501                 for (i = 0; i < APIC_NUM_IOINTS + 1 && !db_pager_quit; i++) {
1502                         irq = lapics[apic_id].la_ioint_irqs[i];
1503                         if (irq == -1 || irq == IRQ_SYSCALL)
1504                                 continue;
1505 #ifdef KDTRACE_HOOKS
1506                         if (irq == IRQ_DTRACE_RET)
1507                                 continue;
1508 #endif
1509 #ifdef XENHVM
1510                         if (irq == IRQ_EVTCHN)
1511                                 continue;
1512 #endif
1513                         db_printf("vec 0x%2x -> ", i + APIC_IO_INTS);
1514                         if (irq == IRQ_TIMER)
1515                                 db_printf("lapic timer\n");
1516                         else if (irq < NUM_IO_INTS) {
1517                                 isrc = intr_lookup_source(irq);
1518                                 if (isrc == NULL || verbose == 0)
1519                                         db_printf("IRQ %u\n", irq);
1520                                 else
1521                                         db_dump_intr_event(isrc->is_event,
1522                                             verbose == 2);
1523                         } else
1524                                 db_printf("IRQ %u ???\n", irq);
1525                 }
1526         }
1527 }
1528
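/*
 * Helper for "show lapic": print the vector numbers (base + bit index) of
 * all bits set in one 32-bit word of a 256-bit LAPIC register (ISR/TMR/IRR).
 */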
1529 static void
1530 dump_mask(const char *prefix, uint32_t v, int base)
1531 {
1532         int i, first;
1533
1534         first = 1;
1535         for (i = 0; i < 32; i++)
1536                 if (v & (1 << i)) {
1537                         if (first) {
1538                                 db_printf("%s:", prefix);
1539                                 first = 0;
1540                         }
1541                         db_printf(" %02x", base + i);
1542                 }
1543         if (!first)
1544                 db_printf("\n");
1545 }
1546
1547 /* Show info from the lapic regs for this CPU. */
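/* Invoked as "show lapic" from the ddb prompt. */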
1548 DB_SHOW_COMMAND(lapic, db_show_lapic)
1549 {
1550         uint32_t v;
1551
1552         db_printf("lapic ID = %d\n", lapic_id());
1553         v = lapic_read32(LAPIC_VERSION);
1554         db_printf("version  = %d.%d\n", (v & APIC_VER_VERSION) >> 4,
1555             v & 0xf);
1556         db_printf("max LVT  = %d\n", (v & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
1557         v = lapic_read32(LAPIC_SVR);
1558         db_printf("SVR      = %02x (%s)\n", v & APIC_SVR_VECTOR,
1559             v & APIC_SVR_ENABLE ? "enabled" : "disabled");
1560         db_printf("TPR      = %02x\n", lapic_read32(LAPIC_TPR));
1561
1562 #define dump_field(prefix, regn, index)                                 \
1563         dump_mask(__XSTRING(prefix ## index),                           \
1564             lapic_read32(LAPIC_ ## regn ## index),                      \
1565             index * 32)
1566
1567         db_printf("In-service Interrupts:\n");
1568         dump_field(isr, ISR, 0);
1569         dump_field(isr, ISR, 1);
1570         dump_field(isr, ISR, 2);
1571         dump_field(isr, ISR, 3);
1572         dump_field(isr, ISR, 4);
1573         dump_field(isr, ISR, 5);
1574         dump_field(isr, ISR, 6);
1575         dump_field(isr, ISR, 7);
1576
1577         db_printf("TMR Interrupts:\n");
1578         dump_field(tmr, TMR, 0);
1579         dump_field(tmr, TMR, 1);
1580         dump_field(tmr, TMR, 2);
1581         dump_field(tmr, TMR, 3);
1582         dump_field(tmr, TMR, 4);
1583         dump_field(tmr, TMR, 5);
1584         dump_field(tmr, TMR, 6);
1585         dump_field(tmr, TMR, 7);
1586
1587         db_printf("IRR Interrupts:\n");
1588         dump_field(irr, IRR, 0);
1589         dump_field(irr, IRR, 1);
1590         dump_field(irr, IRR, 2);
1591         dump_field(irr, IRR, 3);
1592         dump_field(irr, IRR, 4);
1593         dump_field(irr, IRR, 5);
1594         dump_field(irr, IRR, 6);
1595         dump_field(irr, IRR, 7);
1596
1597 #undef dump_field
1598 }
1599 #endif
1600
1601 /*
1602  * APIC probing support code.  This includes code to manage enumerators.
1603  */
1604
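/*
 * An enumerator (e.g. the MADT or MP Table code) typically registers itself
 * from an early SYSINIT.  A rough sketch, where the foo_* names are
 * hypothetical and the field names follow the struct apic_enumerator
 * declaration in <x86/apicvar.h>:
 *
 *	static struct apic_enumerator foo_enumerator = {
 *		.apic_name = "FOO",
 *		.apic_probe = foo_probe,
 *		.apic_probe_cpus = foo_probe_cpus,
 *		.apic_setup_local = foo_setup_local,
 *		.apic_setup_io = foo_setup_io,
 *	};
 *
 *	static void
 *	foo_register(void *dummy __unused)
 *	{
 *
 *		apic_register_enumerator(&foo_enumerator);
 *	}
 *	SYSINIT(foo_register, SI_SUB_TUNABLES - 1, SI_ORDER_FIRST,
 *	    foo_register, NULL);
 */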
1605 static SLIST_HEAD(, apic_enumerator) enumerators =
1606         SLIST_HEAD_INITIALIZER(enumerators);
1607 static struct apic_enumerator *best_enum;
1608
1609 void
1610 apic_register_enumerator(struct apic_enumerator *enumerator)
1611 {
1612 #ifdef INVARIANTS
1613         struct apic_enumerator *apic_enum;
1614
1615         SLIST_FOREACH(apic_enum, &enumerators, apic_next) {
1616                 if (apic_enum == enumerator)
1617                         panic("%s: Duplicate register of %s", __func__,
1618                             enumerator->apic_name);
1619         }
1620 #endif
1621         SLIST_INSERT_HEAD(&enumerators, enumerator, apic_next);
1622 }
1623
1624 /*
1625  * We have to look for CPUs very, very early because certain subsystems
1626  * want to know how many CPUs are present extremely early in the boot
1627  * process.
1628  */
1629 static void
1630 apic_init(void *dummy __unused)
1631 {
1632         struct apic_enumerator *enumerator;
1633         int retval, best;
1634
1635         /* We only support built-in local APICs. */
1636         if (!(cpu_feature & CPUID_APIC))
1637                 return;
1638
1639         /* Don't probe if APIC mode is disabled. */
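        /* This corresponds to the hint.apic.0.disabled loader tunable. */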
1640         if (resource_disabled("apic", 0))
1641                 return;
1642
1643         /* Probe all the enumerators to find the best match. */
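        /*
         * A probe routine returns a positive value when its tables are not
         * usable; otherwise it returns a non-positive priority and the
         * value closest to zero wins.
         */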
1644         best_enum = NULL;
1645         best = 0;
1646         SLIST_FOREACH(enumerator, &enumerators, apic_next) {
1647                 retval = enumerator->apic_probe();
1648                 if (retval > 0)
1649                         continue;
1650                 if (best_enum == NULL || best < retval) {
1651                         best_enum = enumerator;
1652                         best = retval;
1653                 }
1654         }
1655         if (best_enum == NULL) {
1656                 if (bootverbose)
1657                         printf("APIC: Could not find any APICs.\n");
1658 #ifndef DEV_ATPIC
1659                 panic("running without device atpic requires a local APIC");
1660 #endif
1661                 return;
1662         }
1663
1664         if (bootverbose)
1665                 printf("APIC: Using the %s enumerator.\n",
1666                     best_enum->apic_name);
1667
1668 #ifdef I686_CPU
1669         /*
1670          * To work around an erratum, we disable the local APIC on some
1671          * CPUs during early startup.  We need to re-enable the local APIC
1672          * on such CPUs now.
1673          */
1674         ppro_reenable_apic();
1675 #endif
1676
1677         /* Probe the CPUs in the system. */
1678         retval = best_enum->apic_probe_cpus();
1679         if (retval != 0)
1680                 printf("%s: Failed to probe CPUs: returned %d\n",
1681                     best_enum->apic_name, retval);
1682
1683 }
1684 SYSINIT(apic_init, SI_SUB_TUNABLES - 1, SI_ORDER_SECOND, apic_init, NULL);
1685
1686 /*
1687  * Set up the local APIC.  We have to do this prior to starting up the APs
1688  * in the SMP case.
1689  */
1690 static void
1691 apic_setup_local(void *dummy __unused)
1692 {
1693         int retval;
1694
1695         if (best_enum == NULL)
1696                 return;
1697
1698         /* Initialize the local APIC. */
1699         retval = best_enum->apic_setup_local();
1700         if (retval != 0)
1701                 printf("%s: Failed to setup the local APIC: returned %d\n",
1702                     best_enum->apic_name, retval);
1703 }
1704 SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_SECOND, apic_setup_local, NULL);
1705
1706 /*
1707  * Set up the I/O APICs.
1708  */
1709 static void
1710 apic_setup_io(void *dummy __unused)
1711 {
1712         int retval;
1713
1714         if (best_enum == NULL)
1715                 return;
1716
1717         /*
1718          * The local APIC must be registered before other PICs and pseudo
1719          * PICs so that suspend and resume happen in the proper order.
1720          */
1721         intr_register_pic(&lapic_pic);
1722
1723         retval = best_enum->apic_setup_io();
1724         if (retval != 0)
1725                 printf("%s: Failed to setup I/O APICs: returned %d\n",
1726                     best_enum->apic_name, retval);
1727
1728         /*
1729          * Finish setting up the local APIC on the BSP once we know
1730          * how to properly program the LINT pins.  In particular, this
1731          * enables the EOI suppression mode, if the LAPIC supports it and
1732          * the user did not disable it.
1733          */
1734         lapic_setup(1);
1735         if (bootverbose)
1736                 lapic_dump("BSP");
1737
1738         /* Enable the MSI "pic". */
1739         init_ops.msi_init();
1740 }
1741 SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_THIRD, apic_setup_io, NULL);
1742
1743 #ifdef SMP
1744 /*
1745  * Inter-Processor Interrupt (IPI) functions.  The lapic_ipi_*() functions are
1746  * private to the MD code.  The public interface for the rest of the
1747  * kernel is defined in mp_machdep.c.
1748  */
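/*
 * The native_lapic_ipi_*() implementations below are reached through the
 * lapic_ipi_wait(), lapic_ipi_raw(), lapic_ipi_vectored(), lapic_ipi_alloc()
 * and lapic_ipi_free() wrappers.
 */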
1749
1750 /*
1751  * Wait delay microseconds for IPI to be sent.  If delay is -1, we
1752  * wait forever.
1753  */
1754 static int
1755 native_lapic_ipi_wait(int delay)
1756 {
1757         uint64_t i, counter;
1758
1759         /* LAPIC_ICR.APIC_DELSTAT_MASK is undefined in x2APIC mode */
1760         if (x2apic_mode)
1761                 return (1);
1762
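        /*
         * lapic_ipi_wait_mult is calibrated during boot so that roughly
         * that many iterations of the loop below take one microsecond.
         */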
1763         counter = lapic_ipi_wait_mult * delay;
1764         for (i = 0; delay == -1 || i < counter; i++) {
1765                 if ((lapic_read_icr_lo() & APIC_DELSTAT_MASK) ==
1766                     APIC_DELSTAT_IDLE)
1767                         return (1);
1768                 ia32_pause();
1769         }
1770         return (0);
1771 }
1772
1773 static void
1774 native_lapic_ipi_raw(register_t icrlo, u_int dest)
1775 {
1776         uint64_t icr;
1777         uint32_t vhi, vlo;
1778         register_t saveintr;
1779
1780         /* XXX: Need more sanity checking of icrlo? */
1781         KASSERT(x2apic_mode || lapic_map != NULL,
1782             ("%s called too early", __func__));
1783         KASSERT(x2apic_mode ||
1784             (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
1785             ("%s: invalid dest field", __func__));
1786         KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
1787             ("%s: reserved bits set in ICR LO register", __func__));
1788
1789         /* Set destination in ICR HI register if it is being used. */
1790         if (!x2apic_mode) {
1791                 saveintr = intr_disable();
1792                 icr = lapic_read_icr();
1793         }
1794
1795         if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
1796                 if (x2apic_mode) {
1797                         vhi = dest;
1798                 } else {
1799                         vhi = icr >> 32;
1800                         vhi &= ~APIC_ID_MASK;
1801                         vhi |= dest << APIC_ID_SHIFT;
1802                 }
1803         } else {
1804                 vhi = 0;
1805         }
1806
1807         /* Program the contents of the IPI and dispatch it. */
1808         if (x2apic_mode) {
1809                 vlo = icrlo;
1810         } else {
1811                 vlo = icr;
1812                 vlo &= APIC_ICRLO_RESV_MASK;
1813                 vlo |= icrlo;
1814         }
1815         lapic_write_icr(vhi, vlo);
1816         if (!x2apic_mode)
1817                 intr_restore(saveintr);
1818 }
1819
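/*
 * Spin budgets, in microseconds, passed to lapic_ipi_wait(): how long to
 * wait for a previous IPI to be sent before issuing a new one and, with
 * DETECT_DEADLOCK, how long to wait for the new IPI to be delivered.
 */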
1820 #define BEFORE_SPIN     50000
1821 #ifdef DETECT_DEADLOCK
1822 #define AFTER_SPIN      50
1823 #endif
1824
1825 static void
1826 native_lapic_ipi_vectored(u_int vector, int dest)
1827 {
1828         register_t icrlo, destfield;
1829
1830         KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
1831             ("%s: invalid vector %d", __func__, vector));
1832
1833         icrlo = APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT;
1834
1835         /*
1836          * NMI IPIs are just fake vectors used to send an NMI.  If one was
1837          * passed, use the NMI delivery mode; otherwise specify the vector.
1838          */
1839         if (vector >= IPI_NMI_FIRST)
1840                 icrlo |= APIC_DELMODE_NMI;
1841         else
1842                 icrlo |= vector | APIC_DELMODE_FIXED;
1843         destfield = 0;
1844         switch (dest) {
1845         case APIC_IPI_DEST_SELF:
1846                 icrlo |= APIC_DEST_SELF;
1847                 break;
1848         case APIC_IPI_DEST_ALL:
1849                 icrlo |= APIC_DEST_ALLISELF;
1850                 break;
1851         case APIC_IPI_DEST_OTHERS:
1852                 icrlo |= APIC_DEST_ALLESELF;
1853                 break;
1854         default:
1855                 KASSERT(x2apic_mode ||
1856                     (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
1857                     ("%s: invalid destination 0x%x", __func__, dest));
1858                 destfield = dest;
1859         }
1860
1861         /* Wait for an earlier IPI to finish. */
1862         if (!lapic_ipi_wait(BEFORE_SPIN)) {
1863                 if (panicstr != NULL)
1864                         return;
1865                 else
1866                         panic("APIC: Previous IPI is stuck");
1867         }
1868
1869         lapic_ipi_raw(icrlo, destfield);
1870
1871 #ifdef DETECT_DEADLOCK
1872         /* Wait for IPI to be delivered. */
1873         if (!lapic_ipi_wait(AFTER_SPIN)) {
1874 #ifdef needsattention
1875                 /*
1876                  * XXX FIXME:
1877                  *
1878                  * The above function waits for the message to actually be
1879                  * delivered.  It breaks out after an arbitrary timeout
1880                  * since the message should eventually be delivered (at
1881                  * least in theory) and that if it wasn't we would catch
1882          * least in theory), and if it wasn't we would catch
1883                  * sent.
1884                  *
1885                  * We could skip this wait entirely, EXCEPT it probably
1886                  * protects us from other routines that assume that the
1887                  * message was delivered and acted upon when this function
1888                  * returns.
1889                  */
1890                 printf("APIC: IPI might be stuck\n");
1891 #else /* !needsattention */
1892                 /* Wait without a timeout until the message is sent. */
1893                 while (lapic_read_icr_lo() & APIC_DELSTAT_PEND)
1894                         ia32_pause();
1895 #endif /* needsattention */
1896         }
1897 #endif /* DETECT_DEADLOCK */
1898 }
1899
1900 /*
1901  * Since the IDT is shared by all CPUs the IPI slot update needs to be globally
1902  * visible.
1903  *
1904  * Consider the case where an IPI is generated immediately after allocation:
1905  *     vector = lapic_ipi_alloc(ipifunc);
1906  *     ipi_selected(other_cpus, vector);
1907  *
1908  * In xAPIC mode a write to ICR_LO has serializing semantics because the
1909  * APIC page is mapped as an uncached region. In x2APIC mode there is an
1910  * explicit 'mfence' before the ICR MSR is written. Therefore in both cases
1911  * the IDT slot update is globally visible before the IPI is delivered.
1912  */
1913 static int
1914 native_lapic_ipi_alloc(inthand_t *ipifunc)
1915 {
1916         struct gate_descriptor *ip;
1917         long func;
1918         int idx, vector;
1919
1920         KASSERT(ipifunc != &IDTVEC(rsvd), ("invalid ipifunc %p", ipifunc));
1921
1922         vector = -1;
1923         mtx_lock_spin(&icu_lock);
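        /*
         * Scan the dynamic IPI range for an IDT slot that still points at
         * the reserved handler and claim the first free one found.
         */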
1924         for (idx = IPI_DYN_FIRST; idx <= IPI_DYN_LAST; idx++) {
1925                 ip = &idt[idx];
1926                 func = (ip->gd_hioffset << 16) | ip->gd_looffset;
1927                 if (func == (uintptr_t)&IDTVEC(rsvd)) {
1928                         vector = idx;
1929                         setidt(vector, ipifunc, SDT_APIC, SEL_KPL, GSEL_APIC);
1930                         break;
1931                 }
1932         }
1933         mtx_unlock_spin(&icu_lock);
1934         return (vector);
1935 }
1936
1937 static void
1938 native_lapic_ipi_free(int vector)
1939 {
1940         struct gate_descriptor *ip;
1941         long func;
1942
1943         KASSERT(vector >= IPI_DYN_FIRST && vector <= IPI_DYN_LAST,
1944             ("%s: invalid vector %d", __func__, vector));
1945
1946         mtx_lock_spin(&icu_lock);
1947         ip = &idt[vector];
1948         func = (ip->gd_hioffset << 16) | ip->gd_looffset;
1949         KASSERT(func != (uintptr_t)&IDTVEC(rsvd),
1950             ("invalid idtfunc %#lx", func));
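        /* Point the slot back at the reserved handler so it can be reused. */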
1951         setidt(vector, &IDTVEC(rsvd), SDT_APICT, SEL_KPL, GSEL_APIC);
1952         mtx_unlock_spin(&icu_lock);
1953 }
1954
1955 #endif /* SMP */