sys/x86/x86/local_apic.c
1 /*-
2  * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
3  * Copyright (c) 1996, by Steve Passe
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. The name of the developer may NOT be used to endorse or promote products
12  *    derived from this software without specific prior written permission.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29
30 /*
31  * Local APIC support on Pentium and later processors.
32  */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_atpic.h"
38 #include "opt_hwpmc_hooks.h"
39
40 #include "opt_ddb.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/pcpu.h>
49 #include <sys/proc.h>
50 #include <sys/sched.h>
51 #include <sys/smp.h>
52 #include <sys/sysctl.h>
53 #include <sys/timeet.h>
54
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57
58 #include <x86/apicreg.h>
59 #include <machine/clock.h>
60 #include <machine/cpufunc.h>
61 #include <machine/cputypes.h>
62 #include <machine/frame.h>
63 #include <machine/intr_machdep.h>
64 #include <x86/apicvar.h>
65 #include <x86/mca.h>
66 #include <machine/md_var.h>
67 #include <machine/smp.h>
68 #include <machine/specialreg.h>
69 #include <x86/init.h>
70
71 #ifdef DDB
72 #include <sys/interrupt.h>
73 #include <ddb/ddb.h>
74 #endif
75
76 #ifdef __amd64__
77 #define SDT_APIC        SDT_SYSIGT
78 #define SDT_APICT       SDT_SYSIGT
79 #define GSEL_APIC       0
80 #else
81 #define SDT_APIC        SDT_SYS386IGT
82 #define SDT_APICT       SDT_SYS386TGT
83 #define GSEL_APIC       GSEL(GCODE_SEL, SEL_KPL)
84 #endif
85
86 /* Sanity checks on IDT vectors. */
87 CTASSERT(APIC_IO_INTS + APIC_NUM_IOINTS == APIC_TIMER_INT);
88 CTASSERT(APIC_TIMER_INT < APIC_LOCAL_INTS);
89 CTASSERT(APIC_LOCAL_INTS == 240);
90 CTASSERT(IPI_STOP < APIC_SPURIOUS_INT);
91
92 /* Magic IRQ values for the timer and syscalls. */
93 #define IRQ_TIMER       (NUM_IO_INTS + 1)
94 #define IRQ_SYSCALL     (NUM_IO_INTS + 2)
95 #define IRQ_DTRACE_RET  (NUM_IO_INTS + 3)
96 #define IRQ_EVTCHN      (NUM_IO_INTS + 4)
97
98 enum lat_timer_mode {
99         LAT_MODE_UNDEF =        0,
100         LAT_MODE_PERIODIC =     1,
101         LAT_MODE_ONESHOT =      2,
102         LAT_MODE_DEADLINE =     3,
103 };
104
105 /*
106  * Support for local APICs.  Local APICs manage interrupts on each
107  * individual processor as opposed to I/O APICs which receive interrupts
108  * from I/O devices and then forward them on to the local APICs.
109  *
110  * Local APICs can also send interrupts to each other thus providing the
111  * mechanism for IPIs.
112  */
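/*
 * Illustrative sketch (not part of this file): higher-level SMP code sends
 * an IPI by going through the apic_ops table defined further below, e.g.
 * roughly
 *
 *      lapic_ipi_vectored(IPI_AST, APIC_IPI_DEST_OTHERS);
 *
 * which lands in native_lapic_ipi_vectored() and programs the ICR.
 */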
113
114 struct lvt {
115         u_int lvt_edgetrigger:1;
116         u_int lvt_activehi:1;
117         u_int lvt_masked:1;
118         u_int lvt_active:1;
119         u_int lvt_mode:16;
120         u_int lvt_vector:8;
121 };
122
123 struct lapic {
124         struct lvt la_lvts[APIC_LVT_MAX + 1];
125         u_int la_id:8;
126         u_int la_cluster:4;
127         u_int la_cluster_id:2;
128         u_int la_present:1;
129         u_long *la_timer_count;
130         uint64_t la_timer_period;
131         enum lat_timer_mode la_timer_mode;
132         uint32_t lvt_timer_base;
133         uint32_t lvt_timer_last;
134         /* Include IDT_SYSCALL to make indexing easier. */
135         int la_ioint_irqs[APIC_NUM_IOINTS + 1];
136 } static lapics[MAX_APIC_ID + 1];
137
138 /* Global defaults for local APIC LVT entries. */
139 static struct lvt lvts[APIC_LVT_MAX + 1] = {
140         { 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 },  /* LINT0: masked ExtINT */
141         { 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 },     /* LINT1: NMI */
142         { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT },      /* Timer */
143         { 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT },      /* Error */
144         { 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 },     /* PMC */
145         { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT },    /* Thermal */
146         { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_CMC_INT },        /* CMCI */
147 };
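/*
 * The initializer fields above follow the order of struct lvt:
 * { lvt_edgetrigger, lvt_activehi, lvt_masked, lvt_active, lvt_mode,
 * lvt_vector }, so for example LINT1 is an active-high, edge-triggered,
 * unmasked NMI.
 */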
148
149 static inthand_t *ioint_handlers[] = {
150         NULL,                   /* 0 - 31 */
151         IDTVEC(apic_isr1),      /* 32 - 63 */
152         IDTVEC(apic_isr2),      /* 64 - 95 */
153         IDTVEC(apic_isr3),      /* 96 - 127 */
154         IDTVEC(apic_isr4),      /* 128 - 159 */
155         IDTVEC(apic_isr5),      /* 160 - 191 */
156         IDTVEC(apic_isr6),      /* 192 - 223 */
157         IDTVEC(apic_isr7),      /* 224 - 255 */
158 };
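/*
 * The table is indexed by vector / 32 (see native_apic_enable_vector()
 * below); for example vector 69 (0x45) is dispatched through
 * ioint_handlers[69 / 32], i.e. apic_isr2.
 */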
159
160
161 static u_int32_t lapic_timer_divisors[] = {
162         APIC_TDCR_1, APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
163         APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128
164 };
165
166 extern inthand_t IDTVEC(rsvd);
167
168 volatile char *lapic_map;
169 vm_paddr_t lapic_paddr;
170 int x2apic_mode;
171 int lapic_eoi_suppression;
172 static int lapic_timer_tsc_deadline;
173 static u_long lapic_timer_divisor, count_freq;
174 static struct eventtimer lapic_et;
175 #ifdef SMP
176 static uint64_t lapic_ipi_wait_mult;
177 #endif
178
179 SYSCTL_NODE(_hw, OID_AUTO, apic, CTLFLAG_RD, 0, "APIC options");
180 SYSCTL_INT(_hw_apic, OID_AUTO, x2apic_mode, CTLFLAG_RD, &x2apic_mode, 0, "");
181 SYSCTL_INT(_hw_apic, OID_AUTO, eoi_suppression, CTLFLAG_RD,
182     &lapic_eoi_suppression, 0, "");
183 SYSCTL_INT(_hw_apic, OID_AUTO, timer_tsc_deadline, CTLFLAG_RD,
184     &lapic_timer_tsc_deadline, 0, "");
185
186 static uint32_t
187 lapic_read32(enum LAPIC_REGISTERS reg)
188 {
189         uint32_t res;
190
191         if (x2apic_mode) {
192                 res = rdmsr32(MSR_APIC_000 + reg);
193         } else {
194                 res = *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL);
195         }
196         return (res);
197 }
198
199 static void
200 lapic_write32(enum LAPIC_REGISTERS reg, uint32_t val)
201 {
202
203         if (x2apic_mode) {
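                /*
                 * WRMSR to an x2APIC register is not a serializing
                 * instruction; the fence keeps the APIC write ordered after
                 * earlier stores (e.g. data an IPI recipient must observe).
                 */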
204                 mfence();
205                 wrmsr(MSR_APIC_000 + reg, val);
206         } else {
207                 *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
208         }
209 }
210
211 static void
212 lapic_write32_nofence(enum LAPIC_REGISTERS reg, uint32_t val)
213 {
214
215         if (x2apic_mode) {
216                 wrmsr(MSR_APIC_000 + reg, val);
217         } else {
218                 *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
219         }
220 }
221
222 #ifdef SMP
223 static uint64_t
224 lapic_read_icr(void)
225 {
226         uint64_t v;
227         uint32_t vhi, vlo;
228
229         if (x2apic_mode) {
230                 v = rdmsr(MSR_APIC_000 + LAPIC_ICR_LO);
231         } else {
232                 vhi = lapic_read32(LAPIC_ICR_HI);
233                 vlo = lapic_read32(LAPIC_ICR_LO);
234                 v = ((uint64_t)vhi << 32) | vlo;
235         }
236         return (v);
237 }
238
239 static uint64_t
240 lapic_read_icr_lo(void)
241 {
242
243         return (lapic_read32(LAPIC_ICR_LO));
244 }
245
246 static void
247 lapic_write_icr(uint32_t vhi, uint32_t vlo)
248 {
249         uint64_t v;
250
251         if (x2apic_mode) {
252                 v = ((uint64_t)vhi << 32) | vlo;
253                 mfence();
254                 wrmsr(MSR_APIC_000 + LAPIC_ICR_LO, v);
255         } else {
256                 lapic_write32(LAPIC_ICR_HI, vhi);
257                 lapic_write32(LAPIC_ICR_LO, vlo);
258         }
259 }
260 #endif /* SMP */
261
262 static void
263 native_lapic_enable_x2apic(void)
264 {
265         uint64_t apic_base;
266
267         apic_base = rdmsr(MSR_APICBASE);
268         apic_base |= APICBASE_X2APIC | APICBASE_ENABLED;
269         wrmsr(MSR_APICBASE, apic_base);
270 }
271
272 static void     lapic_enable(void);
273 static void     lapic_resume(struct pic *pic, bool suspend_cancelled);
274 static void     lapic_timer_oneshot(struct lapic *);
275 static void     lapic_timer_oneshot_nointr(struct lapic *, uint32_t);
276 static void     lapic_timer_periodic(struct lapic *);
277 static void     lapic_timer_deadline(struct lapic *);
278 static void     lapic_timer_stop(struct lapic *);
279 static void     lapic_timer_set_divisor(u_int divisor);
280 static uint32_t lvt_mode(struct lapic *la, u_int pin, uint32_t value);
281 static int      lapic_et_start(struct eventtimer *et,
282                     sbintime_t first, sbintime_t period);
283 static int      lapic_et_stop(struct eventtimer *et);
284 static u_int    apic_idt_to_irq(u_int apic_id, u_int vector);
285 static void     lapic_set_tpr(u_int vector);
286
287 struct pic lapic_pic = { .pic_resume = lapic_resume };
288
289 /* Forward declarations for apic_ops */
290 static void     native_lapic_create(u_int apic_id, int boot_cpu);
291 static void     native_lapic_init(vm_paddr_t addr);
292 static void     native_lapic_xapic_mode(void);
293 static void     native_lapic_setup(int boot);
294 static void     native_lapic_dump(const char *str);
295 static void     native_lapic_disable(void);
296 static void     native_lapic_eoi(void);
297 static int      native_lapic_id(void);
298 static int      native_lapic_intr_pending(u_int vector);
299 static u_int    native_apic_cpuid(u_int apic_id);
300 static u_int    native_apic_alloc_vector(u_int apic_id, u_int irq);
301 static u_int    native_apic_alloc_vectors(u_int apic_id, u_int *irqs,
302                     u_int count, u_int align);
303 static void     native_apic_disable_vector(u_int apic_id, u_int vector);
304 static void     native_apic_enable_vector(u_int apic_id, u_int vector);
305 static void     native_apic_free_vector(u_int apic_id, u_int vector, u_int irq);
306 static void     native_lapic_set_logical_id(u_int apic_id, u_int cluster,
307                     u_int cluster_id);
308 static int      native_lapic_enable_pmc(void);
309 static void     native_lapic_disable_pmc(void);
310 static void     native_lapic_reenable_pmc(void);
311 static void     native_lapic_enable_cmc(void);
312 static int      native_lapic_set_lvt_mask(u_int apic_id, u_int lvt,
313                     u_char masked);
314 static int      native_lapic_set_lvt_mode(u_int apic_id, u_int lvt,
315                     uint32_t mode);
316 static int      native_lapic_set_lvt_polarity(u_int apic_id, u_int lvt,
317                     enum intr_polarity pol);
318 static int      native_lapic_set_lvt_triggermode(u_int apic_id, u_int lvt,
319                     enum intr_trigger trigger);
320 #ifdef SMP
321 static void     native_lapic_ipi_raw(register_t icrlo, u_int dest);
322 static void     native_lapic_ipi_vectored(u_int vector, int dest);
323 static int      native_lapic_ipi_wait(int delay);
324 #endif /* SMP */
325 static int      native_lapic_ipi_alloc(inthand_t *ipifunc);
326 static void     native_lapic_ipi_free(int vector);
327
328 struct apic_ops apic_ops = {
329         .create                 = native_lapic_create,
330         .init                   = native_lapic_init,
331         .xapic_mode             = native_lapic_xapic_mode,
332         .setup                  = native_lapic_setup,
333         .dump                   = native_lapic_dump,
334         .disable                = native_lapic_disable,
335         .eoi                    = native_lapic_eoi,
336         .id                     = native_lapic_id,
337         .intr_pending           = native_lapic_intr_pending,
338         .set_logical_id         = native_lapic_set_logical_id,
339         .cpuid                  = native_apic_cpuid,
340         .alloc_vector           = native_apic_alloc_vector,
341         .alloc_vectors          = native_apic_alloc_vectors,
342         .enable_vector          = native_apic_enable_vector,
343         .disable_vector         = native_apic_disable_vector,
344         .free_vector            = native_apic_free_vector,
345         .enable_pmc             = native_lapic_enable_pmc,
346         .disable_pmc            = native_lapic_disable_pmc,
347         .reenable_pmc           = native_lapic_reenable_pmc,
348         .enable_cmc             = native_lapic_enable_cmc,
349 #ifdef SMP
350         .ipi_raw                = native_lapic_ipi_raw,
351         .ipi_vectored           = native_lapic_ipi_vectored,
352         .ipi_wait               = native_lapic_ipi_wait,
353 #endif
354         .ipi_alloc              = native_lapic_ipi_alloc,
355         .ipi_free               = native_lapic_ipi_free,
356         .set_lvt_mask           = native_lapic_set_lvt_mask,
357         .set_lvt_mode           = native_lapic_set_lvt_mode,
358         .set_lvt_polarity       = native_lapic_set_lvt_polarity,
359         .set_lvt_triggermode    = native_lapic_set_lvt_triggermode,
360 };
361
362 static uint32_t
363 lvt_mode(struct lapic *la, u_int pin, uint32_t value)
364 {
365         struct lvt *lvt;
366
367         KASSERT(pin <= APIC_LVT_MAX, ("%s: pin %u out of range", __func__, pin));
368         if (la->la_lvts[pin].lvt_active)
369                 lvt = &la->la_lvts[pin];
370         else
371                 lvt = &lvts[pin];
372
373         value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
374             APIC_LVT_VECTOR);
375         if (lvt->lvt_edgetrigger == 0)
376                 value |= APIC_LVT_TM;
377         if (lvt->lvt_activehi == 0)
378                 value |= APIC_LVT_IIPP_INTALO;
379         if (lvt->lvt_masked)
380                 value |= APIC_LVT_M;
381         value |= lvt->lvt_mode;
382         switch (lvt->lvt_mode) {
383         case APIC_LVT_DM_NMI:
384         case APIC_LVT_DM_SMI:
385         case APIC_LVT_DM_INIT:
386         case APIC_LVT_DM_EXTINT:
387                 if (!lvt->lvt_edgetrigger && bootverbose) {
388                         printf("lapic%u: Forcing LINT%u to edge trigger\n",
389                             la->la_id, pin);
390                         value |= APIC_LVT_TM;
391                 }
392                 /* Use a vector of 0. */
393                 break;
394         case APIC_LVT_DM_FIXED:
395                 value |= lvt->lvt_vector;
396                 break;
397         default:
398                 panic("bad APIC LVT delivery mode: %#x\n", value);
399         }
400         return (value);
401 }
402
403 /*
404  * Map the local APIC and setup necessary interrupt vectors.
405  */
406 static void
407 native_lapic_init(vm_paddr_t addr)
408 {
409 #ifdef SMP
410         uint64_t r, r1, r2, rx;
411 #endif
412         uint32_t ver;
413         u_int regs[4];
414         int i, arat;
415
416         /*
417          * Enable x2APIC mode if possible. Map the local APIC
418          * registers page.
419          *
420          * Keep the LAPIC registers page mapped uncached for x2APIC
421          * mode too, so that the direct map page attribute is set to
422          * uncached.  This is needed to work around CPU errata present
423          * on all Intel processors.
424          */
425         KASSERT(trunc_page(addr) == addr,
426             ("local APIC not aligned on a page boundary"));
427         lapic_paddr = addr;
428         lapic_map = pmap_mapdev(addr, PAGE_SIZE);
429         if (x2apic_mode) {
430                 native_lapic_enable_x2apic();
431                 lapic_map = NULL;
432         }
433
434         /* Setup the spurious interrupt handler. */
435         setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
436             GSEL_APIC);
437
438         /* Perform basic initialization of the BSP's local APIC. */
439         lapic_enable();
440
441         /* Set BSP's per-CPU local APIC ID. */
442         PCPU_SET(apic_id, lapic_id());
443
444         /* Local APIC timer interrupt. */
445         setidt(APIC_TIMER_INT, IDTVEC(timerint), SDT_APIC, SEL_KPL, GSEL_APIC);
446
447         /* Local APIC error interrupt. */
448         setidt(APIC_ERROR_INT, IDTVEC(errorint), SDT_APIC, SEL_KPL, GSEL_APIC);
449
450         /* XXX: Thermal interrupt */
451
452         /* Local APIC CMCI. */
453         setidt(APIC_CMC_INT, IDTVEC(cmcint), SDT_APICT, SEL_KPL, GSEL_APIC);
454
455         if ((resource_int_value("apic", 0, "clock", &i) != 0 || i != 0)) {
456                 arat = 0;
457                 /* Intel CPUID 0x06 EAX[2] set if APIC timer runs in C3. */
458                 if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_high >= 6) {
459                         do_cpuid(0x06, regs);
460                         if ((regs[0] & CPUTPM1_ARAT) != 0)
461                                 arat = 1;
462                 }
463                 bzero(&lapic_et, sizeof(lapic_et));
464                 lapic_et.et_name = "LAPIC";
465                 lapic_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
466                     ET_FLAGS_PERCPU;
467                 lapic_et.et_quality = 600;
468                 if (!arat) {
469                         lapic_et.et_flags |= ET_FLAGS_C3STOP;
470                         lapic_et.et_quality -= 200;
471                 } else if ((cpu_feature & CPUID_TSC) != 0 &&
472                     (cpu_feature2 & CPUID2_TSCDLT) != 0 &&
473                     tsc_is_invariant && tsc_freq != 0) {
474                         lapic_timer_tsc_deadline = 1;
475                         TUNABLE_INT_FETCH("hw.lapic_tsc_deadline",
476                             &lapic_timer_tsc_deadline);
477                 }
478
479                 lapic_et.et_frequency = 0;
480                 /* We don't know the frequency yet, so these are initial guesses. */
481                 lapic_et.et_min_period = 0x00001000LL;
482                 lapic_et.et_max_period = SBT_1S;
483                 lapic_et.et_start = lapic_et_start;
484                 lapic_et.et_stop = lapic_et_stop;
485                 lapic_et.et_priv = NULL;
486                 et_register(&lapic_et);
487         }
488
489         /*
490          * Set lapic_eoi_suppression after lapic_enable(), to not
491          * enable suppression in the hardware prematurely.  Note that
492          * we enable suppression by default even when the system has only
493          * one I/O APIC, since otherwise the EOI is broadcast to all APIC
494          * agents, including CPUs.
495          *
496          * It seems that at least some KVM versions report the
497          * EOI_SUPPRESSION bit, but auto-EOI does not work.
498          */
499         ver = lapic_read32(LAPIC_VERSION);
500         if ((ver & APIC_VER_EOI_SUPPRESSION) != 0) {
501                 lapic_eoi_suppression = 1;
502                 if (vm_guest == VM_GUEST_KVM) {
503                         if (bootverbose)
504                                 printf(
505                        "KVM -- disabling lapic eoi suppression\n");
506                         lapic_eoi_suppression = 0;
507                 }
508                 TUNABLE_INT_FETCH("hw.lapic_eoi_suppression",
509                     &lapic_eoi_suppression);
510         }
511
512 #ifdef SMP
513 #define LOOPS   100000
514         /*
515          * Calibrate the busy loop waiting for IPI ack in xAPIC mode.
516          * lapic_ipi_wait_mult contains the number of iterations which
517          * approximately delay execution for 1 microsecond (the
518          * argument to native_lapic_ipi_wait() is in microseconds).
519          *
520          * We assume that TSC is present and already measured.
521          * Possible TSC frequency jumps are irrelevant to the
522          * calibration loop below, the CPU clock management code is
523          * not yet started, and we do not enter sleep states.
524          */
525         KASSERT((cpu_feature & CPUID_TSC) != 0 && tsc_freq != 0,
526             ("TSC not initialized"));
527         if (!x2apic_mode) {
528                 r = rdtsc();
529                 for (rx = 0; rx < LOOPS; rx++) {
530                         (void)lapic_read_icr_lo();
531                         ia32_pause();
532                 }
533                 r = rdtsc() - r;
534                 r1 = tsc_freq * LOOPS;
535                 r2 = r * 1000000;
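                /*
                 * Derivation: the LOOPS ICR reads above took r TSC cycles,
                 * i.e. r / tsc_freq seconds, so the number of iterations per
                 * microsecond is LOOPS * tsc_freq / (r * 1000000), which is
                 * r1 / r2 below, clamped to at least 1.
                 */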
536                 lapic_ipi_wait_mult = r1 >= r2 ? r1 / r2 : 1;
537                 if (bootverbose) {
538                         printf("LAPIC: ipi_wait() us multiplier %ju (r %ju "
539                             "tsc %ju)\n", (uintmax_t)lapic_ipi_wait_mult,
540                             (uintmax_t)r, (uintmax_t)tsc_freq);
541                 }
542         }
543 #undef LOOPS
544 #endif /* SMP */
545 }
546
547 /*
548  * Create a local APIC instance.
549  */
550 static void
551 native_lapic_create(u_int apic_id, int boot_cpu)
552 {
553         int i;
554
555         if (apic_id > MAX_APIC_ID) {
556                 printf("APIC: Ignoring local APIC with ID %d\n", apic_id);
557                 if (boot_cpu)
558                         panic("Can't ignore BSP");
559                 return;
560         }
561         KASSERT(!lapics[apic_id].la_present, ("duplicate local APIC %u",
562             apic_id));
563
564         /*
565          * Assume no local LVT overrides and a cluster of 0 and
566          * intra-cluster ID of 0.
567          */
568         lapics[apic_id].la_present = 1;
569         lapics[apic_id].la_id = apic_id;
570         for (i = 0; i <= APIC_LVT_MAX; i++) {
571                 lapics[apic_id].la_lvts[i] = lvts[i];
572                 lapics[apic_id].la_lvts[i].lvt_active = 0;
573         }
574         for (i = 0; i <= APIC_NUM_IOINTS; i++)
575             lapics[apic_id].la_ioint_irqs[i] = -1;
576         lapics[apic_id].la_ioint_irqs[IDT_SYSCALL - APIC_IO_INTS] = IRQ_SYSCALL;
577         lapics[apic_id].la_ioint_irqs[APIC_TIMER_INT - APIC_IO_INTS] =
578             IRQ_TIMER;
579 #ifdef KDTRACE_HOOKS
580         lapics[apic_id].la_ioint_irqs[IDT_DTRACE_RET - APIC_IO_INTS] =
581             IRQ_DTRACE_RET;
582 #endif
583 #ifdef XENHVM
584         lapics[apic_id].la_ioint_irqs[IDT_EVTCHN - APIC_IO_INTS] = IRQ_EVTCHN;
585 #endif
586
587
588 #ifdef SMP
589         cpu_add(apic_id, boot_cpu);
590 #endif
591 }
592
593 /*
594  * Dump contents of local APIC registers
595  */
596 static void
597 native_lapic_dump(const char* str)
598 {
599         uint32_t maxlvt;
600
601         maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
602         printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
603         printf("     ID: 0x%08x   VER: 0x%08x LDR: 0x%08x DFR: 0x%08x",
604             lapic_read32(LAPIC_ID), lapic_read32(LAPIC_VERSION),
605             lapic_read32(LAPIC_LDR), x2apic_mode ? 0 : lapic_read32(LAPIC_DFR));
606         if ((cpu_feature2 & CPUID2_X2APIC) != 0)
607                 printf(" x2APIC: %d", x2apic_mode);
608         printf("\n  lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
609             lapic_read32(LAPIC_LVT_LINT0), lapic_read32(LAPIC_LVT_LINT1),
610             lapic_read32(LAPIC_TPR), lapic_read32(LAPIC_SVR));
611         printf("  timer: 0x%08x therm: 0x%08x err: 0x%08x",
612             lapic_read32(LAPIC_LVT_TIMER), lapic_read32(LAPIC_LVT_THERMAL),
613             lapic_read32(LAPIC_LVT_ERROR));
614         if (maxlvt >= APIC_LVT_PMC)
615                 printf(" pmc: 0x%08x", lapic_read32(LAPIC_LVT_PCINT));
616         printf("\n");
617         if (maxlvt >= APIC_LVT_CMCI)
618                 printf("   cmci: 0x%08x\n", lapic_read32(LAPIC_LVT_CMCI));
619 }
620
621 static void
622 native_lapic_xapic_mode(void)
623 {
624         register_t saveintr;
625
626         saveintr = intr_disable();
627         if (x2apic_mode)
628                 native_lapic_enable_x2apic();
629         intr_restore(saveintr);
630 }
631
632 static void
633 native_lapic_setup(int boot)
634 {
635         struct lapic *la;
636         uint32_t maxlvt;
637         register_t saveintr;
638         char buf[MAXCOMLEN + 1];
639
640         saveintr = intr_disable();
641
642         la = &lapics[lapic_id()];
643         KASSERT(la->la_present, ("missing APIC structure"));
644         maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
645
646         /* Initialize the TPR to allow all interrupts. */
647         lapic_set_tpr(0);
648
649         /* Setup spurious vector and enable the local APIC. */
650         lapic_enable();
651
652         /* Program LINT[01] LVT entries. */
653         lapic_write32(LAPIC_LVT_LINT0, lvt_mode(la, APIC_LVT_LINT0,
654             lapic_read32(LAPIC_LVT_LINT0)));
655         lapic_write32(LAPIC_LVT_LINT1, lvt_mode(la, APIC_LVT_LINT1,
656             lapic_read32(LAPIC_LVT_LINT1)));
657
658         /* Program the PMC LVT entry if present. */
659         if (maxlvt >= APIC_LVT_PMC) {
660                 lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
661                     LAPIC_LVT_PCINT));
662         }
663
664         /* Program timer LVT and setup handler. */
665         la->lvt_timer_base = lvt_mode(la, APIC_LVT_TIMER,
666             lapic_read32(LAPIC_LVT_TIMER));
667         la->lvt_timer_last = la->lvt_timer_base;
668         lapic_write32(LAPIC_LVT_TIMER, la->lvt_timer_base);
669         if (boot) {
670                 snprintf(buf, sizeof(buf), "cpu%d:timer", PCPU_GET(cpuid));
671                 intrcnt_add(buf, &la->la_timer_count);
672         }
673
674         /* Setup the timer if configured. */
675         if (la->la_timer_mode != LAT_MODE_UNDEF) {
676                 KASSERT(la->la_timer_period != 0, ("lapic%u: zero divisor",
677                     lapic_id()));
678                 switch (la->la_timer_mode) {
679                 case LAT_MODE_PERIODIC:
680                         lapic_timer_set_divisor(lapic_timer_divisor);
681                         lapic_timer_periodic(la);
682                         break;
683                 case LAT_MODE_ONESHOT:
684                         lapic_timer_set_divisor(lapic_timer_divisor);
685                         lapic_timer_oneshot(la);
686                         break;
687                 case LAT_MODE_DEADLINE:
688                         lapic_timer_deadline(la);
689                         break;
690                 default:
691                         panic("corrupted la_timer_mode %p %d", la,
692                             la->la_timer_mode);
693                 }
694         }
695
696         /* Program error LVT and clear any existing errors. */
697         lapic_write32(LAPIC_LVT_ERROR, lvt_mode(la, APIC_LVT_ERROR,
698             lapic_read32(LAPIC_LVT_ERROR)));
699         lapic_write32(LAPIC_ESR, 0);
700
701         /* XXX: Thermal LVT */
702
703         /* Program the CMCI LVT entry if present. */
704         if (maxlvt >= APIC_LVT_CMCI) {
705                 lapic_write32(LAPIC_LVT_CMCI, lvt_mode(la, APIC_LVT_CMCI,
706                     lapic_read32(LAPIC_LVT_CMCI)));
707         }
708
709         intr_restore(saveintr);
710 }
711
712 static void
713 native_lapic_reenable_pmc(void)
714 {
715 #ifdef HWPMC_HOOKS
716         uint32_t value;
717
718         value = lapic_read32(LAPIC_LVT_PCINT);
719         value &= ~APIC_LVT_M;
720         lapic_write32(LAPIC_LVT_PCINT, value);
721 #endif
722 }
723
724 #ifdef HWPMC_HOOKS
725 static void
726 lapic_update_pmc(void *dummy)
727 {
728         struct lapic *la;
729
730         la = &lapics[lapic_id()];
731         lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
732             lapic_read32(LAPIC_LVT_PCINT)));
733 }
734 #endif
735
736 static int
737 native_lapic_enable_pmc(void)
738 {
739 #ifdef HWPMC_HOOKS
740         u_int32_t maxlvt;
741
742         /* Fail if the local APIC is not present. */
743         if (!x2apic_mode && lapic_map == NULL)
744                 return (0);
745
746         /* Fail if the PMC LVT is not present. */
747         maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
748         if (maxlvt < APIC_LVT_PMC)
749                 return (0);
750
751         lvts[APIC_LVT_PMC].lvt_masked = 0;
752
753 #ifdef EARLY_AP_STARTUP
754         MPASS(mp_ncpus == 1 || smp_started);
755         smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
756 #else
757 #ifdef SMP
758         /*
759          * If hwpmc was loaded at boot time then the APs may not be
760          * started yet.  In that case, don't forward the request to
761          * them as they will program the lvt when they start.
762          */
763         if (smp_started)
764                 smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
765         else
766 #endif
767                 lapic_update_pmc(NULL);
768 #endif
769         return (1);
770 #else
771         return (0);
772 #endif
773 }
774
775 static void
776 native_lapic_disable_pmc(void)
777 {
778 #ifdef HWPMC_HOOKS
779         u_int32_t maxlvt;
780
781         /* Fail if the local APIC is not present. */
782         if (!x2apic_mode && lapic_map == NULL)
783                 return;
784
785         /* Fail if the PMC LVT is not present. */
786         maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
787         if (maxlvt < APIC_LVT_PMC)
788                 return;
789
790         lvts[APIC_LVT_PMC].lvt_masked = 1;
791
792 #ifdef SMP
793         /* The APs should always be started when hwpmc is unloaded. */
794         KASSERT(mp_ncpus == 1 || smp_started, ("hwpmc unloaded too early"));
795 #endif
796         smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
797 #endif
798 }
799
800 static void
801 lapic_calibrate_initcount(struct eventtimer *et, struct lapic *la)
802 {
803         u_long value;
804
805         /* Start off with a divisor of 2 (power on reset default). */
806         lapic_timer_divisor = 2;
807         /* Try to calibrate the local APIC timer. */
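        /*
         * Illustrative numbers (assumed, not measured): with a 100 MHz timer
         * input and divisor 2 the counter ticks at 50 MHz, so after
         * DELAY(1000000) about 50000000 counts have elapsed and count_freq
         * ends up as 50000000 Hz.  If the counter instead expired within the
         * second (value == APIC_TIMER_MAX_COUNT), the divisor is doubled and
         * the measurement is retried.
         */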
808         do {
809                 lapic_timer_set_divisor(lapic_timer_divisor);
810                 lapic_timer_oneshot_nointr(la, APIC_TIMER_MAX_COUNT);
811                 DELAY(1000000);
812                 value = APIC_TIMER_MAX_COUNT - lapic_read32(LAPIC_CCR_TIMER);
813                 if (value != APIC_TIMER_MAX_COUNT)
814                         break;
815                 lapic_timer_divisor <<= 1;
816         } while (lapic_timer_divisor <= 128);
817         if (lapic_timer_divisor > 128)
818                 panic("lapic: Divisor too big");
819         if (bootverbose) {
820                 printf("lapic: Divisor %lu, Frequency %lu Hz\n",
821                     lapic_timer_divisor, value);
822         }
823         count_freq = value;
824 }
825
826 static void
827 lapic_calibrate_deadline(struct eventtimer *et, struct lapic *la __unused)
828 {
829
830         if (bootverbose) {
831                 printf("lapic: deadline tsc mode, Frequency %ju Hz\n",
832                     (uintmax_t)tsc_freq);
833         }
834 }
835
836 static void
837 lapic_change_mode(struct eventtimer *et, struct lapic *la,
838     enum lat_timer_mode newmode)
839 {
840
841         if (la->la_timer_mode == newmode)
842                 return;
843         switch (newmode) {
844         case LAT_MODE_PERIODIC:
845                 lapic_timer_set_divisor(lapic_timer_divisor);
846                 et->et_frequency = count_freq;
847                 break;
848         case LAT_MODE_DEADLINE:
849                 et->et_frequency = tsc_freq;
850                 break;
851         case LAT_MODE_ONESHOT:
852                 lapic_timer_set_divisor(lapic_timer_divisor);
853                 et->et_frequency = count_freq;
854                 break;
855         default:
856                 panic("lapic_change_mode %d", newmode);
857         }
858         la->la_timer_mode = newmode;
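        /*
         * sbintime_t is a 32.32 fixed-point value, so N counts of the newly
         * selected clock correspond to (N << 32) / et_frequency; the usable
         * range is 2 to 0xfffffffe counts.
         */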
859         et->et_min_period = (0x00000002LLU << 32) / et->et_frequency;
860         et->et_max_period = (0xfffffffeLLU << 32) / et->et_frequency;
861 }
862
863 static int
864 lapic_et_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
865 {
866         struct lapic *la;
867
868         la = &lapics[PCPU_GET(apic_id)];
869         if (et->et_frequency == 0) {
870                 lapic_calibrate_initcount(et, la);
871                 if (lapic_timer_tsc_deadline)
872                         lapic_calibrate_deadline(et, la);
873         }
874         if (period != 0) {
875                 lapic_change_mode(et, la, LAT_MODE_PERIODIC);
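                /*
                 * period is a 32.32 sbintime_t, so the reload count is
                 * et_frequency * period / 2^32.
                 */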
876                 la->la_timer_period = ((uint32_t)et->et_frequency * period) >>
877                     32;
878                 lapic_timer_periodic(la);
879         } else if (lapic_timer_tsc_deadline) {
880                 lapic_change_mode(et, la, LAT_MODE_DEADLINE);
881                 la->la_timer_period = (et->et_frequency * first) >> 32;
882                 lapic_timer_deadline(la);
883         } else {
884                 lapic_change_mode(et, la, LAT_MODE_ONESHOT);
885                 la->la_timer_period = ((uint32_t)et->et_frequency * first) >>
886                     32;
887                 lapic_timer_oneshot(la);
888         }
889         return (0);
890 }
891
892 static int
893 lapic_et_stop(struct eventtimer *et)
894 {
895         struct lapic *la;
896
897         la = &lapics[PCPU_GET(apic_id)];
898         lapic_timer_stop(la);
899         la->la_timer_mode = LAT_MODE_UNDEF;
900         return (0);
901 }
902
903 static void
904 native_lapic_disable(void)
905 {
906         uint32_t value;
907
908         /* Software disable the local APIC. */
909         value = lapic_read32(LAPIC_SVR);
910         value &= ~APIC_SVR_SWEN;
911         lapic_write32(LAPIC_SVR, value);
912 }
913
914 static void
915 lapic_enable(void)
916 {
917         uint32_t value;
918
919         /* Program the spurious vector to enable the local APIC. */
920         value = lapic_read32(LAPIC_SVR);
921         value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
922         value |= APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT;
923         if (lapic_eoi_suppression)
924                 value |= APIC_SVR_EOI_SUPPRESSION;
925         lapic_write32(LAPIC_SVR, value);
926 }
927
928 /* Reset the local APIC on the BSP during resume. */
929 static void
930 lapic_resume(struct pic *pic, bool suspend_cancelled)
931 {
932
933         lapic_setup(0);
934 }
935
936 static int
937 native_lapic_id(void)
938 {
939         uint32_t v;
940
941         KASSERT(x2apic_mode || lapic_map != NULL, ("local APIC is not mapped"));
942         v = lapic_read32(LAPIC_ID);
943         if (!x2apic_mode)
944                 v >>= APIC_ID_SHIFT;
945         return (v);
946 }
947
948 static int
949 native_lapic_intr_pending(u_int vector)
950 {
951         uint32_t irr;
952
953         /*
954          * The IRR registers are an array of registers each of which
955          * only describes 32 interrupts in the low 32 bits.  Thus, we
956          * divide the vector by 32 to get the register index.
957          * Finally, we take the vector modulo 32 to determine the
958          * individual bit to test.
959          */
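        /* For example, vector 74 is tracked in LAPIC_IRR2 (74 / 32), bit 10. */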
960         irr = lapic_read32(LAPIC_IRR0 + vector / 32);
961         return (irr & 1 << (vector % 32));
962 }
963
964 static void
965 native_lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
966 {
967         struct lapic *la;
968
969         KASSERT(lapics[apic_id].la_present, ("%s: APIC %u doesn't exist",
970             __func__, apic_id));
971         KASSERT(cluster <= APIC_MAX_CLUSTER, ("%s: cluster %u too big",
972             __func__, cluster));
973         KASSERT(cluster_id <= APIC_MAX_INTRACLUSTER_ID,
974             ("%s: intra cluster id %u too big", __func__, cluster_id));
975         la = &lapics[apic_id];
976         la->la_cluster = cluster;
977         la->la_cluster_id = cluster_id;
978 }
979
980 static int
981 native_lapic_set_lvt_mask(u_int apic_id, u_int pin, u_char masked)
982 {
983
984         if (pin > APIC_LVT_MAX)
985                 return (EINVAL);
986         if (apic_id == APIC_ID_ALL) {
987                 lvts[pin].lvt_masked = masked;
988                 if (bootverbose)
989                         printf("lapic:");
990         } else {
991                 KASSERT(lapics[apic_id].la_present,
992                     ("%s: missing APIC %u", __func__, apic_id));
993                 lapics[apic_id].la_lvts[pin].lvt_masked = masked;
994                 lapics[apic_id].la_lvts[pin].lvt_active = 1;
995                 if (bootverbose)
996                         printf("lapic%u:", apic_id);
997         }
998         if (bootverbose)
999                 printf(" LINT%u %s\n", pin, masked ? "masked" : "unmasked");
1000         return (0);
1001 }
1002
1003 static int
1004 native_lapic_set_lvt_mode(u_int apic_id, u_int pin, u_int32_t mode)
1005 {
1006         struct lvt *lvt;
1007
1008         if (pin > APIC_LVT_MAX)
1009                 return (EINVAL);
1010         if (apic_id == APIC_ID_ALL) {
1011                 lvt = &lvts[pin];
1012                 if (bootverbose)
1013                         printf("lapic:");
1014         } else {
1015                 KASSERT(lapics[apic_id].la_present,
1016                     ("%s: missing APIC %u", __func__, apic_id));
1017                 lvt = &lapics[apic_id].la_lvts[pin];
1018                 lvt->lvt_active = 1;
1019                 if (bootverbose)
1020                         printf("lapic%u:", apic_id);
1021         }
1022         lvt->lvt_mode = mode;
1023         switch (mode) {
1024         case APIC_LVT_DM_NMI:
1025         case APIC_LVT_DM_SMI:
1026         case APIC_LVT_DM_INIT:
1027         case APIC_LVT_DM_EXTINT:
1028                 lvt->lvt_edgetrigger = 1;
1029                 lvt->lvt_activehi = 1;
1030                 if (mode == APIC_LVT_DM_EXTINT)
1031                         lvt->lvt_masked = 1;
1032                 else
1033                         lvt->lvt_masked = 0;
1034                 break;
1035         default:
1036                 panic("Unsupported delivery mode: 0x%x\n", mode);
1037         }
1038         if (bootverbose) {
1039                 printf(" Routing ");
1040                 switch (mode) {
1041                 case APIC_LVT_DM_NMI:
1042                         printf("NMI");
1043                         break;
1044                 case APIC_LVT_DM_SMI:
1045                         printf("SMI");
1046                         break;
1047                 case APIC_LVT_DM_INIT:
1048                         printf("INIT");
1049                         break;
1050                 case APIC_LVT_DM_EXTINT:
1051                         printf("ExtINT");
1052                         break;
1053                 }
1054                 printf(" -> LINT%u\n", pin);
1055         }
1056         return (0);
1057 }
1058
1059 static int
1060 native_lapic_set_lvt_polarity(u_int apic_id, u_int pin, enum intr_polarity pol)
1061 {
1062
1063         if (pin > APIC_LVT_MAX || pol == INTR_POLARITY_CONFORM)
1064                 return (EINVAL);
1065         if (apic_id == APIC_ID_ALL) {
1066                 lvts[pin].lvt_activehi = (pol == INTR_POLARITY_HIGH);
1067                 if (bootverbose)
1068                         printf("lapic:");
1069         } else {
1070                 KASSERT(lapics[apic_id].la_present,
1071                     ("%s: missing APIC %u", __func__, apic_id));
1072                 lapics[apic_id].la_lvts[pin].lvt_active = 1;
1073                 lapics[apic_id].la_lvts[pin].lvt_activehi =
1074                     (pol == INTR_POLARITY_HIGH);
1075                 if (bootverbose)
1076                         printf("lapic%u:", apic_id);
1077         }
1078         if (bootverbose)
1079                 printf(" LINT%u polarity: %s\n", pin,
1080                     pol == INTR_POLARITY_HIGH ? "high" : "low");
1081         return (0);
1082 }
1083
1084 static int
1085 native_lapic_set_lvt_triggermode(u_int apic_id, u_int pin,
1086      enum intr_trigger trigger)
1087 {
1088
1089         if (pin > APIC_LVT_MAX || trigger == INTR_TRIGGER_CONFORM)
1090                 return (EINVAL);
1091         if (apic_id == APIC_ID_ALL) {
1092                 lvts[pin].lvt_edgetrigger = (trigger == INTR_TRIGGER_EDGE);
1093                 if (bootverbose)
1094                         printf("lapic:");
1095         } else {
1096                 KASSERT(lapics[apic_id].la_present,
1097                     ("%s: missing APIC %u", __func__, apic_id));
1098                 lapics[apic_id].la_lvts[pin].lvt_edgetrigger =
1099                     (trigger == INTR_TRIGGER_EDGE);
1100                 lapics[apic_id].la_lvts[pin].lvt_active = 1;
1101                 if (bootverbose)
1102                         printf("lapic%u:", apic_id);
1103         }
1104         if (bootverbose)
1105                 printf(" LINT%u trigger: %s\n", pin,
1106                     trigger == INTR_TRIGGER_EDGE ? "edge" : "level");
1107         return (0);
1108 }
1109
1110 /*
1111  * Adjust the TPR of the current CPU so that it blocks all interrupts below
1112  * the passed in vector.
1113  */
1114 static void
1115 lapic_set_tpr(u_int vector)
1116 {
1117 #ifdef CHEAP_TPR
1118         lapic_write32(LAPIC_TPR, vector);
1119 #else
1120         uint32_t tpr;
1121
1122         tpr = lapic_read32(LAPIC_TPR) & ~APIC_TPR_PRIO;
1123         tpr |= vector;
1124         lapic_write32(LAPIC_TPR, tpr);
1125 #endif
1126 }
1127
1128 static void
1129 native_lapic_eoi(void)
1130 {
1131
1132         lapic_write32_nofence(LAPIC_EOI, 0);
1133 }
1134
1135 void
1136 lapic_handle_intr(int vector, struct trapframe *frame)
1137 {
1138         struct intsrc *isrc;
1139
1140         isrc = intr_lookup_source(apic_idt_to_irq(PCPU_GET(apic_id),
1141             vector));
1142         intr_execute_handlers(isrc, frame);
1143 }
1144
1145 void
1146 lapic_handle_timer(struct trapframe *frame)
1147 {
1148         struct lapic *la;
1149         struct trapframe *oldframe;
1150         struct thread *td;
1151
1152         /* Send EOI first thing. */
1153         lapic_eoi();
1154
1155 #if defined(SMP) && !defined(SCHED_ULE)
1156         /*
1157          * Don't do any accounting for the disabled HTT cores, since it
1158          * would provide misleading numbers to userland.
1159          *
1160          * No locking is necessary here, since even if we lose the race
1161          * when hlt_cpus_mask changes it is not a big deal, really.
1162          *
1163          * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
1164          * and unlike other schedulers it actually schedules threads to
1165          * those CPUs.
1166          */
1167         if (CPU_ISSET(PCPU_GET(cpuid), &hlt_cpus_mask))
1168                 return;
1169 #endif
1170
1171         /* Look up our local APIC structure for the tick counters. */
1172         la = &lapics[PCPU_GET(apic_id)];
1173         (*la->la_timer_count)++;
1174         critical_enter();
1175         if (lapic_et.et_active) {
1176                 td = curthread;
1177                 td->td_intr_nesting_level++;
1178                 oldframe = td->td_intr_frame;
1179                 td->td_intr_frame = frame;
1180                 lapic_et.et_event_cb(&lapic_et, lapic_et.et_arg);
1181                 td->td_intr_frame = oldframe;
1182                 td->td_intr_nesting_level--;
1183         }
1184         critical_exit();
1185 }
1186
1187 static void
1188 lapic_timer_set_divisor(u_int divisor)
1189 {
1190
1191         KASSERT(powerof2(divisor), ("lapic: invalid divisor %u", divisor));
1192         KASSERT(ffs(divisor) <= nitems(lapic_timer_divisors),
1193                 ("lapic: invalid divisor %u", divisor));
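        /* E.g. divisor 8: ffs(8) == 4, selecting lapic_timer_divisors[3]. */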
1194         lapic_write32(LAPIC_DCR_TIMER, lapic_timer_divisors[ffs(divisor) - 1]);
1195 }
1196
1197 static void
1198 lapic_timer_oneshot(struct lapic *la)
1199 {
1200         uint32_t value;
1201
1202         value = la->lvt_timer_base;
1203         value &= ~(APIC_LVTT_TM | APIC_LVT_M);
1204         value |= APIC_LVTT_TM_ONE_SHOT;
1205         la->lvt_timer_last = value;
1206         lapic_write32(LAPIC_LVT_TIMER, value);
1207         lapic_write32(LAPIC_ICR_TIMER, la->la_timer_period);
1208 }
1209
1210 static void
1211 lapic_timer_oneshot_nointr(struct lapic *la, uint32_t count)
1212 {
1213         uint32_t value;
1214
1215         value = la->lvt_timer_base;
1216         value &= ~APIC_LVTT_TM;
1217         value |= APIC_LVTT_TM_ONE_SHOT | APIC_LVT_M;
1218         la->lvt_timer_last = value;
1219         lapic_write32(LAPIC_LVT_TIMER, value);
1220         lapic_write32(LAPIC_ICR_TIMER, count);
1221 }
1222
1223 static void
1224 lapic_timer_periodic(struct lapic *la)
1225 {
1226         uint32_t value;
1227
1228         value = la->lvt_timer_base;
1229         value &= ~(APIC_LVTT_TM | APIC_LVT_M);
1230         value |= APIC_LVTT_TM_PERIODIC;
1231         la->lvt_timer_last = value;
1232         lapic_write32(LAPIC_LVT_TIMER, value);
1233         lapic_write32(LAPIC_ICR_TIMER, la->la_timer_period);
1234 }
1235
1236 static void
1237 lapic_timer_deadline(struct lapic *la)
1238 {
1239         uint32_t value;
1240
1241         value = la->lvt_timer_base;
1242         value &= ~(APIC_LVTT_TM | APIC_LVT_M);
1243         value |= APIC_LVTT_TM_TSCDLT;
1244         if (value != la->lvt_timer_last) {
1245                 la->lvt_timer_last = value;
1246                 lapic_write32_nofence(LAPIC_LVT_TIMER, value);
1247                 if (!x2apic_mode)
1248                         mfence();
1249         }
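        /* Arm the next event la_timer_period TSC ticks from now. */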
1250         wrmsr(MSR_TSC_DEADLINE, la->la_timer_period + rdtsc());
1251 }
1252
1253 static void
1254 lapic_timer_stop(struct lapic *la)
1255 {
1256         uint32_t value;
1257
1258         if (la->la_timer_mode == LAT_MODE_DEADLINE) {
1259                 wrmsr(MSR_TSC_DEADLINE, 0);
1260                 mfence();
1261         } else {
1262                 value = la->lvt_timer_base;
1263                 value &= ~APIC_LVTT_TM;
1264                 value |= APIC_LVT_M;
1265                 la->lvt_timer_last = value;
1266                 lapic_write32(LAPIC_LVT_TIMER, value);
1267         }
1268 }
1269
1270 void
1271 lapic_handle_cmc(void)
1272 {
1273
1274         lapic_eoi();
1275         cmc_intr();
1276 }
1277
1278 /*
1279  * Called from the mca_init() to activate the CMC interrupt if this CPU is
1280  * responsible for monitoring any MC banks for CMC events.  Since mca_init()
1281  * is called prior to lapic_setup() during boot, this just needs to unmask
1282  * this CPU's LVT_CMCI entry.
1283  */
1284 static void
1285 native_lapic_enable_cmc(void)
1286 {
1287         u_int apic_id;
1288
1289 #ifdef DEV_ATPIC
1290         if (!x2apic_mode && lapic_map == NULL)
1291                 return;
1292 #endif
1293         apic_id = PCPU_GET(apic_id);
1294         KASSERT(lapics[apic_id].la_present,
1295             ("%s: missing APIC %u", __func__, apic_id));
1296         lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_masked = 0;
1297         lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_active = 1;
1298         if (bootverbose)
1299                 printf("lapic%u: CMCI unmasked\n", apic_id);
1300 }
1301
1302 void
1303 lapic_handle_error(void)
1304 {
1305         uint32_t esr;
1306
1307         /*
1308          * Read the contents of the error status register.  Write to
1309          * the register first before reading from it to force the APIC
1310          * to update its value to indicate any errors that have
1311          * occurred since the previous write to the register.
1312          */
1313         lapic_write32(LAPIC_ESR, 0);
1314         esr = lapic_read32(LAPIC_ESR);
1315
1316         printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
1317         lapic_eoi();
1318 }
1319
1320 static u_int
1321 native_apic_cpuid(u_int apic_id)
1322 {
1323 #ifdef SMP
1324         return apic_cpuids[apic_id];
1325 #else
1326         return 0;
1327 #endif
1328 }
1329
1330 /* Request a free IDT vector to be used by the specified IRQ. */
1331 static u_int
1332 native_apic_alloc_vector(u_int apic_id, u_int irq)
1333 {
1334         u_int vector;
1335
1336         KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
1337
1338         /*
1339          * Search for a free vector.  Currently we just use a very simple
1340          * algorithm to find the first free vector.
1341          */
1342         mtx_lock_spin(&icu_lock);
1343         for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
1344                 if (lapics[apic_id].la_ioint_irqs[vector] != -1)
1345                         continue;
1346                 lapics[apic_id].la_ioint_irqs[vector] = irq;
1347                 mtx_unlock_spin(&icu_lock);
1348                 return (vector + APIC_IO_INTS);
1349         }
1350         mtx_unlock_spin(&icu_lock);
1351         return (0);
1352 }
1353
1354 /*
1355  * Request 'count' free contiguous IDT vectors to be used by 'count'
1356  * IRQs.  'count' must be a power of two and the vectors will be
1357  * aligned on a boundary of 'align'.  If the request cannot be
1358  * satisfied, 0 is returned.
1359  */
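/*
 * Hypothetical usage sketch (for illustration only): a caller needing four
 * contiguous, aligned vectors on the boot CPU would do roughly
 *
 *      first = apic_alloc_vectors(PCPU_GET(apic_id), irqs, 4, 4);
 *
 * and either gets 0 back or a starting vector on a four-vector boundary,
 * with irqs[0..3] recorded for vectors first..first + 3.
 */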
1360 static u_int
1361 native_apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align)
1362 {
1363         u_int first, run, vector;
1364
1365         KASSERT(powerof2(count), ("bad count"));
1366         KASSERT(powerof2(align), ("bad align"));
1367         KASSERT(align >= count, ("align < count"));
1368 #ifdef INVARIANTS
1369         for (run = 0; run < count; run++)
1370                 KASSERT(irqs[run] < NUM_IO_INTS, ("Invalid IRQ %u at index %u",
1371                     irqs[run], run));
1372 #endif
1373
1374         /*
1375          * Search for 'count' free vectors.  As with apic_alloc_vector(),
1376          * this just uses a simple first fit algorithm.
1377          */
1378         run = 0;
1379         first = 0;
1380         mtx_lock_spin(&icu_lock);
1381         for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
1382
1383                 /* Vector is in use, end run. */
1384                 if (lapics[apic_id].la_ioint_irqs[vector] != -1) {
1385                         run = 0;
1386                         first = 0;
1387                         continue;
1388                 }
1389
1390                 /* Start a new run if run == 0 and vector is aligned. */
1391                 if (run == 0) {
1392                         if ((vector & (align - 1)) != 0)
1393                                 continue;
1394                         first = vector;
1395                 }
1396                 run++;
1397
1398                 /* Keep looping if the run isn't long enough yet. */
1399                 if (run < count)
1400                         continue;
1401
1402                 /* Found a run, assign IRQs and return the first vector. */
1403                 for (vector = 0; vector < count; vector++)
1404                         lapics[apic_id].la_ioint_irqs[first + vector] =
1405                             irqs[vector];
1406                 mtx_unlock_spin(&icu_lock);
1407                 return (first + APIC_IO_INTS);
1408         }
1409         mtx_unlock_spin(&icu_lock);
1410         printf("APIC: Couldn't find APIC vectors for %u IRQs\n", count);
1411         return (0);
1412 }
1413
1414 /*
1415  * Enable a vector for a particular apic_id.  Since all local APICs share
1416  * IDT entries and ioint_handlers, this enables the vector on all of them.
1417  * Local APICs which do not have the vector configured will report spurious
1418  * interrupts should it fire.
1419  */
1420 static void
1421 native_apic_enable_vector(u_int apic_id, u_int vector)
1422 {
1423
1424         KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1425         KASSERT(ioint_handlers[vector / 32] != NULL,
1426             ("No ISR handler for vector %u", vector));
1427 #ifdef KDTRACE_HOOKS
1428         KASSERT(vector != IDT_DTRACE_RET,
1429             ("Attempt to overwrite DTrace entry"));
1430 #endif
1431         setidt(vector, ioint_handlers[vector / 32], SDT_APIC, SEL_KPL,
1432             GSEL_APIC);
1433 }
1434
1435 static void
1436 native_apic_disable_vector(u_int apic_id, u_int vector)
1437 {
1438
1439         KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1440 #ifdef KDTRACE_HOOKS
1441         KASSERT(vector != IDT_DTRACE_RET,
1442             ("Attempt to overwrite DTrace entry"));
1443 #endif
1444         KASSERT(ioint_handlers[vector / 32] != NULL,
1445             ("No ISR handler for vector %u", vector));
1446 #ifdef notyet
1447         /*
1448          * We cannot currently clear the IDT entry because other CPUs
1449          * may have a valid vector at this offset.
1450          */
1451         setidt(vector, &IDTVEC(rsvd), SDT_APICT, SEL_KPL, GSEL_APIC);
1452 #endif
1453 }
1454
1455 /* Release an APIC vector when it's no longer in use. */
1456 static void
1457 native_apic_free_vector(u_int apic_id, u_int vector, u_int irq)
1458 {
1459         struct thread *td;
1460
1461         KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1462             vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1463             ("Vector %u does not map to an IRQ line", vector));
1464         KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
1465         KASSERT(lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] ==
1466             irq, ("IRQ mismatch"));
1467 #ifdef KDTRACE_HOOKS
1468         KASSERT(vector != IDT_DTRACE_RET,
1469             ("Attempt to overwrite DTrace entry"));
1470 #endif
1471
1472         /*
1473          * Bind us to the cpu that owned the vector before freeing it so
1474          * we don't lose an interrupt delivery race.
1475          */
1476         td = curthread;
1477         if (!rebooting) {
1478                 thread_lock(td);
1479                 if (sched_is_bound(td))
1480                         panic("apic_free_vector: Thread already bound.\n");
1481                 sched_bind(td, apic_cpuid(apic_id));
1482                 thread_unlock(td);
1483         }
1484         mtx_lock_spin(&icu_lock);
1485         lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] = -1;
1486         mtx_unlock_spin(&icu_lock);
1487         if (!rebooting) {
1488                 thread_lock(td);
1489                 sched_unbind(td);
1490                 thread_unlock(td);
1491         }
1492 }
1493
1494 /* Map an IDT vector (APIC) to an IRQ (interrupt source). */
1495 static u_int
1496 apic_idt_to_irq(u_int apic_id, u_int vector)
1497 {
1498         int irq;
1499
1500         KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1501             vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1502             ("Vector %u does not map to an IRQ line", vector));
1503 #ifdef KDTRACE_HOOKS
1504         KASSERT(vector != IDT_DTRACE_RET,
1505             ("Attempt to overwrite DTrace entry"));
1506 #endif
1507         irq = lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS];
1508         if (irq < 0)
1509                 irq = 0;
1510         return (irq);
1511 }
1512
1513 #ifdef DDB
1514 /*
1515  * Dump data about APIC IDT vector mappings.
1516  */
1517 DB_SHOW_COMMAND(apic, db_show_apic)
1518 {
1519         struct intsrc *isrc;
1520         int i, verbose;
1521         u_int apic_id;
1522         u_int irq;
1523
1524         if (strcmp(modif, "vv") == 0)
1525                 verbose = 2;
1526         else if (strcmp(modif, "v") == 0)
1527                 verbose = 1;
1528         else
1529                 verbose = 0;
1530         for (apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
1531                 if (lapics[apic_id].la_present == 0)
1532                         continue;
1533                 db_printf("Interrupts bound to lapic %u\n", apic_id);
1534                 for (i = 0; i < APIC_NUM_IOINTS + 1 && !db_pager_quit; i++) {
1535                         irq = lapics[apic_id].la_ioint_irqs[i];
1536                         if (irq == -1 || irq == IRQ_SYSCALL)
1537                                 continue;
1538 #ifdef KDTRACE_HOOKS
1539                         if (irq == IRQ_DTRACE_RET)
1540                                 continue;
1541 #endif
1542 #ifdef XENHVM
1543                         if (irq == IRQ_EVTCHN)
1544                                 continue;
1545 #endif
1546                         db_printf("vec 0x%2x -> ", i + APIC_IO_INTS);
1547                         if (irq == IRQ_TIMER)
1548                                 db_printf("lapic timer\n");
1549                         else if (irq < NUM_IO_INTS) {
1550                                 isrc = intr_lookup_source(irq);
1551                                 if (isrc == NULL || verbose == 0)
1552                                         db_printf("IRQ %u\n", irq);
1553                                 else
1554                                         db_dump_intr_event(isrc->is_event,
1555                                             verbose == 2);
1556                         } else
1557                                 db_printf("IRQ %u ???\n", irq);
1558                 }
1559         }
1560 }
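/*
 * Typical invocations from the DDB prompt (modifiers follow the usual
 * DDB "command/modifier" slash syntax):
 *
 *     show apic        terse vector -> IRQ mapping
 *     show apic/v      also dump each interrupt event
 *     show apic/vv     verbose interrupt event dump
 */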
1561
1562 static void
1563 dump_mask(const char *prefix, uint32_t v, int base)
1564 {
1565         int i, first;
1566
1567         first = 1;
1568         for (i = 0; i < 32; i++)
1569                 if (v & (1 << i)) {
1570                         if (first) {
1571                                 db_printf("%s:", prefix);
1572                                 first = 0;
1573                         }
1574                         db_printf(" %02x", base + i);
1575                 }
1576         if (!first)
1577                 db_printf("\n");
1578 }
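/*
 * For example, dump_mask("irr0", 0x00000005, 0) prints "irr0: 00 02":
 * bits 0 and 2 of the register image are set, so vectors base + 0 and
 * base + 2 are reported.
 */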
1579
1580 /* Show info from the lapic regs for this CPU. */
1581 DB_SHOW_COMMAND(lapic, db_show_lapic)
1582 {
1583         uint32_t v;
1584
1585         db_printf("lapic ID = %d\n", lapic_id());
1586         v = lapic_read32(LAPIC_VERSION);
1587         db_printf("version  = %d.%d\n", (v & APIC_VER_VERSION) >> 4,
1588             v & 0xf);
1589         db_printf("max LVT  = %d\n", (v & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
1590         v = lapic_read32(LAPIC_SVR);
1591         db_printf("SVR      = %02x (%s)\n", v & APIC_SVR_VECTOR,
1592             v & APIC_SVR_ENABLE ? "enabled" : "disabled");
1593         db_printf("TPR      = %02x\n", lapic_read32(LAPIC_TPR));
1594
1595 #define dump_field(prefix, regn, index)                                 \
1596         dump_mask(__XSTRING(prefix ## index),                           \
1597             lapic_read32(LAPIC_ ## regn ## index),                      \
1598             index * 32)
1599
1600         db_printf("In-service Interrupts:\n");
1601         dump_field(isr, ISR, 0);
1602         dump_field(isr, ISR, 1);
1603         dump_field(isr, ISR, 2);
1604         dump_field(isr, ISR, 3);
1605         dump_field(isr, ISR, 4);
1606         dump_field(isr, ISR, 5);
1607         dump_field(isr, ISR, 6);
1608         dump_field(isr, ISR, 7);
1609
1610         db_printf("TMR Interrupts:\n");
1611         dump_field(tmr, TMR, 0);
1612         dump_field(tmr, TMR, 1);
1613         dump_field(tmr, TMR, 2);
1614         dump_field(tmr, TMR, 3);
1615         dump_field(tmr, TMR, 4);
1616         dump_field(tmr, TMR, 5);
1617         dump_field(tmr, TMR, 6);
1618         dump_field(tmr, TMR, 7);
1619
1620         db_printf("IRR Interrupts:\n");
1621         dump_field(irr, IRR, 0);
1622         dump_field(irr, IRR, 1);
1623         dump_field(irr, IRR, 2);
1624         dump_field(irr, IRR, 3);
1625         dump_field(irr, IRR, 4);
1626         dump_field(irr, IRR, 5);
1627         dump_field(irr, IRR, 6);
1628         dump_field(irr, IRR, 7);
1629
1630 #undef dump_field
1631 }
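/*
 * The dump_field() macro above works by token pasting, e.g.
 * dump_field(irr, IRR, 1) expands to
 * dump_mask("irr1", lapic_read32(LAPIC_IRR1), 32).  The whole report is
 * produced from DDB with "show lapic".
 */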
1632 #endif
1633
1634 /*
1635  * APIC probing support code.  This includes code to manage enumerators.
1636  */
1637
1638 static SLIST_HEAD(, apic_enumerator) enumerators =
1639         SLIST_HEAD_INITIALIZER(enumerators);
1640 static struct apic_enumerator *best_enum;
1641
1642 void
1643 apic_register_enumerator(struct apic_enumerator *enumerator)
1644 {
1645 #ifdef INVARIANTS
1646         struct apic_enumerator *apic_enum;
1647
1648         SLIST_FOREACH(apic_enum, &enumerators, apic_next) {
1649                 if (apic_enum == enumerator)
1650                         panic("%s: Duplicate register of %s", __func__,
1651                             enumerator->apic_name);
1652         }
1653 #endif
1654         SLIST_INSERT_HEAD(&enumerators, enumerator, apic_next);
1655 }
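/*
 * Minimal sketch of an enumerator registration (names are hypothetical;
 * the real enumerators are the MP Table and ACPI MADT parsers):
 *
 *     static struct apic_enumerator foo_enumerator = {
 *             .apic_name = "FOO",
 *             .apic_probe = foo_probe,
 *             .apic_probe_cpus = foo_probe_cpus,
 *             .apic_setup_local = foo_setup_local,
 *             .apic_setup_io = foo_setup_io,
 *     };
 *
 *     apic_register_enumerator(&foo_enumerator);
 */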
1656
1657 /*
1658  * We have to look for CPUs very early because certain subsystems need
1659  * to know how many CPUs are present extremely early in the boot
1660  * process.
1661  */
1662 static void
1663 apic_init(void *dummy __unused)
1664 {
1665         struct apic_enumerator *enumerator;
1666         int retval, best;
1667
1668         /* We only support built in local APICs. */
1669         if (!(cpu_feature & CPUID_APIC))
1670                 return;
1671
1672         /* Don't probe if APIC mode is disabled. */
1673         if (resource_disabled("apic", 0))
1674                 return;
1675
1676         /* Probe all the enumerators to find the best match. */
1677         best_enum = NULL;
1678         best = 0;
1679         SLIST_FOREACH(enumerator, &enumerators, apic_next) {
1680                 retval = enumerator->apic_probe();
1681                 if (retval > 0)
1682                         continue;
1683                 if (best_enum == NULL || best < retval) {
1684                         best_enum = enumerator;
1685                         best = retval;
1686                 }
1687         }
1688         if (best_enum == NULL) {
1689                 if (bootverbose)
1690                         printf("APIC: Could not find any APICs.\n");
1691 #ifndef DEV_ATPIC
1692                 panic("running without device atpic requires a local APIC");
1693 #endif
1694                 return;
1695         }
1696
1697         if (bootverbose)
1698                 printf("APIC: Using the %s enumerator.\n",
1699                     best_enum->apic_name);
1700
1701 #ifdef I686_CPU
1702         /*
1703          * To work around an erratum, we disable the local APIC on some
1704          * CPUs during early startup.  We need to turn the local APIC back
1705          * on for such CPUs now.
1706          */
1707         ppro_reenable_apic();
1708 #endif
1709
1710         /* Probe the CPUs in the system. */
1711         retval = best_enum->apic_probe_cpus();
1712         if (retval != 0)
1713                 printf("%s: Failed to probe CPUs: returned %d\n",
1714                     best_enum->apic_name, retval);
1715
1716 }
1717 SYSINIT(apic_init, SI_SUB_TUNABLES - 1, SI_ORDER_SECOND, apic_init, NULL);
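/*
 * Probe return convention, as implemented above: a positive return means
 * "not present" and the enumerator is skipped, while zero or a negative
 * value is a bid and the largest bid (closest to zero) wins.  For
 * example, an enumerator returning -50 beats one returning -100, and a
 * return of 0 beats both.
 */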
1718
1719 /*
1720  * Setup the local APIC.  We have to do this prior to starting up the APs
1721  * in the SMP case.
1722  */
1723 static void
1724 apic_setup_local(void *dummy __unused)
1725 {
1726         int retval;
1727
1728         if (best_enum == NULL)
1729                 return;
1730
1731         /* Initialize the local APIC. */
1732         retval = best_enum->apic_setup_local();
1733         if (retval != 0)
1734                 printf("%s: Failed to setup the local APIC: returned %d\n",
1735                     best_enum->apic_name, retval);
1736 }
1737 SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_SECOND, apic_setup_local, NULL);
1738
1739 /*
1740  * Setup the I/O APICs.
1741  */
1742 static void
1743 apic_setup_io(void *dummy __unused)
1744 {
1745         int retval;
1746
1747         if (best_enum == NULL)
1748                 return;
1749
1750         /*
1751          * The local APIC must be registered before other PICs and pseudo
1752          * PICs for proper suspend/resume ordering.
1753          */
1754         intr_register_pic(&lapic_pic);
1755
1756         retval = best_enum->apic_setup_io();
1757         if (retval != 0)
1758                 printf("%s: Failed to setup I/O APICs: returned %d\n",
1759                     best_enum->apic_name, retval);
1760
1761         /*
1762          * Finish setting up the local APIC on the BSP once we know
1763          * how to properly program the LINT pins.  In particular, this
1764          * enables EOI suppression mode if the LAPIC supports it and the
1765          * user has not disabled it.
1766          */
1767         lapic_setup(1);
1768         if (bootverbose)
1769                 lapic_dump("BSP");
1770
1771         /* Enable the MSI "pic". */
1772         init_ops.msi_init();
1773 }
1774 SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_THIRD, apic_setup_io, NULL);
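/*
 * Summary of the ordering established by the three SYSINITs above:
 * apic_init() picks an enumerator and counts CPUs at SI_SUB_TUNABLES - 1,
 * apic_setup_local() initializes the BSP's local APIC at SI_SUB_CPU, and
 * apic_setup_io() registers the local APIC PIC, sets up the I/O APICs and
 * enables MSI at SI_SUB_INTR.
 */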
1775
1776 #ifdef SMP
1777 /*
1778  * Inter Processor Interrupt functions.  The lapic_ipi_*() functions are
1779  * private to the MD code.  The public interface for the rest of the
1780  * kernel is defined in mp_machdep.c.
1781  */
1782
1783 /*
1784  * Wait 'delay' microseconds for the IPI to be sent.  If 'delay' is -1,
1785  * wait forever.
1786  */
1787 static int
1788 native_lapic_ipi_wait(int delay)
1789 {
1790         uint64_t rx;
1791
1792         /* LAPIC_ICR.APIC_DELSTAT_MASK is undefined in x2APIC mode */
1793         if (x2apic_mode)
1794                 return (1);
1795
1796         for (rx = 0; delay == -1 || rx < lapic_ipi_wait_mult * delay; rx++) {
1797                 if ((lapic_read_icr_lo() & APIC_DELSTAT_MASK) ==
1798                     APIC_DELSTAT_IDLE)
1799                         return (1);
1800                 ia32_pause();
1801         }
1802         return (0);
1803 }
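/*
 * The budget is expressed in microseconds and scaled into iterations of
 * the polling loop by lapic_ipi_wait_mult, which is presumably calibrated
 * where the local APIC is initialized.  For example, the
 * lapic_ipi_wait(BEFORE_SPIN) call below polls for roughly 50000us before
 * giving up, while lapic_ipi_wait(-1) never gives up.
 */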
1804
1805 static void
1806 native_lapic_ipi_raw(register_t icrlo, u_int dest)
1807 {
1808         uint64_t icr;
1809         uint32_t vhi, vlo;
1810         register_t saveintr;
1811
1812         /* XXX: Need more sanity checking of icrlo? */
1813         KASSERT(x2apic_mode || lapic_map != NULL,
1814             ("%s called too early", __func__));
1815         KASSERT(x2apic_mode ||
1816             (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
1817             ("%s: invalid dest field", __func__));
1818         KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
1819             ("%s: reserved bits set in ICR LO register", __func__));
1820
1821         /* Set destination in ICR HI register if it is being used. */
1822         if (!x2apic_mode) {
1823                 saveintr = intr_disable();
1824                 icr = lapic_read_icr();
1825         }
1826
1827         if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
1828                 if (x2apic_mode) {
1829                         vhi = dest;
1830                 } else {
1831                         vhi = icr >> 32;
1832                         vhi &= ~APIC_ID_MASK;
1833                         vhi |= dest << APIC_ID_SHIFT;
1834                 }
1835         } else {
1836                 vhi = 0;
1837         }
1838
1839         /* Program the contents of the IPI and dispatch it. */
1840         if (x2apic_mode) {
1841                 vlo = icrlo;
1842         } else {
1843                 vlo = icr;
1844                 vlo &= APIC_ICRLO_RESV_MASK;
1845                 vlo |= icrlo;
1846         }
1847         lapic_write_icr(vhi, vlo);
1848         if (!x2apic_mode)
1849                 intr_restore(saveintr);
1850 }
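/*
 * Illustrative composition only: a fixed-delivery IPI for 'vector', aimed
 * at a single CPU by physical APIC ID, could be sent with
 *
 *     lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_DESTMODE_PHY |
 *         APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT | APIC_DELMODE_FIXED |
 *         vector, apic_id);
 *
 * which is essentially what native_lapic_ipi_vectored() below builds for
 * the non-shorthand destination case.
 */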
1851
1852 #define BEFORE_SPIN     50000
1853 #ifdef DETECT_DEADLOCK
1854 #define AFTER_SPIN      50
1855 #endif
1856
1857 static void
1858 native_lapic_ipi_vectored(u_int vector, int dest)
1859 {
1860         register_t icrlo, destfield;
1861
1862         KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
1863             ("%s: invalid vector %d", __func__, vector));
1864
1865         icrlo = APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT;
1866
1867         /*
1868          * NMI IPIs are just fake vectors used to send an NMI.  Use NMI
1869          * delivery mode if one is passed; otherwise program the requested vector.
1870          */
1871         if (vector >= IPI_NMI_FIRST)
1872                 icrlo |= APIC_DELMODE_NMI;
1873         else
1874                 icrlo |= vector | APIC_DELMODE_FIXED;
1875         destfield = 0;
1876         switch (dest) {
1877         case APIC_IPI_DEST_SELF:
1878                 icrlo |= APIC_DEST_SELF;
1879                 break;
1880         case APIC_IPI_DEST_ALL:
1881                 icrlo |= APIC_DEST_ALLISELF;
1882                 break;
1883         case APIC_IPI_DEST_OTHERS:
1884                 icrlo |= APIC_DEST_ALLESELF;
1885                 break;
1886         default:
1887                 KASSERT(x2apic_mode ||
1888                     (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
1889                     ("%s: invalid destination 0x%x", __func__, dest));
1890                 destfield = dest;
1891         }
1892
1893         /* Wait for an earlier IPI to finish. */
1894         if (!lapic_ipi_wait(BEFORE_SPIN)) {
1895                 if (panicstr != NULL)
1896                         return;
1897                 else
1898                         panic("APIC: Previous IPI is stuck");
1899         }
1900
1901         lapic_ipi_raw(icrlo, destfield);
1902
1903 #ifdef DETECT_DEADLOCK
1904         /* Wait for IPI to be delivered. */
1905         if (!lapic_ipi_wait(AFTER_SPIN)) {
1906 #ifdef needsattention
1907                 /*
1908                  * XXX FIXME:
1909                  *
1910                  * The above function waits for the message to actually be
1911                  * delivered.  It breaks out after an arbitrary timeout
1912                  * since the message should eventually be delivered (at
1913                  * least in theory) and that if it wasn't we would catch
1914                  * the failure with the check above when the next IPI is
1915                  * sent.
1916                  *
1917                  * We could skip this wait entirely, EXCEPT it probably
1918                  * protects us from other routines that assume that the
1919                  * message was delivered and acted upon when this function
1920                  * returns.
1921                  */
1922                 printf("APIC: IPI might be stuck\n");
1923 #else /* !needsattention */
1924                 /* Wait, without a timeout, until the message is sent. */
1925                 while (lapic_read_icr_lo() & APIC_DELSTAT_PEND)
1926                         ia32_pause();
1927 #endif /* needsattention */
1928         }
1929 #endif /* DETECT_DEADLOCK */
1930 }
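/*
 * Usage sketch, assuming the lapic_ipi_vectored() wrapper that the MD IPI
 * code calls through: passing APIC_IPI_DEST_OTHERS as the destination
 * delivers 'vector' to every CPU except the sender, APIC_IPI_DEST_SELF
 * self-delivers it, and passing a specific APIC ID targets just that CPU:
 *
 *     lapic_ipi_vectored(vector, APIC_IPI_DEST_OTHERS);
 */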
1931
1932 #endif /* SMP */
1933
1934 /*
1935  * Since the IDT is shared by all CPUs the IPI slot update needs to be globally
1936  * visible.
1937  *
1938  * Consider the case where an IPI is generated immediately after allocation:
1939  *     vector = lapic_ipi_alloc(ipifunc);
1940  *     ipi_selected(other_cpus, vector);
1941  *
1942  * In xAPIC mode a write to ICR_LO has serializing semantics because the
1943  * APIC page is mapped as an uncached region. In x2APIC mode there is an
1944  * explicit 'mfence' before the ICR MSR is written. Therefore in both cases
1945  * the IDT slot update is globally visible before the IPI is delivered.
1946  */
1947 static int
1948 native_lapic_ipi_alloc(inthand_t *ipifunc)
1949 {
1950         struct gate_descriptor *ip;
1951         long func;
1952         int idx, vector;
1953
1954         KASSERT(ipifunc != &IDTVEC(rsvd), ("invalid ipifunc %p", ipifunc));
1955
1956         vector = -1;
1957         mtx_lock_spin(&icu_lock);
1958         for (idx = IPI_DYN_FIRST; idx <= IPI_DYN_LAST; idx++) {
1959                 ip = &idt[idx];
1960                 func = (ip->gd_hioffset << 16) | ip->gd_looffset;
1961                 if (func == (uintptr_t)&IDTVEC(rsvd)) {
1962                         vector = idx;
1963                         setidt(vector, ipifunc, SDT_APIC, SEL_KPL, GSEL_APIC);
1964                         break;
1965                 }
1966         }
1967         mtx_unlock_spin(&icu_lock);
1968         return (vector);
1969 }
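/*
 * Sketch of the expected alloc/free pairing (ipifunc is the caller's
 * interrupt handler entry; see also the comment above the allocator):
 *
 *     vector = lapic_ipi_alloc(ipifunc);
 *     if (vector == -1)
 *             ... no free slot between IPI_DYN_FIRST and IPI_DYN_LAST ...
 *     ...
 *     lapic_ipi_free(vector);
 */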
1970
1971 static void
1972 native_lapic_ipi_free(int vector)
1973 {
1974         struct gate_descriptor *ip;
1975         long func;
1976
1977         KASSERT(vector >= IPI_DYN_FIRST && vector <= IPI_DYN_LAST,
1978             ("%s: invalid vector %d", __func__, vector));
1979
1980         mtx_lock_spin(&icu_lock);
1981         ip = &idt[vector];
1982         func = (ip->gd_hioffset << 16) | ip->gd_looffset;
1983         KASSERT(func != (uintptr_t)&IDTVEC(rsvd),
1984             ("invalid idtfunc %#lx", func));
1985         setidt(vector, &IDTVEC(rsvd), SDT_APICT, SEL_KPL, GSEL_APIC);
1986         mtx_unlock_spin(&icu_lock);
1987 }