2 * from: vector.s, 386BSD 0.1 unknown origin
7 #include <machine/apic.h>
8 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
/*
 * IRQ bit/index helpers.
 * NOTE(review): IRQ_BIT assumes irq_num is 0..31 (apic_imen and
 * apic_pin_trigger below are tested as single 32-bit words) -- confirm
 * the upper bound against the unelided file.
 */
12 /* convert an absolute IRQ# into a bitmask */
13 #define IRQ_BIT(irq_num) (1 << (irq_num))
15 /* make an index into the IO APIC from the IRQ# */
/* Each redirection-table entry spans two 32-bit registers starting at
   select index 0x10, hence 0x10 + 2*irq. */
16 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
22 pushl $0 ; /* dummy error code */ \
23 pushl $0 ; /* dummy trap type */ \
25 pushl %ds ; /* save data and extra segments ... */ \
37 * Macros for interrupt entry, call to handler, and exit.
/*
 * FAST_INTR(irq_num, vec_name): entry for a "fast" interrupt -- the
 * visible body pushes the handler's unit cookie, calls the handler
 * directly at interrupt time, EOIs the local APIC and updates the
 * interrupt statistics and the current proc's nesting level.
 * NOTE(review): this excerpt elides the frame push / kernel-segment
 * reload before FAKE_MCOUNT and the exit path after the nesting-level
 * decrement -- do not infer them from this text alone.
 */
40 #define FAST_INTR(irq_num, vec_name) \
50 FAKE_MCOUNT(13*4(%esp)) ; \
51 movl PCPU(CURPROC),%ebx ; \
52 incl P_INTR_NESTING_LEVEL(%ebx) ; \
53 pushl intr_unit + (irq_num) * 4 ; /* handler argument */ \
54 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
56 movl $0, lapic+LA_EOI ; /* ack the local APIC */ \
58 incl cnt+V_INTR ; /* book-keeping can wait */ \
59 movl intr_countp + (irq_num) * 4, %eax ; \
62 decl P_INTR_NESTING_LEVEL(%ebx) ; \
/*
 * Per-IRQ I/O APIC lookup.  int_to_apicintpin[] entries are 16 bytes
 * each; presumably offset 8 holds the owning I/O APIC's base address
 * and offset 12 the redirection-register select index -- TODO confirm
 * the struct layout against the machine headers.
 */
66 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
67 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
/*
 * MASK_IRQ(irq_num): if the IRQ is not already masked, set its bit in
 * apic_imen and set the INTMASK bit in its I/O APIC redirection entry
 * (written via the indirect select/window register pair).  Runs under
 * IMASK_LOCK; the matching unlock follows the 7: label outside this
 * excerpt.  Clobbers %eax, %ecx and flags.
 */
69 #define MASK_IRQ(irq_num) \
70 IMASK_LOCK ; /* into critical reg */ \
71 testl $IRQ_BIT(irq_num), apic_imen ; \
72 jne 7f ; /* masked, don't mask */ \
73 orl $IRQ_BIT(irq_num), apic_imen ; /* set the mask bit */ \
74 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
75 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
76 movl %eax, (%ecx) ; /* write the index */ \
77 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
78 orl $IOART_INTMASK, %eax ; /* set the mask */ \
79 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
80 7: ; /* already masked */ \
/*
 * MASK_LEVEL_IRQ(irq_num): mask only level-triggered sources; edge-
 * triggered ones skip to the 9: label.  NOTE(review): the level path
 * (presumably MASK_IRQ) and the 9: label are elided from this excerpt.
 */
83 * Test to see whether we are handling an edge or level triggered INT.
84 * Level-triggered INTs must still be masked as we don't clear the source,
85 * and the EOI cycle would cause redundant INTs to occur.
87 #define MASK_LEVEL_IRQ(irq_num) \
88 testl $IRQ_BIT(irq_num), apic_pin_trigger ; \
89 jz 9f ; /* edge, don't mask */ \
/*
 * EOI_IRQ(irq_num): issue an EOI to the local APIC only when the IRQ's
 * in-service bit is set; otherwise jump to the 9: label.  The
 * APIC_INTR_REORDER variant reads a precomputed address/bitmask pair
 * from apic_isrbit_location[] (8 bytes per IRQ); the plain variant
 * tests lapic+LA_ISR1 directly.
 * NOTE(review): the #else between the two variants and the 9: label
 * are elided here; the LA_ISR1 test also presumes a fixed IRQ->vector
 * mapping into that one ISR word -- confirm against apic.h.
 */
94 #ifdef APIC_INTR_REORDER
95 #define EOI_IRQ(irq_num) \
96 movl apic_isrbit_location + 8 * (irq_num), %eax ; \
98 testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
99 jz 9f ; /* not active */ \
100 movl $0, lapic+LA_EOI ; \
104 #define EOI_IRQ(irq_num) \
105 testl $IRQ_BIT(irq_num), lapic+LA_ISR1; \
106 jz 9f ; /* not active */ \
107 movl $0, lapic+LA_EOI; \
113 * Test to see if the source is currently masked, clear if so.
/*
 * UNMASK_IRQ(irq_num): inverse of MASK_IRQ -- if the IRQ's bit is set
 * in apic_imen, clear it and clear the INTMASK bit in the I/O APIC
 * redirection entry.  Runs under IMASK_LOCK; clobbers %eax, %ecx.
 * Fix: the mask word is named `apic_imen' everywhere else in this file
 * (see MASK_IRQ); the a.out-style `_apic_imen' spelling here would
 * reference a different, undefined symbol under ELF, leaving mask and
 * unmask operating on different words.
 */
115 #define UNMASK_IRQ(irq_num) \
116 IMASK_LOCK ; /* into critical reg */ \
117 testl $IRQ_BIT(irq_num), apic_imen ; \
118 je 7f ; /* bit clear, not masked */ \
119 andl $~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */ \
120 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
121 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
122 movl %eax, (%ecx) ; /* write the index */ \
123 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
124 andl $~IOART_INTMASK, %eax ; /* clear the mask */ \
125 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
126 7: ; /* already unmasked */ \
130 * Slow, threaded interrupts.
132 * XXX Most of the parameters here are obsolete. Fix this when we're
134 * XXX we really shouldn't return via doreti if we just schedule the
135 * interrupt handler and don't run anything. We could just do an
/*
 * INTR(irq_num, vec_name, maybe_extra_ipending): threaded-interrupt
 * entry.  The visible body reloads kernel data/per-cpu segments, runs
 * the optional maybe_extra_ipending hook, masks level-triggered
 * sources, bumps the proc nesting level, then (at Xresume*) pushes the
 * IRQ number for the scheduling call and pops it afterwards.
 * NOTE(review): the frame push, EOI, the call between lines 161 and
 * 163, and the exit path are elided from this excerpt.
 */
138 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
141 /* _XintrNN: entry point used by IDT/HWIs via _vec[]. */ \
144 movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
147 movl $KPSEL, %eax ; /* per-cpu segment selector */ \
150 maybe_extra_ipending ; \
152 MASK_LEVEL_IRQ(irq_num) ; \
155 movl PCPU(CURPROC),%ebx ; \
156 incl P_INTR_NESTING_LEVEL(%ebx) ; \
158 /* entry point used by doreti_unpend for HWIs. */ \
159 __CONCAT(Xresume,irq_num): ; \
160 FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
161 pushl $irq_num; /* pass the IRQ */ \
163 addl $4, %esp ; /* discard the parameter */ \
165 decl P_INTR_NESTING_LEVEL(%ebx) ; \
170 * Handle "spurious INTerrupts".
172 * This is different from the "spurious INTerrupt" generated by an
173 * 8259 PIC for missing INTs. See the APIC documentation for details.
174 * This routine should NOT do an 'EOI' cycle.
181 /* No EOI cycle used here */
187 * Handle TLB shootdowns.
/*
 * Xinvltlb (excerpt): flush the local TLB by reloading %cr3 (the
 * write-back of %eax to %cr3 is elided here) and EOI the local APIC.
 * The `ss' segment-override keeps the EOI store off %ds, which has not
 * been reloaded on this fast path.  Hit counters exist only under
 * COUNT_XINVLTLB_HITS.
 */
195 #ifdef COUNT_XINVLTLB_HITS
199 movl PCPU(CPUID), %eax
203 #endif /* COUNT_XINVLTLB_HITS */
205 movl %cr3, %eax /* invalidate the TLB */
208 ss /* stack segment, avoid %ds load */
209 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
/*
 * Xcpucheckstate IPI handler (excerpt): records this CPU's state
 * (0=user, 1=sys, 2=intr), current proc, and pc into the per-cpu
 * checkstate_* arrays, then atomically sets its bit in
 * checkstate_probed_cpus.  Several frame-setup and classification
 * lines are elided; the stack-offset comment below documents the frame
 * the visible code indexes (e.g. PSL_VM test at 24(%esp) = eflags).
 */
218 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
220 * - Stores current cpu state in checkstate_cpustate[cpuid]
221 * 0 == user, 1 == sys, 2 == intr
222 * - Stores current process in checkstate_curproc[cpuid]
224 * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
226 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
231 .globl Xcpucheckstate
232 .globl checkstate_cpustate
233 .globl checkstate_curproc
238 pushl %ds /* save current data segment */
242 mov %ax, %ds /* use KERNEL data segment */
246 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
253 testl $PSL_VM, 24(%esp) /* vm86 counts as user mode */
255 incl %ebx /* system or interrupt */
257 movl PCPU(CPUID), %eax
258 movl %ebx, checkstate_cpustate(,%eax,4)
259 movl PCPU(CURPROC), %ebx
260 movl %ebx, checkstate_curproc(,%eax,4)
263 movl %ebx, checkstate_pc(,%eax,4)
265 lock /* checkstate_probed_cpus |= (1<<id) */
266 btsl %eax, checkstate_probed_cpus
269 popl %ds /* restore previous data segment */
274 #endif /* BETTER_CLOCK */
/*
 * Xcpuast IPI handler (excerpt): acknowledges the request by clearing
 * its bit in checkstate_need_ast, EOIs the APIC, and -- unless an AST
 * delivery is already pending for this CPU (checkstate_pending_ast
 * btsl) -- marks the current proc with PS_ASTPENDING under sched_lock,
 * optionally PS_NEEDRESCHED if this CPU's bit was set in resched_cpus.
 * Branch targets between the visible lines are elided in this excerpt.
 */
277 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
279 * - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
281 * - We need a better method of triggering asts on other cpus.
290 mov %ax, %ds /* use KERNEL data segment */
295 movl PCPU(CPUID), %eax
296 lock /* checkstate_need_ast &= ~(1<<id) */
297 btrl %eax, checkstate_need_ast
298 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
301 btsl %eax, checkstate_pending_ast /* already delivering one? */
304 FAKE_MCOUNT(13*4(%esp))
306 MTX_LOCK_SPIN(sched_lock, 0)
307 movl PCPU(CURPROC),%ebx
308 orl $PS_ASTPENDING, P_SFLAG(%ebx)
310 movl PCPU(CPUID), %eax
312 btrl %eax, checkstate_pending_ast
314 btrl %eax, CNAME(resched_cpus)
316 orl $PS_NEEDRESCHED, P_SFLAG(%ebx)
318 incl CNAME(want_resched_cnt)
320 MTX_UNLOCK_SPIN(sched_lock)
322 incl CNAME(cpuast_cnt)
326 /* We are already in the process of delivering an ast for this CPU */
/*
 * Xcpustop IPI handler (excerpt): saves this CPU's context into
 * stoppcbs[cpuid] via savectx, sets its bit in stopped_cpus, spins
 * until its bit appears in started_cpus, clears both bits, then runs
 * and clears the one-shot cpustop_restartfunc if set.  The spin-loop
 * back-branch and register save/restore lines are elided here.
 */
331 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
333 * - Signals its receipt.
334 * - Waits for permission to restart.
335 * - Signals its restart.
347 pushl %ds /* save current data segment */
351 mov %ax, %ds /* use KERNEL data segment */
355 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
357 movl PCPU(CPUID), %eax
358 imull $PCB_SIZE, %eax
359 leal CNAME(stoppcbs)(%eax), %eax /* &stoppcbs[cpuid] */
361 call CNAME(savectx) /* Save process context */
365 movl PCPU(CPUID), %eax
368 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
370 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
374 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
376 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
381 movl CNAME(cpustop_restartfunc), %eax
384 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
389 popl %ds /* restore previous data segment */
/* Instantiate the fast-interrupt entry points, one per IRQ 0..31. */
399 FAST_INTR(0,fastintr0)
400 FAST_INTR(1,fastintr1)
401 FAST_INTR(2,fastintr2)
402 FAST_INTR(3,fastintr3)
403 FAST_INTR(4,fastintr4)
404 FAST_INTR(5,fastintr5)
405 FAST_INTR(6,fastintr6)
406 FAST_INTR(7,fastintr7)
407 FAST_INTR(8,fastintr8)
408 FAST_INTR(9,fastintr9)
409 FAST_INTR(10,fastintr10)
410 FAST_INTR(11,fastintr11)
411 FAST_INTR(12,fastintr12)
412 FAST_INTR(13,fastintr13)
413 FAST_INTR(14,fastintr14)
414 FAST_INTR(15,fastintr15)
415 FAST_INTR(16,fastintr16)
416 FAST_INTR(17,fastintr17)
417 FAST_INTR(18,fastintr18)
418 FAST_INTR(19,fastintr19)
419 FAST_INTR(20,fastintr20)
420 FAST_INTR(21,fastintr21)
421 FAST_INTR(22,fastintr22)
422 FAST_INTR(23,fastintr23)
423 FAST_INTR(24,fastintr24)
424 FAST_INTR(25,fastintr25)
425 FAST_INTR(26,fastintr26)
426 FAST_INTR(27,fastintr27)
427 FAST_INTR(28,fastintr28)
428 FAST_INTR(29,fastintr29)
429 FAST_INTR(30,fastintr30)
430 FAST_INTR(31,fastintr31)
/*
 * Threaded-interrupt entry points.  IRQ0 (the clock) additionally
 * latches clkintr_pending via the maybe_extra_ipending hook.
 * NOTE(review): INTR(1..) instantiations are elided from this excerpt.
 */
431 #define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
432 /* Threaded interrupts */
433 INTR(0,intr0, CLKINTR_PENDING)
/*
 * Rendezvous IPI handler (excerpt): with kernel %ds loaded, calls the
 * generic smp_rendezvous_action() and EOIs the local APIC.  Frame
 * setup/teardown lines are elided here.
 */
468 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
470 * - Calls the generic rendezvous action function.
478 mov %ax, %ds /* use KERNEL data segment */
483 call smp_rendezvous_action
485 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
/*
 * SMP bookkeeping storage (excerpt).  The .long/.space directives
 * between these labels are elided; each label presumably fronts a
 * 32-bit word used as a CPU bitmap or event counter -- confirm sizes
 * against the unelided file before relying on them.
 */
492 #ifdef COUNT_XINVLTLB_HITS
496 #endif /* COUNT_XINVLTLB_HITS */
498 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
499 .globl stopped_cpus, started_cpus
506 .globl checkstate_probed_cpus
507 checkstate_probed_cpus:
509 #endif /* BETTER_CLOCK */
510 .globl checkstate_need_ast
513 checkstate_pending_ast:
515 .globl CNAME(resched_cpus)
516 .globl CNAME(want_resched_cnt)
517 .globl CNAME(cpuast_cnt)
518 .globl CNAME(cpustop_restartfunc)
521 CNAME(want_resched_cnt):
525 CNAME(cpustop_restartfunc):
528 .globl apic_pin_trigger