2 * from: vector.s, 386BSD 0.1 unknown origin
7 #include <machine/apic.h>
8 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
12 /* Convert an absolute IRQ# into a bitmask; only meaningful for IRQs 0..31. */
13 #define IRQ_BIT(irq_num) (1 << (irq_num))
15 /* Make an index into the IO APIC from the IRQ#: each redirection-table
 * entry is a pair of 32-bit registers starting at select-index 0x10. */
16 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
22 pushl $0 ; /* dummy error code -- hardware interrupts push none */ \
23 pushl $0 ; /* dummy trap type -- keeps the frame layout uniform */ \
25 pushl %ds ; /* save data and extra segments ... */ \
37 * Macros for interrupt entry, call to handler, and exit.
40 #define FAST_INTR(irq_num, vec_name) \
50 FAKE_MCOUNT(13*4(%esp)) ; /* profiling: fake the mcount call site */ \
51 movl PCPU(CURPROC),%ebx ; /* %ebx = current process */ \
52 incl P_INTR_NESTING_LEVEL(%ebx) ; /* entering interrupt context */ \
53 pushl intr_unit + (irq_num) * 4 ; /* arg: this IRQ's unit cookie */ \
54 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
56 movl $0, lapic+LA_EOI ; /* signal EOI to the local APIC */ \
58 incl cnt+V_INTR ; /* book-keeping can wait */ \
59 movl intr_countp + (irq_num) * 4, %eax ; /* presumably ptr to this IRQ's counter -- confirm */ \
62 decl P_INTR_NESTING_LEVEL(%ebx) ; /* leaving interrupt context */ \
/* Field offsets into the 16-byte int_to_apicintpin[] entries: +8 holds the
 * I/O APIC select-register address, +12 the redirection-entry index (see
 * their use in the mask/unmask macros: index is written, window accessed). */
66 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
67 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
69 #define MASK_IRQ(irq_num) \
70 IMASK_LOCK ; /* into critical reg */ \
71 testl $IRQ_BIT(irq_num), apic_imen ; /* already masked in software? */ \
72 jne 7f ; /* masked, don't mask */ \
73 orl $IRQ_BIT(irq_num), apic_imen ; /* set the mask bit */ \
74 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
75 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
76 movl %eax, (%ecx) ; /* write the index (select register) */ \
77 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
78 orl $IOART_INTMASK, %eax ; /* set the mask */ \
79 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
80 7: ; /* already masked */ \
83 * Test to see whether we are handling an edge or level triggered INT.
84 * Level-triggered INTs must still be masked as we don't clear the source,
85 * and the EOI cycle would cause redundant INTs to occur.
87 #define MASK_LEVEL_IRQ(irq_num) \
88 testl $IRQ_BIT(irq_num), apic_pin_trigger ; /* level-triggered pin? */ \
89 jz 9f ; /* edge, don't mask */ \
94 #ifdef APIC_INTR_REORDER
95 #define EOI_IRQ(irq_num) \
96 movl apic_isrbit_location + 8 * (irq_num), %eax ; /* presumably the ISR reg address -- confirm; dereference not visible in this chunk */ \
98 testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; /* test precomputed bit for this IRQ */ \
99 jz 9f ; /* not active */ \
100 movl $0, lapic+LA_EOI ; /* ack only if in service */ \
104 #define EOI_IRQ(irq_num) \
105 testl $IRQ_BIT(irq_num), lapic+LA_ISR1; /* IRQ currently in service? */ \
106 jz 9f ; /* not active */ \
107 movl $0, lapic+LA_EOI; /* ack only if in service */ \
113 * Test to see if the source is currently masked, clear if so.
115 #define UNMASK_IRQ(irq_num) \
116 IMASK_LOCK ; /* into critical reg */ \
117 testl $IRQ_BIT(irq_num), apic_imen ; /* same mask word as MASK_IRQ */ \
118 je 7f ; /* bit clear, not masked */ \
119 andl $~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */ \
120 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
121 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
122 movl %eax, (%ecx) ; /* write the index (select register) */ \
123 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
124 andl $~IOART_INTMASK, %eax ; /* clear the mask */ \
125 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
126 7: ; /* already unmasked */ \
130 * Slow, threaded interrupts.
132 * XXX Most of the parameters here are obsolete. Fix this when we're
134 * XXX we really shouldn't return via doreti if we just schedule the
135 * interrupt handler and don't run anything. We could just do an
138 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
141 /* _XintrNN: entry point used by IDT/HWIs via _vec[]. */ \
144 movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
147 movl $KPSEL, %eax ; /* private per-CPU segment selector -- TODO confirm target seg reg (load not visible) */ \
150 maybe_extra_ipending ; /* e.g. CLKINTR_PENDING for irq 0 */ \
152 MASK_LEVEL_IRQ(irq_num) ; /* level sources stay masked until handled */ \
155 movl PCPU(CURPROC),%ebx ; /* %ebx = current process */ \
156 incl P_INTR_NESTING_LEVEL(%ebx) ; /* entering interrupt context */ \
158 /* entry point used by doreti_unpend for HWIs. */ \
159 __CONCAT(Xresume,irq_num): ; \
160 FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
161 pushl $irq_num; /* pass the IRQ */ \
164 addl $4, %esp ; /* discard the parameter */ \
166 decl P_INTR_NESTING_LEVEL(%ebx) ; /* leaving interrupt context */ \
171 * Handle "spurious INTerrupts".
173 * This is different than the "spurious INTerrupt" generated by an
174 * 8259 PIC for missing INTs. See the APIC documentation for details.
175 * This routine should NOT do an 'EOI' cycle.
182 /* No EOI cycle used here */
188 * Handle TLB shootdowns.
196 #ifdef COUNT_XINVLTLB_HITS
200 movl PCPU(CPUID), %eax /* index for the per-CPU hit counter */
204 #endif /* COUNT_XINVLTLB_HITS */
206 movl %cr3, %eax /* invalidate the TLB */
/* NOTE(review): the %cr3 write-back that actually performs the flush is not
 * visible in this chunk -- confirm it follows the read above. */
209 ss /* stack segment, avoid %ds load */
210 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
219 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
221 * - Stores current cpu state in checkstate_cpustate[cpuid]
222 * 0 == user, 1 == sys, 2 == intr
223 * - Stores current process in checkstate_curproc[cpuid]
225 * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
227 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
232 .globl Xcpucheckstate
233 .globl checkstate_cpustate
234 .globl checkstate_curproc
239 pushl %ds /* save current data segment */
243 mov %ax, %ds /* use KERNEL data segment */
247 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
254 testl $PSL_VM, 24(%esp) /* vm86 mode? eflags is at 24(%esp) per the frame layout above */
256 incl %ebx /* system or interrupt */
258 movl PCPU(CPUID), %eax /* %eax = cpuid, index for per-CPU arrays */
259 movl %ebx, checkstate_cpustate(,%eax,4) /* 0 == user, 1 == sys, 2 == intr */
260 movl PCPU(CURPROC), %ebx
261 movl %ebx, checkstate_curproc(,%eax,4) /* record current process */
264 movl %ebx, checkstate_pc(,%eax,4) /* record pc -- %ebx load not visible in this chunk */
266 lock /* checkstate_probed_cpus |= (1<<id) */
267 btsl %eax, checkstate_probed_cpus
270 popl %ds /* restore previous data segment */
275 #endif /* BETTER_CLOCK */
278 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
280 * - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
282 * - We need a better method of triggering asts on other cpus.
291 mov %ax, %ds /* use KERNEL data segment */
296 movl PCPU(CPUID), %eax /* %eax = cpuid */
297 lock /* checkstate_need_ast &= ~(1<<id) */
298 btrl %eax, checkstate_need_ast
299 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
302 btsl %eax, checkstate_pending_ast /* mark AST delivery in progress -- branch on CF not visible here */
305 FAKE_MCOUNT(13*4(%esp))
307 MTX_LOCK_SPIN(sched_lock, 0)
308 movl PCPU(CURPROC),%ebx
309 orl $PS_ASTPENDING, P_SFLAG(%ebx) /* flag AST on curproc under sched_lock */
311 movl PCPU(CPUID), %eax
313 btrl %eax, checkstate_pending_ast /* delivery done */
315 btrl %eax, CNAME(resched_cpus) /* were we asked to resched? -- branch not visible here */
317 orl $PS_NEEDRESCHED, P_SFLAG(%ebx)
319 incl CNAME(want_resched_cnt)
321 MTX_UNLOCK_SPIN(sched_lock)
323 incl CNAME(cpuast_cnt)
327 /* We are already in the process of delivering an ast for this CPU */
332 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
334 * - Signals its receipt.
335 * - Waits for permission to restart.
336 * - Signals its restart.
348 pushl %ds /* save current data segment */
352 mov %ax, %ds /* use KERNEL data segment */
356 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
358 movl PCPU(CPUID), %eax
359 imull $PCB_SIZE, %eax
360 leal CNAME(stoppcbs)(%eax), %eax /* %eax = &stoppcbs[cpuid] */
362 call CNAME(savectx) /* Save process context */
366 movl PCPU(CPUID), %eax
369 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
371 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
375 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
377 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
382 movl CNAME(cpustop_restartfunc), %eax /* optional restart hook -- call site not visible here */
385 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
390 popl %ds /* restore previous data segment */
/* Instantiate the fast-interrupt entry points Xfastintr0..Xfastintr31. */
400 FAST_INTR(0,fastintr0)
401 FAST_INTR(1,fastintr1)
402 FAST_INTR(2,fastintr2)
403 FAST_INTR(3,fastintr3)
404 FAST_INTR(4,fastintr4)
405 FAST_INTR(5,fastintr5)
406 FAST_INTR(6,fastintr6)
407 FAST_INTR(7,fastintr7)
408 FAST_INTR(8,fastintr8)
409 FAST_INTR(9,fastintr9)
410 FAST_INTR(10,fastintr10)
411 FAST_INTR(11,fastintr11)
412 FAST_INTR(12,fastintr12)
413 FAST_INTR(13,fastintr13)
414 FAST_INTR(14,fastintr14)
415 FAST_INTR(15,fastintr15)
416 FAST_INTR(16,fastintr16)
417 FAST_INTR(17,fastintr17)
418 FAST_INTR(18,fastintr18)
419 FAST_INTR(19,fastintr19)
420 FAST_INTR(20,fastintr20)
421 FAST_INTR(21,fastintr21)
422 FAST_INTR(22,fastintr22)
423 FAST_INTR(23,fastintr23)
424 FAST_INTR(24,fastintr24)
425 FAST_INTR(25,fastintr25)
426 FAST_INTR(26,fastintr26)
427 FAST_INTR(27,fastintr27)
428 FAST_INTR(28,fastintr28)
429 FAST_INTR(29,fastintr29)
430 FAST_INTR(30,fastintr30)
431 FAST_INTR(31,fastintr31)
/* Extra work for IRQ 0 (clock): note a pending clock tick before handling. */
432 #define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
433 /* Threaded interrupts */
434 INTR(0,intr0, CLKINTR_PENDING)
469 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
471 * - Calls the generic rendezvous action function.
479 mov %ax, %ds /* use KERNEL data segment */
484 call smp_rendezvous_action /* the generic rendezvous action function */
486 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
493 #ifdef COUNT_XINVLTLB_HITS
497 #endif /* COUNT_XINVLTLB_HITS */
499 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
500 .globl stopped_cpus, started_cpus
507 .globl checkstate_probed_cpus
508 checkstate_probed_cpus:
/* NOTE(review): the storage directives (.long/.space) following these labels
 * are not visible in this chunk; confirm each label has its initializer. */
510 #endif /* BETTER_CLOCK */
511 .globl checkstate_need_ast
514 checkstate_pending_ast:
516 .globl CNAME(resched_cpus)
517 .globl CNAME(want_resched_cnt)
518 .globl CNAME(cpuast_cnt)
519 .globl CNAME(cpustop_restartfunc)
522 CNAME(want_resched_cnt):
526 CNAME(cpustop_restartfunc):
529 .globl apic_pin_trigger