2 * from: vector.s, 386BSD 0.1 unknown origin
7 #include <machine/apic.h>
8 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
12 /* convert an absolute IRQ# into a bitmask */
13 #define IRQ_BIT(irq_num) (1 << (irq_num))
15 /* make an index into the IO APIC from the IRQ# (redirection table starts at register 0x10; each entry is two 32-bit registers wide) */
16 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
22 pushl $0 ; /* dummy error code (keeps frame layout uniform) */ \
23 pushl $0 ; /* dummy trap type */ \
25 pushl %ds ; /* save data and extra segments ... */ \
37 * Macros for interrupt entry, call to handler, and exit.
40 #define FAST_INTR(irq_num, vec_name) \
50 FAKE_MCOUNT(13*4(%esp)) ; \
51 movl PCPU(CURPROC),%ebx ; /* %ebx = current process */ \
52 incl P_INTR_NESTING_LEVEL(%ebx) ; \
53 pushl intr_unit + (irq_num) * 4 ; /* handler argument: intr_unit[irq] */ \
54 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
56 movl $0, lapic+LA_EOI ; /* End Of Interrupt to APIC */ \
58 incl cnt+V_INTR ; /* book-keeping can wait */ \
59 movl intr_countp + (irq_num) * 4, %eax ; /* intr_countp[irq] -> %eax */ \
62 decl P_INTR_NESTING_LEVEL(%ebx) ; \
/*
 * Per-IRQ fields of the 16-byte int_to_apicintpin[] entry:
 * +8 = I/O APIC address, +12 = redirection-table index
 * (register-use comments in the mask/unmask code below agree).
 */
66 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
67 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
69 #define MASK_IRQ(irq_num) \
70 IMASK_LOCK ; /* into critical reg */ \
71 testl $IRQ_BIT(irq_num), apic_imen ; /* already masked in apic_imen? */ \
72 jne 7f ; /* masked, don't mask */ \
73 orl $IRQ_BIT(irq_num), apic_imen ; /* set the mask bit */ \
74 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
75 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
76 movl %eax, (%ecx) ; /* write the index */ \
77 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
78 orl $IOART_INTMASK, %eax ; /* set the mask */ \
79 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
80 7: ; /* already masked */ \
83 * Test to see whether we are handling an edge or level triggered INT.
84 * Level-triggered INTs must still be masked as we don't clear the source,
85 * and the EOI cycle would cause redundant INTs to occur.
87 #define MASK_LEVEL_IRQ(irq_num) \
88 testl $IRQ_BIT(irq_num), apic_pin_trigger ; /* level-triggered pin? */ \
89 jz 9f ; /* edge, don't mask */ \
94 #ifdef APIC_INTR_REORDER
95 #define EOI_IRQ(irq_num) \
96 movl apic_isrbit_location + 8 * (irq_num), %eax ; \
98 testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; /* interrupt active? */ \
99 jz 9f ; /* not active */ \
100 movl $0, lapic+LA_EOI ; /* End Of Interrupt to APIC */ \
104 #define EOI_IRQ(irq_num) \
105 testl $IRQ_BIT(irq_num), lapic+LA_ISR1; /* in-service in the local APIC ISR? */ \
106 jz 9f ; /* not active */ \
107 movl $0, lapic+LA_EOI; /* End Of Interrupt to APIC */ \
113 * Test to see if the source is currently masked, clear if so.
115 #define UNMASK_IRQ(irq_num) \
116 IMASK_LOCK ; /* into critical reg */ \
117 testl $IRQ_BIT(irq_num), apic_imen ; /* source masked in apic_imen? */ \
118 je 7f ; /* bit clear, not masked */ \
119 andl $~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */ \
120 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
121 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
122 movl %eax, (%ecx) ; /* write the index */ \
123 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
124 andl $~IOART_INTMASK, %eax ; /* clear the mask */ \
125 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
126 7: ; /* already unmasked */ \
130 * Slow, threaded interrupts.
132 * XXX Most of the parameters here are obsolete. Fix this when we're
134 * XXX we really shouldn't return via doreti if we just schedule the
135 * interrupt handler and don't run anything. We could just do an
138 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
141 /* _XintrNN: entry point used by IDT/HWIs via _vec[]. */ \
144 movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
147 movl $KPSEL, %eax ; /* NOTE(review): presumably the per-CPU data selector -- confirm */ \
150 maybe_extra_ipending ; /* e.g. CLKINTR_PENDING for the clock IRQ */ \
152 MASK_LEVEL_IRQ(irq_num) ; /* mask if level-triggered */ \
155 movl PCPU(CURPROC),%ebx ; /* %ebx = current process */ \
156 incl P_INTR_NESTING_LEVEL(%ebx) ; \
158 /* entry point used by doreti_unpend for HWIs. */ \
159 __CONCAT(Xresume,irq_num): ; \
160 FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
161 pushl $irq_num; /* pass the IRQ */ \
163 addl $4, %esp ; /* discard the parameter */ \
165 decl P_INTR_NESTING_LEVEL(%ebx) ; \
170 * Handle "spurious INTerrupts".
172 * This is different than the "spurious INTerrupt" generated by an
173 * 8259 PIC for missing INTs. See the APIC documentation for details.
174 * This routine should NOT do an 'EOI' cycle.
181 /* No EOI cycle used here */
186 * Handle TLB shootdowns.
194 #ifdef COUNT_XINVLTLB_HITS
198 movl PCPU(CPUID), %eax /* %eax = local CPU id */
202 #endif /* COUNT_XINVLTLB_HITS */
204 movl %cr3, %eax /* invalidate the TLB */
/* bare 'ss' is a segment-override prefix for the following store; %ds may not be valid here */
207 ss /* stack segment, avoid %ds load */
208 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
214 * Forward hardclock to another CPU. Pushes a trapframe and calls
215 * forwarded_hardclock().
222 movl $KDSEL, %eax /* reload with kernel's data segment */
228 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
230 movl PCPU(CURPROC),%ebx /* %ebx = current process */
231 incl P_INTR_NESTING_LEVEL(%ebx) /* interrupt-nesting bookkeeping */
232 call forwarded_hardclock
233 decl P_INTR_NESTING_LEVEL(%ebx)
238 * Forward statclock to another CPU. Pushes a trapframe and calls
239 * forwarded_statclock().
246 movl $KDSEL, %eax /* reload with kernel's data segment */
252 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
254 FAKE_MCOUNT(13*4(%esp))
255 movl PCPU(CURPROC),%ebx /* %ebx = current process */
256 incl P_INTR_NESTING_LEVEL(%ebx) /* interrupt-nesting bookkeeping */
257 call forwarded_statclock
258 decl P_INTR_NESTING_LEVEL(%ebx)
263 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
265 * The other CPU has already executed aston() or need_resched() on our
266 * current process, so we simply need to ack the interrupt and return
267 * via doreti to run ast().
276 mov %ax, %ds /* use KERNEL data segment */
281 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
283 FAKE_MCOUNT(13*4(%esp))
289 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
291 * - Signals its receipt.
292 * - Waits for permission to restart.
293 * - Signals its restart.
304 pushl %ds /* save current data segment */
308 mov %ax, %ds /* use KERNEL data segment */
312 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
314 movl PCPU(CPUID), %eax /* %eax = local CPU id */
315 imull $PCB_SIZE, %eax /* byte offset of this CPU's pcb slot */
316 leal CNAME(stoppcbs)(%eax), %eax /* %eax = &stoppcbs[cpuid] */
318 call CNAME(savectx) /* Save process context */
321 movl PCPU(CPUID), %eax
324 btsl %eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
326 btl %eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
330 btrl %eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
332 btrl %eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */
337 movl CNAME(cpustop_restartfunc), %eax /* restart hook, cleared below */
340 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
345 popl %ds /* restore previous data segment */
/* Fast interrupt entry points, one FAST_INTR expansion per IRQ 0-31. */
355 FAST_INTR(0,fastintr0)
356 FAST_INTR(1,fastintr1)
357 FAST_INTR(2,fastintr2)
358 FAST_INTR(3,fastintr3)
359 FAST_INTR(4,fastintr4)
360 FAST_INTR(5,fastintr5)
361 FAST_INTR(6,fastintr6)
362 FAST_INTR(7,fastintr7)
363 FAST_INTR(8,fastintr8)
364 FAST_INTR(9,fastintr9)
365 FAST_INTR(10,fastintr10)
366 FAST_INTR(11,fastintr11)
367 FAST_INTR(12,fastintr12)
368 FAST_INTR(13,fastintr13)
369 FAST_INTR(14,fastintr14)
370 FAST_INTR(15,fastintr15)
371 FAST_INTR(16,fastintr16)
372 FAST_INTR(17,fastintr17)
373 FAST_INTR(18,fastintr18)
374 FAST_INTR(19,fastintr19)
375 FAST_INTR(20,fastintr20)
376 FAST_INTR(21,fastintr21)
377 FAST_INTR(22,fastintr22)
378 FAST_INTR(23,fastintr23)
379 FAST_INTR(24,fastintr24)
380 FAST_INTR(25,fastintr25)
381 FAST_INTR(26,fastintr26)
382 FAST_INTR(27,fastintr27)
383 FAST_INTR(28,fastintr28)
384 FAST_INTR(29,fastintr29)
385 FAST_INTR(30,fastintr30)
386 FAST_INTR(31,fastintr31)
/* IRQ0 (clock) additionally latches clkintr_pending on entry, via the
 * maybe_extra_ipending argument of INTR(). */
387 #define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
388 /* Threaded interrupts */
389 INTR(0,intr0, CLKINTR_PENDING)
424 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
426 * - Calls the generic rendezvous action function.
434 mov %ax, %ds /* use KERNEL data segment */
439 call smp_rendezvous_action
441 movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
448 #ifdef COUNT_XINVLTLB_HITS
452 #endif /* COUNT_XINVLTLB_HITS */
/* bitmap of level-triggered IRQ pins, consulted by MASK_LEVEL_IRQ */
454 .globl apic_pin_trigger