/*
 * from: vector.s, 386BSD 0.1 unknown origin
 */
#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
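/*
 * Each redirection table entry is a pair of 32-bit IO APIC registers
 * and the table starts at register 0x10, so REDTBL_IDX(irq) selects
 * the low half of that IRQ's entry; e.g. REDTBL_IDX(3) == 0x16.
 */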
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;	pushl	%fs
/*
 * Macros for interrupt entry, call to handler, and exit.
 */
#define	FAST_INTR(irq_num, vec_name)					\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	mov %ax, %ds ;	mov %ax, %es ;		\
	movl	$KPSEL, %eax ;	mov %ax, %fs ;				\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	incb	PCPU(INTR_NESTING_LEVEL) ;				\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;		/* discard the unit argument */	\
	movl	$0, lapic_eoi ;						\
	lock ;	incl	_cnt+V_INTR ;	/* book-keeping can wait */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
	MEXITCOUNT ;							\
	jmp	doreti_next
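/*
 * A "fast" vector runs its handler immediately at interrupt time and
 * EOIs the local APIC itself; only the statistics are deferred.  The
 * IO APIC pin is never masked, so the handler must clear the interrupt
 * source at the device before it returns.
 */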
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
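/*
 * These pick fields out of the 16-byte int_to_apicintpin[] entries:
 * the IO APIC base address at offset 8 and the redirection table index
 * at offset 12, matching a layout like
 *
 *	struct apic_intmapinfo {
 *		int ioapic;
 *		int int_pin;
 *		volatile void *apic_address;
 *		int redirindex;
 *	};
 */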
#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
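/*
 * IO APIC registers are reached indirectly: store the register index
 * into the select register at the chip's base address, then read or
 * write the data through the window register.  The read-modify-write
 * above is roughly
 *
 *	ioapic[0] = redirindex;
 *	val = ioapic[IOAPIC_WINDOW / 4];
 *	ioapic[IOAPIC_WINDOW / 4] = val | IOART_INTMASK;
 *
 * with ioapic a pointer to the chip's volatile u_int register page.
 */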
/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't mask */		\
	MASK_IRQ(irq_num) ;						\
9:
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;		/* fetch the ISR word */	\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;			/* not active */		\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1 ;				\
	jz	9f ;			/* not active */		\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif
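/*
 * The EOI is skipped unless the vector's in-service bit is actually set
 * in the local APIC ISR; a blind EOI here could retire some other
 * interrupt that is legitimately in service.
 */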
/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	IMASK_UNLOCK
#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_lock_np)
	addl	$4, %esp		/* pop the lock argument */
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	movl	PCPU(CPUID), %eax
	movw	%ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx			/* advance the buffer index */
	movl	%ecx, CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_unlock_np)
	addl	$4, %esp
	ret
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;	pushl	%ecx ;	pushl	%edx ;			\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;			/* only trace the chosen IRQ */	\
	pushl	$id ;			/* event id for the log */	\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;	popl	%ecx ;	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif
#define APIC_ITRACE_ENTER	1
#define APIC_ITRACE_EOI		2
#define APIC_ITRACE_TRYISRLOCK	3
#define APIC_ITRACE_GOTISRLOCK	4
#define APIC_ITRACE_ENTER2	5
#define APIC_ITRACE_LEAVE	6
#define APIC_ITRACE_UNMASK	7
#define APIC_ITRACE_ACTIVE	8
#define APIC_ITRACE_MASKED	9
#define APIC_ITRACE_NOISRLOCK	10
#define APIC_ITRACE_MASKED2	11
#define APIC_ITRACE_SPLZ	12
#define APIC_ITRACE_DORETI	13
#else
#define APIC_ITRACE(name, irq_num, id)
#endif /* APIC_INTR_DIAGNOSTIC */
/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
	maybe_extra_ipending ;						\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
	incb	PCPU(INTR_NESTING_LEVEL) ;				\
/* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;	/* XXX avoid double count */	\
	pushl	$irq_num ;		/* pass the IRQ */		\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	call	_sched_ithd ;		/* schedule the ithread */	\
	addl	$4, %esp ;		/* discard the parameter */	\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
	MEXITCOUNT ;							\
	jmp	doreti_next
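/*
 * Level-triggered pins must be masked before the EOI above: the
 * handler only runs later in an interrupt thread, so the source is
 * still asserted and an unmasked EOI would re-deliver the interrupt at
 * once.  The pin is unmasked again after the ithread services the
 * device.
 */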
237 * Handle "spurious INTerrupts".
239 * This is different than the "spurious INTerrupt" generated by an
240 * 8259 PIC for missing INTs. See the APIC documentation for details.
241 * This routine should NOT do an 'EOI' cycle.
IDTVEC(spuriousint)
	/* No EOI cycle used here */
	iret
/*
 * Handle TLB shootdowns.
 */
#ifdef COUNT_XINVLTLB_HITS
	movl	PCPU(CPUID), %eax
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3
	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
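/*
 * Reloading %cr3 with its own value flushes all non-global TLB
 * entries, which is the entire job of this IPI.  The handler touches
 * memory only through %ss (already a kernel selector at this point),
 * so it can skip setting up %ds entirely.
 */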
#ifdef BETTER_CLOCK
/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *    0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *  - Stores current pc in checkstate_pc[cpuid]
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%fs
	pushl	%ds			/* save current data segment */
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	movl	$0, %ebx		/* assume user mode */
	testb	$3, 20(%esp)		/* user (ring 3) %cs? */
	jnz	1f
	testl	$PSL_VM, 24(%esp)	/* vm86 mode counts as user */
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	PCPU(CPUID), %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	PCPU(CURPROC), %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	16(%esp), %ebx		/* interrupted %eip */
	movl	%ebx, _checkstate_pc(,%eax,4)
	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus
	popl	%ds			/* restore previous data segment */
	popl	%fs
	popl	%ebx
	popl	%eax
	iret
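/*
 * The probing CPU drives this from C, roughly:
 *
 *	checkstate_probed_cpus = 0;
 *	(send Xcpucheckstate to the target CPUs)
 *	while ((checkstate_probed_cpus & map) != map)
 *		;
 */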
#endif /* BETTER_CLOCK */
/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *  - We need a better method of triggering asts on other cpus.
 */
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	PCPU(CPUID), %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f			/* already delivering an ast */

	FAKE_MCOUNT(13*4(%esp))

	orl	$AST_PENDING, PCPU(ASTPENDING)	/* XXX */
	incb	PCPU(INTR_NESTING_LEVEL)

	movl	PCPU(CPUID), %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f			/* no reschedule requested */
	orl	$AST_PENDING+AST_RESCHED, PCPU(ASTPENDING)
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	doreti_next
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret
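/*
 * checkstate_pending_ast is a per-CPU latch: the bit is set on entry
 * and cleared once the AST flags have been posted, so an Xcpuast that
 * arrives while one is already being delivered is simply dropped.
 */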
/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, PCPU(INTR_NESTING_LEVEL)
	jae	1f			/* nested too deeply, give up */
	incb	PCPU(INTR_NESTING_LEVEL)
	jmp	doreti_next		/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
	POP_FRAME
	iret
	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	/* XXX - this is broken now, because mp_lock doesn't exist */
	movl	$0, %eax		/* Pick CPU #0 if no one has the lock */
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24, %ecx		/* destination field is bits 24-31 */
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi
3:
	movl	lapic_icr_lo, %eax	/* wait for any previous IPI */
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo	/* send the IPI */
5:
	movl	lapic_icr_lo, %eax	/* wait for delivery */
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	5b
4:
	ret
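/*
 * Sending an IPI is a two-register operation on the local APIC,
 * roughly:
 *
 *	icr_hi = (icr_hi & ~APIC_ID_MASK) | (apic_id << 24);
 *	while (icr_lo & APIC_DELSTAT_MASK)
 *		;	(previous IPI still being delivered)
 *	icr_lo = (icr_lo & APIC_RESV2_MASK) |
 *	    APIC_DEST_DESTFLD | APIC_DELMODE_FIXED | vector;
 */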
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
	pushl	%ds			/* save current data segment */
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax
	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b
	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */
	movl	CNAME(cpustop_restartfunc), %eax
	testl	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */
	call	*%eax
2:
	popl	%ds			/* restore previous data segment */
	iret
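/*
 * From the stopped CPU's point of view the protocol is roughly:
 *
 *	stopped_cpus |= (1 << cpuid);		(signal receipt)
 *	while ((started_cpus & (1 << cpuid)) == 0)
 *		;				(wait for permission)
 *	started_cpus &= ~(1 << cpuid);
 *	stopped_cpus &= ~(1 << cpuid);		(signal restart)
 */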
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
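/*
 * IRQ 0 is the clock.  Its maybe_extra_ipending hook sets
 * clkintr_pending at vector entry, before the interrupt thread is
 * scheduled, so the clock code can see that a tick is already on the
 * way.
 */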
/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 *  - Calls the generic rendezvous action function.
 */
	mov	%ax, %ds		/* use KERNEL data segment */
	call	_smp_rendezvous_action
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	POP_FRAME
	iret
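/*
 * All of the rendezvous logic lives in C: smp_rendezvous_action() runs
 * the caller-supplied setup, action, and teardown functions with every
 * CPU spinning at barriers in between, so this stub only needs to load
 * kernel segments, call it, and EOI.
 */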
	.data
/* active flag for lazy masking */
#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */
/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:	.long	0
_started_cpus:	.long	0
#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):	.long 0
CNAME(forward_irq_hitcnt):	.long 0
CNAME(forward_irq_toodeepcnt):	.long 0
CNAME(resched_cpus):		.long 0
CNAME(want_resched_cnt):	.long 0
CNAME(cpuast_cnt):		.long 0
CNAME(cpustop_restartfunc):	.long 0
	.globl _apic_pin_trigger
_apic_pin_trigger:	.long	0