2 * from: vector.s, 386BSD 0.1 unknown origin
7 #include <machine/apic.h>
8 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
12 /* convert an absolute IRQ# into a bitmask (valid for IRQ# 0..31) */
13 #define IRQ_BIT(irq_num) (1 << (irq_num))
15 /* make an index into the IO APIC redirection table from the IRQ#
   (entries are 64-bit register pairs, so two 32-bit registers each,
   starting at IO APIC register 0x10) */
16 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
20 * Macros for interrupt entry, call to handler, and exit.
/* NOTE(review): this listing is an excerpt — the embedded original line
   numbers jump, so interior lines of this macro are elided here. */
23 #define FAST_INTR(irq_num, vec_name) \
27 pushl %eax ; /* save only call-used registers */ \
38 FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ; \
39 pushl _intr_unit + (irq_num) * 4 ; /* handler argument: this IRQ's unit cookie */ \
40 call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
42 movl $0, lapic_eoi ; /* signal EOI to the local APIC */ \
44 incl _cnt+V_INTR ; /* book-keeping can wait */ \
45 movl _intr_countp + (irq_num) * 4, %eax ; /* presumably -> this IRQ's counter; confirm */ \
61 pushl $0 ; /* dummy error code */ \
62 pushl $0 ; /* dummy trap type */ \
64 pushl %ds ; /* save data and extra segments ... */ \
/*
 * Per-IRQ IO APIC routing info from int_to_apicintpin[]: entries are
 * 16 bytes; offset 8 is used below as the ioapic register-select
 * address, offset 12 as the redirection-table index for that pin.
 * (NOTE(review): layout inferred from these offsets — confirm against
 * the structure definition in intr_machdep.h.)
 */
75 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
76 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
/* Mask the given IRQ at the IO APIC (sets the redirection-entry mask
   bit) and record it in apic_imen; no-op if it is already masked. */
78 #define MASK_IRQ(irq_num) \
79 IMASK_LOCK ; /* enter critical region protecting apic_imen */ \
80 testl $IRQ_BIT(irq_num), _apic_imen ; /* already masked? */ \
81 jne 7f ; /* masked, don't mask */ \
82 orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
83 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
84 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
85 movl %eax, (%ecx) ; /* write the index */ \
86 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
87 orl $IOART_INTMASK, %eax ; /* set the mask */ \
88 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
89 7: ; /* already masked */ \
92 * Test to see whether we are handling an edge or level triggered INT.
93 * Level-triggered INTs must still be masked as we don't clear the source,
94 * and the EOI cycle would cause redundant INTs to occur.
96 #define MASK_LEVEL_IRQ(irq_num) \
97 testl $IRQ_BIT(irq_num), _apic_pin_trigger ; /* level-triggered pin? */ \
98 jz 9f ; /* edge, don't mask */ \
103 #ifdef APIC_INTR_REORDER
/* EOI only if this IRQ is actually in service: entry irq*8 of
   apic_isrbit_location holds a pointer to the relevant ISR word,
   entry irq*8+4 the bit mask to test within it. */
104 #define EOI_IRQ(irq_num) \
105 movl _apic_isrbit_location + 8 * (irq_num), %eax ; /* -> ISR word for this IRQ */ \
106 movl (%eax), %eax ; /* fetch the ISR word */ \
107 testl _apic_isrbit_location + 4 + 8 * (irq_num), %eax ; /* bit set? */ \
108 jz 9f ; /* not active */ \
109 movl $0, lapic_eoi ; /* issue the EOI */ \
110 APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
/* EOI only if this IRQ is in service per local APIC ISR word 1
   (presumably covering hardware vectors 32..63 — confirm). */
114 #define EOI_IRQ(irq_num) \
115 testl $IRQ_BIT(irq_num), lapic_isr1; /* in service? */ \
116 jz 9f ; /* not active */ \
117 movl $0, lapic_eoi; /* issue the EOI */ \
118 APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
124 * Test to see if the source is currently masked, clear if so.
126 #define UNMASK_IRQ(irq_num) \
127 IMASK_LOCK ; /* enter critical region protecting apic_imen */ \
128 testl $IRQ_BIT(irq_num), _apic_imen ; /* currently masked? */ \
129 je 7f ; /* bit clear, not masked */ \
130 andl $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */ \
131 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
132 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
133 movl %eax,(%ecx) ; /* write the index */ \
134 movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
135 andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
136 movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
140 #ifdef APIC_INTR_DIAGNOSTIC
141 #ifdef APIC_INTR_DIAGNOSTIC_IRQ
/* Event logger (presumably log_intr_event, called from the macro
   below): appends a 16-bit record to a shared ring buffer under a
   spinlock.  NOTE(review): excerpt — surrounding lines are elided. */
145 pushl $CNAME(apic_itrace_debuglock)
146 call CNAME(s_lock_np)
148 movl CNAME(apic_itrace_debugbuffer_idx), %ecx
153 movw %ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
156 movl %ecx, CNAME(apic_itrace_debugbuffer_idx)
157 pushl $CNAME(apic_itrace_debuglock)
158 call CNAME(s_unlock_np)
/* Diagnostic flavor: bump the per-IRQ counter and, for the one IRQ
   selected by APIC_INTR_DIAGNOSTIC_IRQ, log a trace event. */
164 #define APIC_ITRACE(name, irq_num, id) \
165 lock ; /* MP-safe */ \
166 incl CNAME(name) + (irq_num) * 4 ; /* bump per-IRQ counter */ \
170 movl $(irq_num), %eax ; \
171 cmpl $APIC_INTR_DIAGNOSTIC_IRQ, %eax ; /* trace only the chosen IRQ */ \
174 call log_intr_event ; \
/* Counting-only flavor: just bump the per-IRQ event counter. */
181 #define APIC_ITRACE(name, irq_num, id) \
182 lock ; /* MP-safe */ \
183 incl CNAME(name) + (irq_num) * 4
/* Event ids recorded by the diagnostic APIC_ITRACE. */
186 #define APIC_ITRACE_ENTER 1
187 #define APIC_ITRACE_EOI 2
188 #define APIC_ITRACE_TRYISRLOCK 3
189 #define APIC_ITRACE_GOTISRLOCK 4
190 #define APIC_ITRACE_ENTER2 5
191 #define APIC_ITRACE_LEAVE 6
192 #define APIC_ITRACE_UNMASK 7
193 #define APIC_ITRACE_ACTIVE 8
194 #define APIC_ITRACE_MASKED 9
195 #define APIC_ITRACE_NOISRLOCK 10
196 #define APIC_ITRACE_MASKED2 11
197 #define APIC_ITRACE_SPLZ 12
198 #define APIC_ITRACE_DORETI 13
/* Non-diagnostic build: tracing compiles away entirely. */
201 #define APIC_ITRACE(name, irq_num, id)
205 * Slow, threaded interrupts.
207 * XXX Most of the parameters here are obsolete. Fix this when we're
209 * XXX we really shouldn't return via doreti if we just schedule the
210 * interrupt handler and don't run anything. We could just do an
/* NOTE(review): excerpt — interior lines of this macro are elided. */
213 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
216 /* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
219 movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
222 movl $KPSEL, %eax ; /* kernel private segment (presumably per-CPU; confirm) */ \
225 maybe_extra_ipending ; /* caller-supplied hook, e.g. CLKINTR_PENDING for IRQ0 */ \
227 APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \
229 MASK_LEVEL_IRQ(irq_num) ; /* mask level pins: EOI would re-raise them */ \
232 incb _intr_nesting_level ; \
234 /* entry point used by doreti_unpend for HWIs. */ \
235 __CONCAT(Xresume,irq_num): ; \
236 FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
237 pushl $irq_num; /* pass the IRQ */ \
238 APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \
241 addl $4, %esp ; /* discard the parameter */ \
242 APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \
248 * Handle "spurious INTerrupts".
250 * This is different than the "spurious INTerrupt" generated by an
251 * 8259 PIC for missing INTs. See the APIC documentation for details.
252 * This routine should NOT do an 'EOI' cycle.
259 /* No EOI cycle used here */
265 * Handle TLB shootdowns.
273 #ifdef COUNT_XINVLTLB_HITS
281 #endif /* COUNT_XINVLTLB_HITS */
283 movl %cr3, %eax /* read %cr3; writing it back (elided here) flushes the TLB */
286 ss /* stack segment, avoid %ds load */
287 movl $0, lapic_eoi /* End Of Interrupt to APIC */
296 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
298 * - Stores current cpu state in checkstate_cpustate[cpuid]
299 * 0 == user, 1 == sys, 2 == intr
300 * - Stores current process in checkstate_curproc[cpuid]
302 * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
304 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
309 .globl _Xcpucheckstate
310 .globl _checkstate_cpustate
311 .globl _checkstate_curproc
312 .globl _checkstate_pc
/* NOTE(review): excerpt — interior lines of this handler are elided. */
316 pushl %ds /* save current data segment */
320 mov %ax, %ds /* use KERNEL data segment */
324 movl $0, lapic_eoi /* End Of Interrupt to APIC */
331 testl $PSL_VM, 24(%esp) /* vm86 mode? (eflags is at 24(%esp), see above) */
333 incl %ebx /* system or interrupt */
336 movl %ebx, _checkstate_cpustate(,%eax,4) /* record state; presumably %eax = cpuid */
338 movl %ebx, _checkstate_curproc(,%eax,4) /* record current process */
340 movl %ebx, _checkstate_pc(,%eax,4) /* record interrupted pc */
342 lock /* checkstate_probed_cpus |= (1<<id) */
343 btsl %eax, _checkstate_probed_cpus
346 popl %ds /* restore previous data segment */
354 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
356 * - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
358 * - We need a better method of triggering asts on other cpus.
/* NOTE(review): excerpt — interior lines of this handler are elided. */
367 mov %ax, %ds /* use KERNEL data segment */
373 lock /* checkstate_need_ast &= ~(1<<id) */
374 btrl %eax, _checkstate_need_ast
375 movl $0, lapic_eoi /* End Of Interrupt to APIC */
378 btsl %eax, _checkstate_pending_ast /* mark AST delivery in progress for this cpu */
381 FAKE_MCOUNT(13*4(%esp))
383 orl $AST_PENDING, _astpending /* XXX */
384 incb _intr_nesting_level
389 btrl %eax, _checkstate_pending_ast /* AST delivery done for this cpu */
391 btrl %eax, CNAME(resched_cpus) /* were we asked to reschedule? */
393 orl $AST_PENDING+AST_RESCHED,_astpending
395 incl CNAME(want_resched_cnt) /* statistics */
398 incl CNAME(cpuast_cnt) /* statistics */
402 /* We are already in the process of delivering an ast for this CPU */
408 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
/* NOTE(review): excerpt — interior lines of this handler are elided. */
417 mov %ax, %ds /* use KERNEL data segment */
422 movl $0, lapic_eoi /* End Of Interrupt to APIC */
424 FAKE_MCOUNT(13*4(%esp))
427 incl CNAME(forward_irq_hitcnt) /* statistics: forwarded IRQ accepted */
428 cmpb $4, _intr_nesting_level /* nested too deeply already? */
431 incb _intr_nesting_level
435 jmp doreti_next /* Handle forwarded interrupt */
438 incl CNAME(forward_irq_toodeepcnt) /* statistics: nesting limit reached */
/* Sender side: pick a target CPU and send it an XFORWARD_IRQ IPI via
   the local APIC ICR.  NOTE(review): excerpt — the busy-wait loop
   branches and other interior lines are elided. */
452 cmpl $0, CNAME(forward_irq_enabled) /* forwarding enabled at all? */
455 /* XXX - this is broken now, because mp_lock doesn't exist
460 movl $0, %eax /* Pick CPU #0 if no one has lock */
463 movl _cpu_num_to_apic_id(,%eax,4),%ecx /* target CPU's APIC id */
465 movl lapic_icr_hi, %eax
466 andl $~APIC_ID_MASK, %eax /* clear the destination field */
468 movl %eax, lapic_icr_hi /* set IPI destination */
471 movl lapic_icr_lo, %eax
472 andl $APIC_DELSTAT_MASK,%eax /* delivery status (busy-wait, loop elided) */
474 movl lapic_icr_lo, %eax
475 andl $APIC_RESV2_MASK, %eax /* preserve reserved bits */
476 orl $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
477 movl %eax, lapic_icr_lo /* writing ICR low sends the IPI */
479 movl lapic_icr_lo, %eax
480 andl $APIC_DELSTAT_MASK,%eax /* delivery status (busy-wait, loop elided) */
487 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
489 * - Signals its receipt.
490 * - Waits for permission to restart.
491 * - Signals its restart.
/* NOTE(review): excerpt — interior lines of this handler are elided. */
503 pushl %ds /* save current data segment */
507 mov %ax, %ds /* use KERNEL data segment */
511 movl $0, lapic_eoi /* End Of Interrupt to APIC */
514 imull $PCB_SIZE, %eax /* presumably %eax = cpuid; scale to pcb offset */
515 leal CNAME(stoppcbs)(%eax), %eax /* %eax = &stoppcbs[cpuid] */
517 call CNAME(savectx) /* Save process context */
524 btsl %eax, _stopped_cpus /* stopped_cpus |= (1<<id) */
526 btl %eax, _started_cpus /* while (!(started_cpus & (1<<id))) */
530 btrl %eax, _started_cpus /* started_cpus &= ~(1<<id) */
532 btrl %eax, _stopped_cpus /* stopped_cpus &= ~(1<<id) */
537 movl CNAME(cpustop_restartfunc), %eax /* optional restart hook */
540 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
545 popl %ds /* restore previous data segment */
/* Instantiate the fast-interrupt entry points for IRQs 0..23. */
555 FAST_INTR(0,fastintr0)
556 FAST_INTR(1,fastintr1)
557 FAST_INTR(2,fastintr2)
558 FAST_INTR(3,fastintr3)
559 FAST_INTR(4,fastintr4)
560 FAST_INTR(5,fastintr5)
561 FAST_INTR(6,fastintr6)
562 FAST_INTR(7,fastintr7)
563 FAST_INTR(8,fastintr8)
564 FAST_INTR(9,fastintr9)
565 FAST_INTR(10,fastintr10)
566 FAST_INTR(11,fastintr11)
567 FAST_INTR(12,fastintr12)
568 FAST_INTR(13,fastintr13)
569 FAST_INTR(14,fastintr14)
570 FAST_INTR(15,fastintr15)
571 FAST_INTR(16,fastintr16)
572 FAST_INTR(17,fastintr17)
573 FAST_INTR(18,fastintr18)
574 FAST_INTR(19,fastintr19)
575 FAST_INTR(20,fastintr20)
576 FAST_INTR(21,fastintr21)
577 FAST_INTR(22,fastintr22)
578 FAST_INTR(23,fastintr23)
/* Extra entry work for IRQ0: record that a clock interrupt is pending. */
579 #define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
580 /* Threaded interrupts */
581 INTR(0,intr0, CLKINTR_PENDING)
608 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
610 * - Calls the generic rendezvous action function.
/* NOTE(review): excerpt — interior lines of this handler are elided. */
618 mov %ax, %ds /* use KERNEL data segment */
623 call _smp_rendezvous_action
625 movl $0, lapic_eoi /* End Of Interrupt to APIC */
632 * Addresses of interrupt handlers.
633 * XresumeNN: Resumption addresses for HWIs.
639 * ipl.s: doreti_unpend
/* Hardware-interrupt resume table, indexed by IRQ (0..23). */
641 .long Xresume0, Xresume1, Xresume2, Xresume3
642 .long Xresume4, Xresume5, Xresume6, Xresume7
643 .long Xresume8, Xresume9, Xresume10, Xresume11
644 .long Xresume12, Xresume13, Xresume14, Xresume15
645 .long Xresume16, Xresume17, Xresume18, Xresume19
646 .long Xresume20, Xresume21, Xresume22, Xresume23
649 * ipl.s: doreti_unpend
650 * apic_ipl.s: splz_unpend
/* Software-interrupt handler table; unused slots point at _swi_null. */
652 .long _swi_null, swi_net, _swi_null, _swi_null
653 .long _swi_vm, _swi_null, _softclock
/* Statically-allocated SMP bookkeeping variables.  NOTE(review):
   excerpt — the storage directives (.long/.space) following most of
   these labels are elided here. */
656 /* active flag for lazy masking */
661 #ifdef COUNT_XINVLTLB_HITS
665 #endif /* COUNT_XINVLTLB_HITS */
667 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
668 .globl _stopped_cpus, _started_cpus
675 .globl _checkstate_probed_cpus
676 _checkstate_probed_cpus:
678 #endif /* BETTER_CLOCK */
679 .globl _checkstate_need_ast
680 _checkstate_need_ast:
682 _checkstate_pending_ast:
/* statistics counters and control hooks for IRQ forwarding / AST IPIs */
684 .globl CNAME(forward_irq_misscnt)
685 .globl CNAME(forward_irq_toodeepcnt)
686 .globl CNAME(forward_irq_hitcnt)
687 .globl CNAME(resched_cpus)
688 .globl CNAME(want_resched_cnt)
689 .globl CNAME(cpuast_cnt)
690 .globl CNAME(cpustop_restartfunc)
691 CNAME(forward_irq_misscnt):
693 CNAME(forward_irq_hitcnt):
695 CNAME(forward_irq_toodeepcnt):
699 CNAME(want_resched_cnt):
703 CNAME(cpustop_restartfunc):
708 .globl _apic_pin_trigger