2 * from: vector.s, 386BSD 0.1 unknown origin
7 #include <machine/apic.h>
8 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
13 #ifdef FAST_SIMPLELOCK
15 #define GET_FAST_INTR_LOCK \
16 pushl $_fast_intr_lock ; /* address of lock */ \
17 call _s_lock ; /* MP-safe */ \
20 #define REL_FAST_INTR_LOCK \
21 movl $0, _fast_intr_lock
23 #else /* FAST_SIMPLELOCK */
25 #define GET_FAST_INTR_LOCK \
28 #define REL_FAST_INTR_LOCK \
29 movl $_mp_lock, %edx ; /* GIANT_LOCK */ \
32 #endif /* FAST_SIMPLELOCK */
34 /* Convert an absolute IRQ number into a single-bit mask. */
35 #define IRQ_BIT(irq_num) (1 << (irq_num))
37 /* Convert an IRQ number into its I/O APIC redirection-table register
 * index: redirection entries start at register 0x10 and each entry is
 * two 32-bit registers wide. */
38 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
42 * Macros for interrupt entry, call to handler, and exit.
45 #ifdef FAST_WITHOUTCPL
49 #define FAST_INTR(irq_num, vec_name) \
53 pushl %eax ; /* save only call-used registers */ \
64 FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ; \
65 pushl _intr_unit + (irq_num) * 4 ; \
66 GET_FAST_INTR_LOCK ; \
67 call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
68 REL_FAST_INTR_LOCK ; \
70 movl $0, lapic_eoi ; \
72 incl _cnt+V_INTR ; /* book-keeping can wait */ \
73 movl _intr_countp + (irq_num) * 4, %eax ; \
85 #else /* FAST_WITHOUTCPL */
87 #define FAST_INTR(irq_num, vec_name) \
91 pushl %eax ; /* save only call-used registers */ \
100 movl $KPSEL, %eax ; \
102 FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ; \
103 GET_FAST_INTR_LOCK ; \
104 pushl _intr_unit + (irq_num) * 4 ; \
105 call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
107 movl $0, lapic_eoi ; \
109 incl _cnt+V_INTR ; /* book-keeping can wait */ \
110 movl _intr_countp + (irq_num) * 4,%eax ; \
113 movl _cpl, %eax ; /* unmasking pending HWIs or SWIs? */ \
115 andl _ipending, %eax ; \
116 jne 2f ; /* yes, maybe handle them */ \
119 REL_FAST_INTR_LOCK ; \
130 cmpb $3, _intr_nesting_level ; /* enough stack? */ \
131 jae 1b ; /* no, return */ \
133 /* XXX next line is probably unnecessary now. */ \
134 movl $HWI_MASK|SWI_MASK, _cpl ; /* limit nesting ... */ \
136 incb _intr_nesting_level ; /* ... really limit it ... */ \
137 sti ; /* to do this as early as possible */ \
138 popl %fs ; /* discard most of thin frame ... */ \
139 MAYBE_POPL_ES ; /* discard most of thin frame ... */ \
140 popl %ecx ; /* ... original %ds ... */ \
142 xchgl %eax, 4(%esp) ; /* orig %eax; save cpl */ \
143 pushal ; /* build fat frame (grrr) ... */ \
144 pushl %ecx ; /* ... actually %ds ... */ \
147 movl $KDSEL, %eax ; \
151 movl (3+8+0)*4(%esp), %ecx ; /* %ecx from thin frame ... */ \
152 movl %ecx, (3+6)*4(%esp) ; /* ... to fat frame ... */ \
153 movl (3+8+1)*4(%esp), %eax ; /* ... cpl from thin frame */ \
155 subl $4, %esp ; /* junk for unit number */ \
159 #endif /** FAST_WITHOUTCPL */
166 pushl $0 ; /* dummy error code */ \
167 pushl $0 ; /* dummy trap type */ \
169 pushl %ds ; /* save data and extra segments ... */ \
/* int_to_apicintpin[] entries are 16 bytes each; offset 8 holds the I/O APIC
 * register-select address and offset 12 the redirection-table index for this
 * IRQ (consumed by the MASK_IRQ/UNMASK_IRQ sequences). */
180 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
181 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
/*
 * MASK_IRQ(irq_num): under IMASK_LOCK, set this IRQ's bit in apic_imen and
 * set IOART_INTMASK in its I/O APIC redirection entry via the indirect
 * index/window register pair.  If the bit is already set, skip straight to
 * local label 7 so the hardware is not touched twice.
 */
183 #define MASK_IRQ(irq_num) \
184 IMASK_LOCK ; /* into critical reg */ \
185 testl $IRQ_BIT(irq_num), _apic_imen ; \
186 jne 7f ; /* already masked, skip */ \
187 orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
188 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
189 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
190 movl %eax, (%ecx) ; /* write the index */ \
191 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
192 orl $IOART_INTMASK, %eax ; /* set the mask */ \
193 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
194 7: ; /* already masked */ \
197 * Test to see whether we are handling an edge or level triggered INT.
198 * Level-triggered INTs must still be masked as we don't clear the source,
199 * and the EOI cycle would cause redundant INTs to occur.
/*
 * MASK_LEVEL_IRQ(irq_num): mask only level-triggered sources (bit set in
 * _apic_pin_trigger); edge-triggered sources jump to 9f and stay unmasked.
 */
201 #define MASK_LEVEL_IRQ(irq_num) \
202 testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
203 jz 9f ; /* edge, don't mask */ \
204 MASK_IRQ(irq_num) ; \
208 #ifdef APIC_INTR_REORDER
/*
 * EOI_IRQ(irq_num), APIC_INTR_REORDER variant: issue an EOI to the local
 * APIC only when this IRQ is actually in service.  The recorded ISR word
 * address (entry offset 0) and bit mask (entry offset 4) come from
 * apic_isrbit_location[]; inactive IRQs jump to 9f without an EOI.
 */
209 #define EOI_IRQ(irq_num) \
210 movl _apic_isrbit_location + 8 * (irq_num), %eax ; \
211 movl (%eax), %eax ; \
212 testl _apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
213 jz 9f ; /* not active */ \
214 movl $0, lapic_eoi ; \
215 APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
/*
 * EOI_IRQ(irq_num), non-reordered variant: test the IRQ's bit directly in
 * the local APIC in-service register and EOI only if in service; otherwise
 * jump to 9f without an EOI.
 */
219 #define EOI_IRQ(irq_num) \
220 testl $IRQ_BIT(irq_num), lapic_isr1; \
221 jz 9f ; /* not active */ \
222 movl $0, lapic_eoi; \
223 APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
229 * Test to see if the source is currently masked, clear if so.
/*
 * UNMASK_IRQ(irq_num): under IMASK_LOCK, clear this IRQ's bit in apic_imen
 * and clear IOART_INTMASK in its I/O APIC redirection entry via the indirect
 * index/window register pair.  If the bit is already clear, skip to local
 * label 7 (nothing to unmask).
 */
231 #define UNMASK_IRQ(irq_num) \
232 IMASK_LOCK ; /* into critical reg */ \
233 testl $IRQ_BIT(irq_num), _apic_imen ; \
234 je 7f ; /* bit clear, not masked */ \
235 andl $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */ \
236 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
237 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
238 movl %eax,(%ecx) ; /* write the index */ \
239 movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
240 andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
241 movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
245 #ifdef INTR_SIMPLELOCK
248 #define LATELOCK call _get_isrlock
251 ISR_TRYLOCK ; /* XXX this is going away... */ \
252 testl %eax, %eax ; /* did we get it? */ \
254 #define DELOCK ISR_RELLOCK
258 #ifdef APIC_INTR_DIAGNOSTIC
259 #ifdef APIC_INTR_DIAGNOSTIC_IRQ
263 pushl $CNAME(apic_itrace_debuglock)
264 call CNAME(s_lock_np)
266 movl CNAME(apic_itrace_debugbuffer_idx), %ecx
271 movw %ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
274 movl %ecx, CNAME(apic_itrace_debugbuffer_idx)
275 pushl $CNAME(apic_itrace_debuglock)
276 call CNAME(s_unlock_np)
282 #define APIC_ITRACE(name, irq_num, id) \
283 lock ; /* MP-safe */ \
284 incl CNAME(name) + (irq_num) * 4 ; \
288 movl $(irq_num), %eax ; \
289 cmpl $APIC_INTR_DIAGNOSTIC_IRQ, %eax ; \
292 call log_intr_event ; \
299 #define APIC_ITRACE(name, irq_num, id) \
300 lock ; /* MP-safe */ \
301 incl CNAME(name) + (irq_num) * 4
/* Event ids passed as the `id' argument to APIC_ITRACE() (consumed by the
 * APIC_INTR_DIAGNOSTIC logging path). */
304 #define APIC_ITRACE_ENTER 1
305 #define APIC_ITRACE_EOI 2
306 #define APIC_ITRACE_TRYISRLOCK 3
307 #define APIC_ITRACE_GOTISRLOCK 4
308 #define APIC_ITRACE_ENTER2 5
309 #define APIC_ITRACE_LEAVE 6
310 #define APIC_ITRACE_UNMASK 7
311 #define APIC_ITRACE_ACTIVE 8
312 #define APIC_ITRACE_MASKED 9
313 #define APIC_ITRACE_NOISRLOCK 10
314 #define APIC_ITRACE_MASKED2 11
315 #define APIC_ITRACE_SPLZ 12
316 #define APIC_ITRACE_DORETI 13
319 #define APIC_ITRACE(name, irq_num, id)
324 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
327 /* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
330 movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
333 movl $KPSEL, %eax ; \
336 maybe_extra_ipending ; \
338 APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \
339 lock ; /* MP-safe */ \
340 btsl $(irq_num), iactive ; /* lazy masking */ \
341 jc 1f ; /* already active */ \
343 MASK_LEVEL_IRQ(irq_num) ; \
346 APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
349 APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
350 AVCPL_LOCK ; /* MP-safe */ \
351 testl $IRQ_BIT(irq_num), _cpl ; \
352 jne 2f ; /* this INT masked */ \
353 testl $IRQ_BIT(irq_num), _cml ; \
354 jne 2f ; /* this INT masked */ \
355 orl $IRQ_BIT(irq_num), _cil ; \
358 incb _intr_nesting_level ; \
360 /* entry point used by doreti_unpend for HWIs. */ \
361 __CONCAT(Xresume,irq_num): ; \
362 FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
363 lock ; incl _cnt+V_INTR ; /* tally interrupts */ \
364 movl _intr_countp + (irq_num) * 4, %eax ; \
365 lock ; incl (%eax) ; \
367 AVCPL_LOCK ; /* MP-safe */ \
370 orl _intr_mask + (irq_num) * 4, %eax ; \
374 pushl _intr_unit + (irq_num) * 4 ; \
375 incl _inside_intr ; \
376 APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \
378 call *_intr_handler + (irq_num) * 4 ; \
380 APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \
381 decl _inside_intr ; \
383 lock ; andl $~IRQ_BIT(irq_num), iactive ; \
384 lock ; andl $~IRQ_BIT(irq_num), _cil ; \
385 UNMASK_IRQ(irq_num) ; \
386 APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ; \
387 sti ; /* doreti repeats cli/sti */ \
394 APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ; \
395 MASK_IRQ(irq_num) ; \
397 AVCPL_LOCK ; /* MP-safe */ \
399 orl $IRQ_BIT(irq_num), _ipending ; \
402 btsl $(irq_num), iactive ; /* still active */ \
403 jnc 0b ; /* retry */ \
408 2: ; /* masked by cpl|cml */ \
409 APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \
411 orl $IRQ_BIT(irq_num), _ipending ; \
413 DELOCK ; /* XXX this is going away... */ \
417 3: ; /* other cpu has isr lock */ \
418 APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
419 AVCPL_LOCK ; /* MP-safe */ \
421 orl $IRQ_BIT(irq_num), _ipending ; \
422 testl $IRQ_BIT(irq_num), _cpl ; \
423 jne 4f ; /* this INT masked */ \
424 testl $IRQ_BIT(irq_num), _cml ; \
425 jne 4f ; /* this INT masked */ \
426 orl $IRQ_BIT(irq_num), _cil ; \
428 call forward_irq ; /* forward irq to lock holder */ \
429 POP_FRAME ; /* and return */ \
433 APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
435 POP_FRAME ; /* and return */ \
438 #else /* CPL_AND_CML */
441 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
444 /* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
447 movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
450 movl $KPSEL, %eax ; \
453 maybe_extra_ipending ; \
455 APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \
456 lock ; /* MP-safe */ \
457 btsl $(irq_num), iactive ; /* lazy masking */ \
458 jc 1f ; /* already active */ \
460 MASK_LEVEL_IRQ(irq_num) ; \
463 APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
464 ISR_TRYLOCK ; /* XXX this is going away... */ \
465 testl %eax, %eax ; /* did we get it? */ \
468 APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
469 AVCPL_LOCK ; /* MP-safe */ \
470 testl $IRQ_BIT(irq_num), _cpl ; \
471 jne 2f ; /* this INT masked */ \
474 incb _intr_nesting_level ; \
476 /* entry point used by doreti_unpend for HWIs. */ \
477 __CONCAT(Xresume,irq_num): ; \
478 FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
479 lock ; incl _cnt+V_INTR ; /* tally interrupts */ \
480 movl _intr_countp + (irq_num) * 4, %eax ; \
481 lock ; incl (%eax) ; \
483 AVCPL_LOCK ; /* MP-safe */ \
486 orl _intr_mask + (irq_num) * 4, %eax ; \
489 andl $~IRQ_BIT(irq_num), _ipending ; \
492 pushl _intr_unit + (irq_num) * 4 ; \
493 APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \
495 call *_intr_handler + (irq_num) * 4 ; \
497 APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \
499 lock ; andl $~IRQ_BIT(irq_num), iactive ; \
500 UNMASK_IRQ(irq_num) ; \
501 APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ; \
502 sti ; /* doreti repeats cli/sti */ \
508 APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ; \
509 MASK_IRQ(irq_num) ; \
511 AVCPL_LOCK ; /* MP-safe */ \
513 orl $IRQ_BIT(irq_num), _ipending ; \
516 btsl $(irq_num), iactive ; /* still active */ \
517 jnc 0b ; /* retry */ \
519 iret ; /* XXX: iactive bit might be 0 now */ \
521 2: ; /* masked by cpl, leave iactive set */ \
522 APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \
524 orl $IRQ_BIT(irq_num), _ipending ; \
526 ISR_RELLOCK ; /* XXX this is going away... */ \
530 3: ; /* other cpu has isr lock */ \
531 APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
532 AVCPL_LOCK ; /* MP-safe */ \
534 orl $IRQ_BIT(irq_num), _ipending ; \
535 testl $IRQ_BIT(irq_num), _cpl ; \
536 jne 4f ; /* this INT masked */ \
538 call forward_irq ; /* forward irq to lock holder */ \
539 POP_FRAME ; /* and return */ \
543 APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
545 POP_FRAME ; /* and return */ \
548 #endif /* CPL_AND_CML */
552 * Handle "spurious INTerrupts".
554 * This is different from the "spurious INTerrupt" generated by an
555 * 8259 PIC for missing INTs. See the APIC documentation for details.
556 * This routine should NOT do an 'EOI' cycle.
563 /* No EOI cycle used here */
569 * Handle TLB shootdowns.
577 #ifdef COUNT_XINVLTLB_HITS
585 #endif /* COUNT_XINVLTLB_HITS */
587 movl %cr3, %eax /* invalidate the TLB */
590 ss /* stack segment, avoid %ds load */
591 movl $0, lapic_eoi /* End Of Interrupt to APIC */
600 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
602 * - Stores current cpu state in checkstate_cpustate[cpuid]
603 * 0 == user, 1 == sys, 2 == intr
604 * - Stores current process in checkstate_curproc[cpuid]
606 * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
608 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
613 .globl _Xcpucheckstate
614 .globl _checkstate_cpustate
615 .globl _checkstate_curproc
616 .globl _checkstate_pc
620 pushl %ds /* save current data segment */
624 movl %ax, %ds /* use KERNEL data segment */
628 movl $0, lapic_eoi /* End Of Interrupt to APIC */
635 testl $PSL_VM, 24(%esp)
637 incl %ebx /* system or interrupt */
639 cmpl $0, _inside_intr
641 incl %ebx /* interrupt */
645 movl %ebx, _checkstate_cpustate(,%eax,4)
647 movl %ebx, _checkstate_curproc(,%eax,4)
649 movl %ebx, _checkstate_pc(,%eax,4)
651 lock /* checkstate_probed_cpus |= (1<<id) */
652 btsl %eax, _checkstate_probed_cpus
655 popl %ds /* restore previous data segment */
660 #endif /* BETTER_CLOCK */
663 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
665 * - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
667 * - We need a better method of triggering asts on other cpus.
676 movl %ax, %ds /* use KERNEL data segment */
682 lock /* checkstate_need_ast &= ~(1<<id) */
683 btrl %eax, _checkstate_need_ast
684 movl $0, lapic_eoi /* End Of Interrupt to APIC */
687 btsl %eax, _checkstate_pending_ast
690 FAKE_MCOUNT(13*4(%esp))
693 * Giant locks do not come cheap.
694 * A lot of cycles are going to be wasted here.
705 movl $1, _astpending /* XXX */
708 incb _intr_nesting_level
715 btrl %eax, _checkstate_pending_ast
717 btrl %eax, CNAME(resched_cpus)
719 movl $1, CNAME(want_resched)
721 incl CNAME(want_resched_cnt)
724 incl CNAME(cpuast_cnt)
728 /* We are already in the process of delivering an ast for this CPU */
734 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
743 movl %ax, %ds /* use KERNEL data segment */
748 movl $0, lapic_eoi /* End Of Interrupt to APIC */
750 FAKE_MCOUNT(13*4(%esp))
753 testl %eax,%eax /* Did we get the lock ? */
757 incl CNAME(forward_irq_hitcnt)
758 cmpb $4, _intr_nesting_level
770 incb _intr_nesting_level
776 jmp _doreti /* Handle forwarded interrupt */
779 incl CNAME(forward_irq_misscnt)
780 call forward_irq /* Oops, we've lost the isr lock */
786 incl CNAME(forward_irq_toodeepcnt)
801 cmpl $0, CNAME(forward_irq_enabled)
807 movl $0, %eax /* Pick CPU #0 if noone has lock */
810 movl _cpu_num_to_apic_id(,%eax,4),%ecx
812 movl lapic_icr_hi, %eax
813 andl $~APIC_ID_MASK, %eax
815 movl %eax, lapic_icr_hi
818 movl lapic_icr_lo, %eax
819 andl $APIC_DELSTAT_MASK,%eax
821 movl lapic_icr_lo, %eax
822 andl $APIC_RESV2_MASK, %eax
823 orl $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
824 movl %eax, lapic_icr_lo
826 movl lapic_icr_lo, %eax
827 andl $APIC_DELSTAT_MASK,%eax
833 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
835 * - Signals its receipt.
836 * - Waits for permission to restart.
837 * - Signals its restart.
849 pushl %ds /* save current data segment */
853 movl %ax, %ds /* use KERNEL data segment */
857 movl $0, lapic_eoi /* End Of Interrupt to APIC */
860 imull $PCB_SIZE, %eax
861 leal CNAME(stoppcbs)(%eax), %eax
863 call CNAME(savectx) /* Save process context */
870 btsl %eax, _stopped_cpus /* stopped_cpus |= (1<<id) */
872 btl %eax, _started_cpus /* while (!(started_cpus & (1<<id))) */
876 btrl %eax, _started_cpus /* started_cpus &= ~(1<<id) */
878 btrl %eax, _stopped_cpus /* stopped_cpus &= ~(1<<id) */
883 movl CNAME(cpustop_restartfunc), %eax
886 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
891 popl %ds /* restore previous data segment */
/* Instantiate the fast-interrupt entry points for IRQs 0..23. */
901 FAST_INTR(0,fastintr0)
902 FAST_INTR(1,fastintr1)
903 FAST_INTR(2,fastintr2)
904 FAST_INTR(3,fastintr3)
905 FAST_INTR(4,fastintr4)
906 FAST_INTR(5,fastintr5)
907 FAST_INTR(6,fastintr6)
908 FAST_INTR(7,fastintr7)
909 FAST_INTR(8,fastintr8)
910 FAST_INTR(9,fastintr9)
911 FAST_INTR(10,fastintr10)
912 FAST_INTR(11,fastintr11)
913 FAST_INTR(12,fastintr12)
914 FAST_INTR(13,fastintr13)
915 FAST_INTR(14,fastintr14)
916 FAST_INTR(15,fastintr15)
917 FAST_INTR(16,fastintr16)
918 FAST_INTR(17,fastintr17)
919 FAST_INTR(18,fastintr18)
920 FAST_INTR(19,fastintr19)
921 FAST_INTR(20,fastintr20)
922 FAST_INTR(21,fastintr21)
923 FAST_INTR(22,fastintr22)
924 FAST_INTR(23,fastintr23)
/* IRQ0 is the clock: record a pending clock interrupt on entry, expanded
 * through the INTR macro's maybe_extra_ipending hook. */
925 #define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
926 INTR(0,intr0, CLKINTR_PENDING)
953 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
955 * - Calls the generic rendezvous action function.
963 movl %ax, %ds /* use KERNEL data segment */
968 call _smp_rendezvous_action
970 movl $0, lapic_eoi /* End Of Interrupt to APIC */
977 * Addresses of interrupt handlers.
978 * XresumeNN: Resumption addresses for HWIs.
984 * ipl.s: doreti_unpend
/* Xresume0..Xresume23: resumption addresses for hardware interrupts,
 * indexed by IRQ number (referenced by doreti_unpend in ipl.s). */
986 .long Xresume0, Xresume1, Xresume2, Xresume3
987 .long Xresume4, Xresume5, Xresume6, Xresume7
988 .long Xresume8, Xresume9, Xresume10, Xresume11
989 .long Xresume12, Xresume13, Xresume14, Xresume15
990 .long Xresume16, Xresume17, Xresume18, Xresume19
991 .long Xresume20, Xresume21, Xresume22, Xresume23
994 * ipl.s: doreti_unpend
995 * apic_ipl.s: splz_unpend
997 .long _swi_null, swi_net, _swi_null, _swi_null
998 .long _swi_vm, _swi_null, _softclock, _swi_null
1000 imasks: /* masks for interrupt handlers */
1001 .space NHWI*4 /* padding; HWI masks are elsewhere */
1003 .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
1004 .long SWI_VM_MASK, 0, SWI_CLOCK_MASK, 0
1006 /* active flag for lazy masking */
1010 #ifdef COUNT_XINVLTLB_HITS
1013 .space (NCPU * 4), 0
1014 #endif /* COUNT_XINVLTLB_HITS */
1016 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
1017 .globl _stopped_cpus, _started_cpus
1024 .globl _checkstate_probed_cpus
1025 _checkstate_probed_cpus:
1027 #endif /* BETTER_CLOCK */
1028 .globl _checkstate_need_ast
1029 _checkstate_need_ast:
1031 _checkstate_pending_ast:
1033 .globl CNAME(forward_irq_misscnt)
1034 .globl CNAME(forward_irq_toodeepcnt)
1035 .globl CNAME(forward_irq_hitcnt)
1036 .globl CNAME(resched_cpus)
1037 .globl CNAME(want_resched_cnt)
1038 .globl CNAME(cpuast_cnt)
1039 .globl CNAME(cpustop_restartfunc)
1040 CNAME(forward_irq_misscnt):
1042 CNAME(forward_irq_hitcnt):
1044 CNAME(forward_irq_toodeepcnt):
1046 CNAME(resched_cpus):
1048 CNAME(want_resched_cnt):
1052 CNAME(cpustop_restartfunc):
1057 .globl _apic_pin_trigger