]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/i386/isa/apic_vector.s
This commit was generated by cvs2svn to compensate for changes in r56893,
[FreeBSD/FreeBSD.git] / sys / i386 / isa / apic_vector.s
1 /*
2  *      from: vector.s, 386BSD 0.1 unknown origin
3  * $FreeBSD$
4  */
5
6
7 #include <machine/apic.h>
8 #include <machine/smp.h>
9
10 #include "i386/isa/intr_machdep.h"
11
12
13 #ifdef FAST_SIMPLELOCK
14
/*
 * GET/REL_FAST_INTR_LOCK -- bracket the call of a fast interrupt handler.
 * FAST_SIMPLELOCK variant: use a dedicated simple spin lock
 * (_fast_intr_lock) taken with the MP-safe _s_lock; release is a plain
 * store of 0.
 */
15 #define GET_FAST_INTR_LOCK                                              \
16         pushl   $_fast_intr_lock ;              /* address of lock */   \
17         call    _s_lock ;                       /* MP-safe */           \
18         addl    $4,%esp
19
20 #define REL_FAST_INTR_LOCK                                              \
21         movl    $0, _fast_intr_lock
22
23 #else /* FAST_SIMPLELOCK */
24
/*
 * Default variant: take/release the giant ISR lock via _get_isrlock and
 * _MPrellock_edx (which expects the lock address, _mp_lock, in %edx).
 */
25 #define GET_FAST_INTR_LOCK                                              \
26         call    _get_isrlock
27
28 #define REL_FAST_INTR_LOCK                                              \
29         movl    $_mp_lock, %edx ; /* GIANT_LOCK */                      \
30         call    _MPrellock_edx
31
32 #endif /* FAST_SIMPLELOCK */
33
34 /* convert an absolute IRQ# into a bitmask */
35 #define IRQ_BIT(irq_num)        (1 << (irq_num))
36
37 /* make an index into the IO APIC from the IRQ# */
/* (select-register index 0x10 + 2*irq: each redirection entry is two 32-bit regs) */
38 #define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
39
40
41 /*
42  * Macros for interrupt entry, call to handler, and exit.
43  */
44
45 #ifdef FAST_WITHOUTCPL
46
47 /*
 * FAST_INTR(irq_num, vec_name) -- FAST_WITHOUTCPL variant.
 * Entry stub for a "fast" hardware interrupt: save only the call-used
 * registers plus the data/extra segment registers, load the kernel
 * %ds/%es/%fs selectors, push the unit cookie and call the handler
 * immediately under the fast-interrupt lock, EOI the local APIC,
 * bump the interrupt counters, then restore and iret.  No cpl
 * processing and no trip through _doreti.
48  */
49 #define FAST_INTR(irq_num, vec_name)                                    \
50         .text ;                                                         \
51         SUPERALIGN_TEXT ;                                               \
52 IDTVEC(vec_name) ;                                                      \
53         pushl   %eax ;          /* save only call-used registers */     \
54         pushl   %ecx ;                                                  \
55         pushl   %edx ;                                                  \
56         pushl   %ds ;                                                   \
57         MAYBE_PUSHL_ES ;                                                \
58         pushl   %fs ;                                                   \
59         movl    $KDSEL,%eax ;                                           \
60         movl    %ax,%ds ;                                               \
61         MAYBE_MOVW_AX_ES ;                                              \
62         movl    $KPSEL,%eax ;                                           \
63         movl    %ax,%fs ;                                               \
64         FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;                      \
65         pushl   _intr_unit + (irq_num) * 4 ;                            \
66         GET_FAST_INTR_LOCK ;                                            \
67         call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
68         REL_FAST_INTR_LOCK ;                                            \
69         addl    $4, %esp ;                                              \
70         movl    $0, lapic_eoi ;                                         \
71         lock ;                                                          \
72         incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
73         movl    _intr_countp + (irq_num) * 4, %eax ;                    \
74         lock ;                                                          \
75         incl    (%eax) ;                                                \
76         MEXITCOUNT ;                                                    \
77         popl    %fs ;                                                   \
78         MAYBE_POPL_ES ;                                                 \
79         popl    %ds ;                                                   \
80         popl    %edx ;                                                  \
81         popl    %ecx ;                                                  \
82         popl    %eax ;                                                  \
83         iret
84
85 #else /* FAST_WITHOUTCPL */
86
/*
 * FAST_INTR(irq_num, vec_name) -- CPL-aware variant.
 * Like the FAST_WITHOUTCPL version, but after the handler runs it
 * checks for pending unmasked HWIs/SWIs (~_cpl & _ipending); if any
 * are pending and the nesting level allows, it rebuilds a full trap
 * frame (pushal + segments + cpl + dummy unit) and jumps to _doreti
 * to process them.
 *
 * BUG FIX: the lines "pushl %fs ;", "movl $KPSEL, %eax ;" and
 * "movl %ax, %fs ;" in the fat-frame rebuild were missing their
 * trailing line-continuation backslashes, which would terminate this
 * multi-line #define early and leave the remaining instructions as
 * stray one-shot code outside the macro.  The continuations are
 * restored below.
 */
87 #define FAST_INTR(irq_num, vec_name)                                    \
88         .text ;                                                         \
89         SUPERALIGN_TEXT ;                                               \
90 IDTVEC(vec_name) ;                                                      \
91         pushl   %eax ;          /* save only call-used registers */     \
92         pushl   %ecx ;                                                  \
93         pushl   %edx ;                                                  \
94         pushl   %ds ;                                                   \
95         MAYBE_PUSHL_ES ;                                                \
96         pushl   %fs ;                                                   \
97         movl    $KDSEL, %eax ;                                          \
98         movl    %ax, %ds ;                                              \
99         MAYBE_MOVW_AX_ES ;                                              \
100         movl    $KPSEL, %eax ;                                          \
101         movl    %ax, %fs ;                                              \
102         FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;                      \
103         GET_FAST_INTR_LOCK ;                                            \
104         pushl   _intr_unit + (irq_num) * 4 ;                            \
105         call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
106         addl    $4, %esp ;                                              \
107         movl    $0, lapic_eoi ;                                         \
108         lock ;                                                          \
109         incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
110         movl    _intr_countp + (irq_num) * 4,%eax ;                     \
111         lock ;                                                          \
112         incl    (%eax) ;                                                \
113         movl    _cpl, %eax ;    /* unmasking pending HWIs or SWIs? */   \
114         notl    %eax ;                                                  \
115         andl    _ipending, %eax ;                                       \
116         jne     2f ;            /* yes, maybe handle them */            \
117 1: ;                                                                    \
118         MEXITCOUNT ;                                                    \
119         REL_FAST_INTR_LOCK ;                                            \
120         popl    %fs ;                                                   \
121         MAYBE_POPL_ES ;                                                 \
122         popl    %ds ;                                                   \
123         popl    %edx ;                                                  \
124         popl    %ecx ;                                                  \
125         popl    %eax ;                                                  \
126         iret ;                                                          \
127 ;                                                                       \
128         ALIGN_TEXT ;                                                    \
129 2: ;                                                                    \
130         cmpb    $3, _intr_nesting_level ;       /* enough stack? */     \
131         jae     1b ;            /* no, return */                        \
132         movl    _cpl, %eax ;                                            \
133         /* XXX next line is probably unnecessary now. */                \
134         movl    $HWI_MASK|SWI_MASK, _cpl ;      /* limit nesting ... */ \
135         lock ;                                                          \
136         incb    _intr_nesting_level ;   /* ... really limit it ... */   \
137         sti ;                   /* to do this as early as possible */   \
138         popl    %fs ;           /* discard most of thin frame ... */    \
139         MAYBE_POPL_ES ;         /* discard most of thin frame ... */    \
140         popl    %ecx ;          /* ... original %ds ... */              \
141         popl    %edx ;                                                  \
142         xchgl   %eax, 4(%esp) ; /* orig %eax; save cpl */               \
143         pushal ;                /* build fat frame (grrr) ... */        \
144         pushl   %ecx ;          /* ... actually %ds ... */              \
145         pushl   %es ;                                                   \
146         pushl   %fs ;                                                   \
147         movl    $KDSEL, %eax ;                                          \
148         movl    %ax, %es ;                                              \
149         movl    $KPSEL, %eax ;                                          \
150         movl    %ax, %fs ;                                              \
151         movl    (3+8+0)*4(%esp), %ecx ; /* %ecx from thin frame ... */  \
152         movl    %ecx, (3+6)*4(%esp) ;   /* ... to fat frame ... */      \
153         movl    (3+8+1)*4(%esp), %eax ; /* ... cpl from thin frame */   \
154         pushl   %eax ;                                                  \
155         subl    $4, %esp ;      /* junk for unit number */              \
156         MEXITCOUNT ;                                                    \
157         jmp     _doreti
158
159 #endif /* FAST_WITHOUTCPL */
160
161
162 /*
 * PUSH_FRAME/POP_FRAME -- build and tear down a full trap frame:
 *  dummy error code + dummy trap type, all general registers (pushal),
 *  then the %ds/%es/%fs segment registers.  POP_FRAME restores in
 *  reverse order and pops the two dummy words ($4+4).
164  */
165 #define PUSH_FRAME                                                      \
166         pushl   $0 ;            /* dummy error code */                  \
167         pushl   $0 ;            /* dummy trap type */                   \
168         pushal ;                                                        \
169         pushl   %ds ;           /* save data and extra segments ... */  \
170         pushl   %es ;                                                   \
171         pushl   %fs
172
173 #define POP_FRAME                                                       \
174         popl    %fs ;                                                   \
175         popl    %es ;                                                   \
176         popl    %ds ;                                                   \
177         popal ;                                                         \
178         addl    $4+4,%esp
179
/* Per-IRQ IO APIC data from the int_to_apicintpin table (16-byte entries):
 * offset 8 is used as the IO APIC base address, offset 12 as the
 * redirection-register index. */
180 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
181 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
182
/*
 * MASK_IRQ(irq_num) -- under IMASK_LOCK, set the IRQ's bit in
 * _apic_imen and set IOART_INTMASK in the IRQ's IO APIC redirection
 * entry.  Does nothing if the bit is already set.
 * Clobbers %eax, %ecx and flags.
 */
183 #define MASK_IRQ(irq_num)                                               \
184         IMASK_LOCK ;                            /* into critical reg */ \
185         testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
186         jne     7f ;                    /* masked, don't mask */        \
187         orl     $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */  \
188         movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
189         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
190         movl    %eax, (%ecx) ;                  /* write the index */   \
191         movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
192         orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
193         movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
194 7: ;                                            /* already masked */    \
195         IMASK_UNLOCK
196 /*
197  * Test to see whether we are handling an edge or level triggered INT.
198  *  Level-triggered INTs must still be masked as we don't clear the source,
199  *  and the EOI cycle would cause redundant INTs to occur.
200  */
201 #define MASK_LEVEL_IRQ(irq_num)                                         \
202         testl   $IRQ_BIT(irq_num), _apic_pin_trigger ;                  \
203         jz      9f ;                            /* edge, don't mask */  \
204         MASK_IRQ(irq_num) ;                                             \
205 9:
206
207
208 #ifdef APIC_INTR_REORDER
/*
 * EOI_IRQ(irq_num) -- write the local APIC EOI register, but only if
 * the IRQ's in-service bit is set.  The REORDER variant fetches the
 * ISR word pointer and bit mask from the _apic_isrbit_location table
 * (8-byte entries: pointer at +0, mask at +4); the default variant
 * tests lapic_isr1 directly.  Clobbers %eax and flags.
 */
209 #define EOI_IRQ(irq_num)                                                \
210         movl    _apic_isrbit_location + 8 * (irq_num), %eax ;           \
211         movl    (%eax), %eax ;                                          \
212         testl   _apic_isrbit_location + 4 + 8 * (irq_num), %eax ;       \
213         jz      9f ;                            /* not active */        \
214         movl    $0, lapic_eoi ;                                         \
215         APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
216 9:
217
218 #else
219 #define EOI_IRQ(irq_num)                                                \
220         testl   $IRQ_BIT(irq_num), lapic_isr1;                          \
221         jz      9f      ;                       /* not active */        \
222         movl    $0, lapic_eoi;                                          \
223         APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
224 9:
225 #endif
226         
227         
228 /*
229  * Test to see if the source is currently masked, clear if so.
 *  (Under IMASK_LOCK: clear the IRQ's bit in _apic_imen and clear
 *  IOART_INTMASK in its IO APIC redirection entry.  Clobbers %eax,
 *  %ecx and flags.)
230  */
231 #define UNMASK_IRQ(irq_num)                                     \
232         IMASK_LOCK ;                            /* into critical reg */ \
233         testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
234         je      7f ;                    /* bit clear, not masked */     \
235         andl    $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */    \
236         movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
237         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
238         movl    %eax,(%ecx) ;                   /* write the index */   \
239         movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
240         andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
241         movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
242 7: ;                                                                    \
243         IMASK_UNLOCK
244
245 #ifdef INTR_SIMPLELOCK
/*
 * ENLOCK/DELOCK/LATELOCK -- ISR lock handling used by the INTR stubs.
 * INTR_SIMPLELOCK: no lock around the handler; the lock is taken late
 * (LATELOCK, just before _doreti).  Otherwise: try-lock up front,
 * branching to local label 3 when another CPU holds it, and release
 * explicitly with DELOCK.
 */
246 #define ENLOCK
247 #define DELOCK
248 #define LATELOCK call   _get_isrlock
249 #else
250 #define ENLOCK \
251         ISR_TRYLOCK ;           /* XXX this is going away... */         \
252         testl   %eax, %eax ;                    /* did we get it? */    \
253         jz      3f
254 #define DELOCK  ISR_RELLOCK
255 #define LATELOCK
256 #endif
257
258 #ifdef APIC_INTR_DIAGNOSTIC
259 #ifdef APIC_INTR_DIAGNOSTIC_IRQ
/*
 * log_intr_event -- record one trace event in the 32768-entry circular
 * 16-bit buffer apic_itrace_debugbuffer, under apic_itrace_debuglock
 * with interrupts disabled.  Each entry packs (_cpuid << 8) | event-id,
 * where the event id is the caller's single stack argument.
 * Preserves the caller's flags (pushf/popf); clobbers %eax, %ecx.
 */
260 log_intr_event:
261         pushf
262         cli
263         pushl   $CNAME(apic_itrace_debuglock)
264         call    CNAME(s_lock_np)
265         addl    $4, %esp
266         movl    CNAME(apic_itrace_debugbuffer_idx), %ecx
267         andl    $32767, %ecx
268         movl    _cpuid, %eax
269         shll    $8,     %eax
270         orl     8(%esp), %eax
271         movw    %ax,    CNAME(apic_itrace_debugbuffer)(,%ecx,2)
272         incl    %ecx
273         andl    $32767, %ecx
274         movl    %ecx,   CNAME(apic_itrace_debugbuffer_idx)
275         pushl   $CNAME(apic_itrace_debuglock)
276         call    CNAME(s_unlock_np)
277         addl    $4, %esp
278         popf
279         ret
280
281
/*
 * APIC_ITRACE(name, irq_num, id) -- bump the per-IRQ event counter
 * `name'; when irq_num is the IRQ under diagnosis
 * (APIC_INTR_DIAGNOSTIC_IRQ), also log the event id through
 * log_intr_event.  Saves/restores %eax/%ecx/%edx around the call.
 */
282 #define APIC_ITRACE(name, irq_num, id)                                  \
283         lock ;                                  /* MP-safe */           \
284         incl    CNAME(name) + (irq_num) * 4 ;                           \
285         pushl   %eax ;                                                  \
286         pushl   %ecx ;                                                  \
287         pushl   %edx ;                                                  \
288         movl    $(irq_num), %eax ;                                      \
289         cmpl    $APIC_INTR_DIAGNOSTIC_IRQ, %eax ;                       \
290         jne     7f ;                                                    \
291         pushl   $id ;                                                   \
292         call    log_intr_event ;                                        \
293         addl    $4, %esp ;                                              \
294 7: ;                                                                    \
295         popl    %edx ;                                                  \
296         popl    %ecx ;                                                  \
297         popl    %eax
298 #else
/* No per-event logging: just count the event. */
299 #define APIC_ITRACE(name, irq_num, id)                                  \
300         lock ;                                  /* MP-safe */           \
301         incl    CNAME(name) + (irq_num) * 4
302 #endif
303
/* Event ids passed to log_intr_event through APIC_ITRACE. */
304 #define APIC_ITRACE_ENTER 1
305 #define APIC_ITRACE_EOI 2
306 #define APIC_ITRACE_TRYISRLOCK 3
307 #define APIC_ITRACE_GOTISRLOCK 4
308 #define APIC_ITRACE_ENTER2 5
309 #define APIC_ITRACE_LEAVE 6
310 #define APIC_ITRACE_UNMASK 7
311 #define APIC_ITRACE_ACTIVE 8
312 #define APIC_ITRACE_MASKED 9
313 #define APIC_ITRACE_NOISRLOCK 10
314 #define APIC_ITRACE_MASKED2 11
315 #define APIC_ITRACE_SPLZ 12
316 #define APIC_ITRACE_DORETI 13
317
318 #else
/* Diagnostics disabled: APIC_ITRACE expands to nothing. */
319 #define APIC_ITRACE(name, irq_num, id)
320 #endif
321                 
322 #ifdef CPL_AND_CML
323
/*
 * INTR(irq_num, vec_name, maybe_extra_ipending) -- CPL_AND_CML variant
 * of the normal (non-fast) hardware interrupt stub.  Builds a full
 * trap frame, performs lazy masking via the iactive bitmap, masks
 * level-triggered sources and EOIs the local APIC, takes the ISR lock
 * (ENLOCK), checks _cpl and _cml, marks the IRQ in _cil, raises _cml
 * by the interrupt's mask, runs the handler with interrupts enabled,
 * then unmasks and exits through _doreti.  Blocked/contended cases
 * record the IRQ in _ipending and either retry (0b), iret, or forward
 * the IRQ to the ISR-lock holder (forward_irq).
 */
324 #define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
325         .text ;                                                         \
326         SUPERALIGN_TEXT ;                                               \
327 /* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */  \
328 IDTVEC(vec_name) ;                                                      \
329         PUSH_FRAME ;                                                    \
330         movl    $KDSEL, %eax ;  /* reload with kernel's data segment */ \
331         movl    %ax, %ds ;                                              \
332         movl    %ax, %es ;                                              \
333         movl    $KPSEL, %eax ;                                          \
334         movl    %ax, %fs ;                                              \
335 ;                                                                       \
336         maybe_extra_ipending ;                                          \
337 ;                                                                       \
338         APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;    \
339         lock ;                                  /* MP-safe */           \
340         btsl    $(irq_num), iactive ;           /* lazy masking */      \
341         jc      1f ;                            /* already active */    \
342 ;                                                                       \
343         MASK_LEVEL_IRQ(irq_num) ;                                       \
344         EOI_IRQ(irq_num) ;                                              \
345 0: ;                                                                    \
346         APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
347         ENLOCK ;                                                        \
348 ;                                                                       \
349         APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
350         AVCPL_LOCK ;                            /* MP-safe */           \
351         testl   $IRQ_BIT(irq_num), _cpl ;                               \
352         jne     2f ;                            /* this INT masked */   \
353         testl   $IRQ_BIT(irq_num), _cml ;                               \
354         jne     2f ;                            /* this INT masked */   \
355         orl     $IRQ_BIT(irq_num), _cil ;                               \
356         AVCPL_UNLOCK ;                                                  \
357 ;                                                                       \
358         incb    _intr_nesting_level ;                                   \
359 ;                                                                       \
360   /* entry point used by doreti_unpend for HWIs. */                     \
361 __CONCAT(Xresume,irq_num): ;                                            \
362         FAKE_MCOUNT(13*4(%esp)) ;               /* XXX avoid dbl cnt */ \
363         lock ;  incl    _cnt+V_INTR ;           /* tally interrupts */  \
364         movl    _intr_countp + (irq_num) * 4, %eax ;                    \
365         lock ;  incl    (%eax) ;                                        \
366 ;                                                                       \
367         AVCPL_LOCK ;                            /* MP-safe */           \
368         movl    _cml, %eax ;                                            \
369         pushl   %eax ;                                                  \
370         orl     _intr_mask + (irq_num) * 4, %eax ;                      \
371         movl    %eax, _cml ;                                            \
372         AVCPL_UNLOCK ;                                                  \
373 ;                                                                       \
374         pushl   _intr_unit + (irq_num) * 4 ;                            \
375         incl    _inside_intr ;                                          \
376         APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;  \
377         sti ;                                                           \
378         call    *_intr_handler + (irq_num) * 4 ;                        \
379         cli ;                                                           \
380         APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
381         decl    _inside_intr ;                                          \
382 ;                                                                       \
383         lock ;  andl $~IRQ_BIT(irq_num), iactive ;                      \
384         lock ;  andl $~IRQ_BIT(irq_num), _cil ;                         \
385         UNMASK_IRQ(irq_num) ;                                           \
386         APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;  \
387         sti ;                           /* doreti repeats cli/sti */    \
388         MEXITCOUNT ;                                                    \
389         LATELOCK ;                                                      \
390         jmp     _doreti ;                                               \
391 ;                                                                       \
392         ALIGN_TEXT ;                                                    \
393 1: ;                                            /* active */            \
394         APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;  \
395         MASK_IRQ(irq_num) ;                                             \
396         EOI_IRQ(irq_num) ;                                              \
397         AVCPL_LOCK ;                            /* MP-safe */           \
398         lock ;                                                          \
399         orl     $IRQ_BIT(irq_num), _ipending ;                          \
400         AVCPL_UNLOCK ;                                                  \
401         lock ;                                                          \
402         btsl    $(irq_num), iactive ;           /* still active */      \
403         jnc     0b ;                            /* retry */             \
404         POP_FRAME ;                                                     \
405         iret ;                                                          \
406 ;                                                                       \
407         ALIGN_TEXT ;                                                    \
408 2: ;                                            /* masked by cpl|cml */ \
409         APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;  \
410         lock ;                                                          \
411         orl     $IRQ_BIT(irq_num), _ipending ;                          \
412         AVCPL_UNLOCK ;                                                  \
413         DELOCK ;                /* XXX this is going away... */         \
414         POP_FRAME ;                                                     \
415         iret ;                                                          \
416         ALIGN_TEXT ;                                                    \
417 3: ;                    /* other cpu has isr lock */                    \
418         APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
419         AVCPL_LOCK ;                            /* MP-safe */           \
420         lock ;                                                          \
421         orl     $IRQ_BIT(irq_num), _ipending ;                          \
422         testl   $IRQ_BIT(irq_num), _cpl ;                               \
423         jne     4f ;                            /* this INT masked */   \
424         testl   $IRQ_BIT(irq_num), _cml ;                               \
425         jne     4f ;                            /* this INT masked */   \
426         orl     $IRQ_BIT(irq_num), _cil ;                               \
427         AVCPL_UNLOCK ;                                                  \
428         call    forward_irq ;   /* forward irq to lock holder */        \
429         POP_FRAME ;                             /* and return */        \
430         iret ;                                                          \
431         ALIGN_TEXT ;                                                    \
432 4: ;                                            /* blocked */           \
433         APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
434         AVCPL_UNLOCK ;                                                  \
435         POP_FRAME ;                             /* and return */        \
436         iret
437
438 #else /* CPL_AND_CML */
439
440
441 #define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
442         .text ;                                                         \
443         SUPERALIGN_TEXT ;                                               \
444 /* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */  \
445 IDTVEC(vec_name) ;                                                      \
446         PUSH_FRAME ;                                                    \
447         movl    $KDSEL, %eax ;  /* reload with kernel's data segment */ \
448         movl    %ax, %ds ;                                              \
449         movl    %ax, %es ;                                              \
450         movl    $KPSEL, %eax ;                                          \
451         movl    %ax, %fs ;                                              \
452 ;                                                                       \
453         maybe_extra_ipending ;                                          \
454 ;                                                                       \
455         APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;    \
456         lock ;                                  /* MP-safe */           \
457         btsl    $(irq_num), iactive ;           /* lazy masking */      \
458         jc      1f ;                            /* already active */    \
459 ;                                                                       \
460         MASK_LEVEL_IRQ(irq_num) ;                                       \
461         EOI_IRQ(irq_num) ;                                              \
462 0: ;                                                                    \
463         APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
464         ISR_TRYLOCK ;           /* XXX this is going away... */         \
465         testl   %eax, %eax ;                    /* did we get it? */    \
466         jz      3f ;                            /* no */                \
467 ;                                                                       \
468         APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
469         AVCPL_LOCK ;                            /* MP-safe */           \
470         testl   $IRQ_BIT(irq_num), _cpl ;                               \
471         jne     2f ;                            /* this INT masked */   \
472         AVCPL_UNLOCK ;                                                  \
473 ;                                                                       \
474         incb    _intr_nesting_level ;                                   \
475 ;                                                                       \
476   /* entry point used by doreti_unpend for HWIs. */                     \
477 __CONCAT(Xresume,irq_num): ;                                            \
478         FAKE_MCOUNT(13*4(%esp)) ;               /* XXX avoid dbl cnt */ \
479         lock ;  incl    _cnt+V_INTR ;           /* tally interrupts */  \
480         movl    _intr_countp + (irq_num) * 4, %eax ;                    \
481         lock ;  incl    (%eax) ;                                        \
482 ;                                                                       \
483         AVCPL_LOCK ;                            /* MP-safe */           \
484         movl    _cpl, %eax ;                                            \
485         pushl   %eax ;                                                  \
486         orl     _intr_mask + (irq_num) * 4, %eax ;                      \
487         movl    %eax, _cpl ;                                            \
488         lock ;                                                          \
489         andl    $~IRQ_BIT(irq_num), _ipending ;                         \
490         AVCPL_UNLOCK ;                                                  \
491 ;                                                                       \
492         pushl   _intr_unit + (irq_num) * 4 ;                            \
493         APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;  \
494         sti ;                                                           \
495         call    *_intr_handler + (irq_num) * 4 ;                        \
496         cli ;                                                           \
497         APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
498 ;                                                                       \
499         lock ;  andl    $~IRQ_BIT(irq_num), iactive ;                   \
500         UNMASK_IRQ(irq_num) ;                                           \
501         APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;  \
502         sti ;                           /* doreti repeats cli/sti */    \
503         MEXITCOUNT ;                                                    \
504         jmp     _doreti ;                                               \
505 ;                                                                       \
506         ALIGN_TEXT ;                                                    \
507 1: ;                                            /* active  */           \
508         APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;  \
509         MASK_IRQ(irq_num) ;                                             \
510         EOI_IRQ(irq_num) ;                                              \
511         AVCPL_LOCK ;                            /* MP-safe */           \
512         lock ;                                                          \
513         orl     $IRQ_BIT(irq_num), _ipending ;                          \
514         AVCPL_UNLOCK ;                                                  \
515         lock ;                                                          \
516         btsl    $(irq_num), iactive ;           /* still active */      \
517         jnc     0b ;                            /* retry */             \
518         POP_FRAME ;                                                     \
519         iret ;          /* XXX:  iactive bit might be 0 now */          \
520         ALIGN_TEXT ;                                                    \
521 2: ;                            /* masked by cpl, leave iactive set */  \
522         APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;  \
523         lock ;                                                          \
524         orl     $IRQ_BIT(irq_num), _ipending ;                          \
525         AVCPL_UNLOCK ;                                                  \
526         ISR_RELLOCK ;           /* XXX this is going away... */         \
527         POP_FRAME ;                                                     \
528         iret ;                                                          \
529         ALIGN_TEXT ;                                                    \
530 3: ;                    /* other cpu has isr lock */                    \
531         APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
532         AVCPL_LOCK ;                            /* MP-safe */           \
533         lock ;                                                          \
534         orl     $IRQ_BIT(irq_num), _ipending ;                          \
535         testl   $IRQ_BIT(irq_num), _cpl ;                               \
536         jne     4f ;                            /* this INT masked */   \
537         AVCPL_UNLOCK ;                                                  \
538         call    forward_irq ;    /* forward irq to lock holder */       \
539         POP_FRAME ;                             /* and return */        \
540         iret ;                                                          \
541         ALIGN_TEXT ;                                                    \
542 4: ;                                            /* blocked */           \
543         APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
544         AVCPL_UNLOCK ;                                                  \
545         POP_FRAME ;                             /* and return */        \
546         iret
547
548 #endif /* CPL_AND_CML */
549
550
551 /*
552  * Handle "spurious INTerrupts".
553  * Notes:
554  *  This is different than the "spurious INTerrupt" generated by an
555  *   8259 PIC for missing INTs.  See the APIC documentation for details.
556  *  This routine should NOT do an 'EOI' cycle.
557  */
558         .text
559         SUPERALIGN_TEXT
560         .globl _Xspuriousint
561 _Xspuriousint:
562
563         /* No EOI cycle used here */
	/*
	 * Per the local APIC spec, the spurious vector does not set an
	 * in-service (ISR) bit, so writing an EOI here would be wrong.
	 */
564
565         iret                            /* nothing to acknowledge; just return */
566
567
568 /*
569  * Handle TLB shootdowns.
 *
 *  Another CPU changed a page mapping; reloading %cr3 flushes this CPU's
 *  (non-global) TLB entries so stale translations are discarded.  Only
 *  %eax is used, so a full trap frame is not built.
570  */
571         .text
572         SUPERALIGN_TEXT
573         .globl  _Xinvltlb
574 _Xinvltlb:
575         pushl   %eax                    /* the only register we clobber */
576
577 #ifdef COUNT_XINVLTLB_HITS
578         pushl   %fs
579         movl    $KPSEL, %eax
580         movl    %ax, %fs                /* per-cpu private data via %fs */
581         movl    _cpuid, %eax
582         popl    %fs
583         ss                              /* %ss override: kernel data w/o %ds load */
584         incl    _xhits(,%eax,4)         /* count shootdowns taken per cpu */
585 #endif /* COUNT_XINVLTLB_HITS */
586
587         movl    %cr3, %eax              /* invalidate the TLB */
588         movl    %eax, %cr3
589
590         ss                              /* stack segment, avoid %ds load */
591         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
592
593         popl    %eax
594         iret
595
596
597 #ifdef BETTER_CLOCK
598
599 /*
600  * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
601  *
602  *  - Stores current cpu state in checkstate_cpustate[cpuid]
603  *      0 == user, 1 == sys, 2 == intr
604  *  - Stores current process in checkstate_curproc[cpuid]
605  *
606  *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
607  *
608  * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
609  */
610
611         .text
612         SUPERALIGN_TEXT
613         .globl _Xcpucheckstate
614         .globl _checkstate_cpustate
615         .globl _checkstate_curproc
616         .globl _checkstate_pc
617 _Xcpucheckstate:
618         pushl   %eax
619         pushl   %ebx
620         pushl   %ds                     /* save current data segment */
621         pushl   %fs
622
623         movl    $KDSEL, %eax
624         movl    %ax, %ds                /* use KERNEL data segment */
625         movl    $KPSEL, %eax
626         movl    %ax, %fs                /* per-cpu private data segment */
627
628         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
629
630         movl    $0, %ebx                /* assume state 0 == user */
631         movl    20(%esp), %eax          /* saved %cs of interrupted code */
632         andl    $3, %eax                /* extract privilege level (RPL) */
633         cmpl    $3, %eax
634         je      1f                      /* ring 3 -> user mode */
635         testl   $PSL_VM, 24(%esp)       /* saved %eflags: vm86? */
636         jne     1f                      /* vm86 counts as user too */
637         incl    %ebx                    /* system or interrupt */
638 #ifdef CPL_AND_CML
639         cmpl    $0, _inside_intr
640         je      1f
641         incl    %ebx                    /* interrupt */
642 #endif
643 1:
644         movl    _cpuid, %eax
645         movl    %ebx, _checkstate_cpustate(,%eax,4)     /* record 0/1/2 state */
646         movl    _curproc, %ebx
647         movl    %ebx, _checkstate_curproc(,%eax,4)      /* record current proc */
648         movl    16(%esp), %ebx          /* saved %eip */
649         movl    %ebx, _checkstate_pc(,%eax,4)           /* record interrupted pc */
650
651         lock                            /* checkstate_probed_cpus |= (1<<id) */
652         btsl    %eax, _checkstate_probed_cpus
653
654         popl    %fs
655         popl    %ds                     /* restore previous data segment */
656         popl    %ebx
657         popl    %eax
658         iret
659
660 #endif /* BETTER_CLOCK */
661
662 /*
663  * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
664  *
665  *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
666  *
667  *  - We need a better method of triggering asts on other cpus.
668  */
669
670         .text
671         SUPERALIGN_TEXT
672         .globl _Xcpuast
673 _Xcpuast:
674         PUSH_FRAME
675         movl    $KDSEL, %eax
676         movl    %ax, %ds                /* use KERNEL data segment */
677         movl    %ax, %es
678         movl    $KPSEL, %eax
679         movl    %ax, %fs
680
681         movl    _cpuid, %eax
682         lock                            /* checkstate_need_ast &= ~(1<<id) */
683         btrl    %eax, _checkstate_need_ast
684         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
685
686         lock
687         btsl    %eax, _checkstate_pending_ast
688         jc      1f                      /* bit already set: AST delivery in progress */
689
690         FAKE_MCOUNT(13*4(%esp))
691
692         /* 
693          * Giant locks do not come cheap.
694          * A lot of cycles are going to be wasted here.
695          */
696         call    _get_isrlock
697
698         AVCPL_LOCK
699 #ifdef CPL_AND_CML
700         movl    _cml, %eax
701 #else
702         movl    _cpl, %eax
703 #endif
704         pushl   %eax                    /* saved cpl/cml for the doreti frame */
705         movl    $1, _astpending         /* XXX */
706         AVCPL_UNLOCK
707         lock
708         incb    _intr_nesting_level
709         sti
710
711         pushl   $0                      /* dummy unit # for doreti frame -- verify vs ipl.s */
712
713         movl    _cpuid, %eax
714         lock
715         btrl    %eax, _checkstate_pending_ast   /* delivery no longer pending */
716         lock
717         btrl    %eax, CNAME(resched_cpus)       /* were we asked to resched? */
718         jnc     2f
719         movl    $1, CNAME(want_resched)
720         lock
721         incl    CNAME(want_resched_cnt)         /* statistics only */
722 2:
723         lock
724         incl    CNAME(cpuast_cnt)               /* statistics only */
725         MEXITCOUNT
726         jmp     _doreti                 /* doreti delivers the AST and returns */
727 1:
728         /* We are already in the process of delivering an ast for this CPU */
729         POP_FRAME
730         iret
731
732
733 /*
734  *       Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 *
 *  Sent by forward_irq (below).  If we can take the ISR lock, run the
 *  pending interrupts via doreti; otherwise bounce the request onward
 *  to whoever now holds the lock.
735  */
736
737         .text
738         SUPERALIGN_TEXT
739         .globl _Xforward_irq
740 _Xforward_irq:
741         PUSH_FRAME
742         movl    $KDSEL, %eax
743         movl    %ax, %ds                /* use KERNEL data segment */
744         movl    %ax, %es
745         movl    $KPSEL, %eax
746         movl    %ax, %fs
747
748         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
749
750         FAKE_MCOUNT(13*4(%esp))
751
752         ISR_TRYLOCK
753         testl   %eax,%eax               /* Did we get the lock ? */
754         jz  1f                          /* No */
755
756         lock
757         incl    CNAME(forward_irq_hitcnt)       /* statistics only */
758         cmpb    $4, _intr_nesting_level
759         jae     2f                      /* nested too deeply; don't recurse */
760
761         AVCPL_LOCK
762 #ifdef CPL_AND_CML
763         movl    _cml, %eax
764 #else
765         movl    _cpl, %eax
766 #endif
767         pushl   %eax                    /* saved cpl/cml for the doreti frame */
768         AVCPL_UNLOCK
769         lock
770         incb    _intr_nesting_level
771         sti
772
773         pushl   $0                      /* dummy unit # for doreti frame -- verify vs ipl.s */
774
775         MEXITCOUNT
776         jmp     _doreti                 /* Handle forwarded interrupt */
777 1:
778         lock
779         incl    CNAME(forward_irq_misscnt)      /* statistics only */
780         call    forward_irq     /* Oops, we've lost the isr lock */
781         MEXITCOUNT
782         POP_FRAME
783         iret
784 2:
785         lock
786         incl    CNAME(forward_irq_toodeepcnt)   /* statistics only */
787 3:
788         ISR_RELLOCK
789         MEXITCOUNT
790         POP_FRAME
791         iret
792
793 /*
794  * forward_irq:  ask the CPU holding the MP (ISR) lock to service pending
 *  interrupts.  The holder's cpu id lives in the top byte of _mp_lock
 *  (CPU #0 is chosen if the lock is free); we translate it to an APIC id
 *  and send an XFORWARD_IRQ IPI through the local APIC's ICR, first
 *  waiting for any previously posted IPI to finish delivery.
 *  No-op until IPIs are safe (_invltlb_ok) or if forwarding is disabled.
795  */
796 forward_irq:
797         MCOUNT
798         cmpl    $0,_invltlb_ok
799         jz      4f                      /* too early in boot; do nothing */
800
801         cmpl    $0, CNAME(forward_irq_enabled)
802         jz      4f                      /* forwarding disabled; do nothing */
803
804         movl    _mp_lock,%eax
805         cmpl    $FREE_LOCK,%eax
806         jne     1f
807         movl    $0, %eax                /* Pick CPU #0 if noone has lock */
808 1:
809         shrl    $24,%eax                /* holder's cpu id from top byte */
810         movl    _cpu_num_to_apic_id(,%eax,4),%ecx
811         shll    $24,%ecx                /* APIC id into ICR_HI position */
812         movl    lapic_icr_hi, %eax
813         andl    $~APIC_ID_MASK, %eax
814         orl     %ecx, %eax
815         movl    %eax, lapic_icr_hi      /* program IPI destination */
816
817 2:
818         movl    lapic_icr_lo, %eax      /* wait for previous IPI to clear */
819         andl    $APIC_DELSTAT_MASK,%eax
820         jnz     2b
821         movl    lapic_icr_lo, %eax
822         andl    $APIC_RESV2_MASK, %eax  /* keep only reserved bits */
823         orl     $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
824         movl    %eax, lapic_icr_lo      /* send the XFORWARD_IRQ IPI */
825 3:
826         movl    lapic_icr_lo, %eax      /* wait until this IPI is delivered */
827         andl    $APIC_DELSTAT_MASK,%eax
828         jnz     3b
829 4:
830         ret
831         
832 /*
833  * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
834  *
835  *  - Signals its receipt.
836  *  - Waits for permission to restart.
837  *  - Signals its restart.
838  */
839
840         .text
841         SUPERALIGN_TEXT
842         .globl _Xcpustop
843 _Xcpustop:
844         pushl   %ebp
845         movl    %esp, %ebp
846         pushl   %eax
847         pushl   %ecx
848         pushl   %edx
849         pushl   %ds                     /* save current data segment */
850         pushl   %fs
851
852         movl    $KDSEL, %eax
853         movl    %ax, %ds                /* use KERNEL data segment */
854         movl    $KPSEL, %eax
855         movl    %ax, %fs
856
857         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
858
859         movl    _cpuid, %eax
860         imull   $PCB_SIZE, %eax
861         leal    CNAME(stoppcbs)(%eax), %eax     /* &stoppcbs[cpuid] */
862         pushl   %eax
863         call    CNAME(savectx)          /* Save process context */
864         addl    $4, %esp
865
866
867         movl    _cpuid, %eax
868
869         lock
870         btsl    %eax, _stopped_cpus     /* stopped_cpus |= (1<<id) */
871 1:
872         btl     %eax, _started_cpus     /* while (!(started_cpus & (1<<id))) */
873         jnc     1b                      /* busy-wait for permission to restart */
874
875         lock
876         btrl    %eax, _started_cpus     /* started_cpus &= ~(1<<id) */
877         lock
878         btrl    %eax, _stopped_cpus     /* stopped_cpus &= ~(1<<id) */
879
880         test    %eax, %eax              /* %eax still holds our cpuid */
881         jnz     2f                      /* only cpu #0 runs the restart hook */
882
883         movl    CNAME(cpustop_restartfunc), %eax
884         test    %eax, %eax
885         jz      2f                      /* no hook registered */
886         movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */
887
888         call    %eax
889 2:
890         popl    %fs
891         popl    %ds                     /* restore previous data segment */
892         popl    %edx
893         popl    %ecx
894         popl    %eax
895         movl    %ebp, %esp
896         popl    %ebp
897         iret
898
899
900 MCOUNT_LABEL(bintr)
/*
 * Instantiate the fast-interrupt entry points for IRQs 0-23 from the
 * FAST_INTR() macro defined earlier in this file.
 */
901         FAST_INTR(0,fastintr0)
902         FAST_INTR(1,fastintr1)
903         FAST_INTR(2,fastintr2)
904         FAST_INTR(3,fastintr3)
905         FAST_INTR(4,fastintr4)
906         FAST_INTR(5,fastintr5)
907         FAST_INTR(6,fastintr6)
908         FAST_INTR(7,fastintr7)
909         FAST_INTR(8,fastintr8)
910         FAST_INTR(9,fastintr9)
911         FAST_INTR(10,fastintr10)
912         FAST_INTR(11,fastintr11)
913         FAST_INTR(12,fastintr12)
914         FAST_INTR(13,fastintr13)
915         FAST_INTR(14,fastintr14)
916         FAST_INTR(15,fastintr15)
917         FAST_INTR(16,fastintr16)
918         FAST_INTR(17,fastintr17)
919         FAST_INTR(18,fastintr18)
920         FAST_INTR(19,fastintr19)
921         FAST_INTR(20,fastintr20)
922         FAST_INTR(21,fastintr21)
923         FAST_INTR(22,fastintr22)
924         FAST_INTR(23,fastintr23)
/*
 * Normal-interrupt entry points for IRQs 0-23 via the INTR() macro.
 * The extra hook for IRQ0 (presumably run on entry by INTR() -- the
 * macro body is defined above) flags a pending clock interrupt.
 */
925 #define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
926         INTR(0,intr0, CLKINTR_PENDING)
927         INTR(1,intr1,)
928         INTR(2,intr2,)
929         INTR(3,intr3,)
930         INTR(4,intr4,)
931         INTR(5,intr5,)
932         INTR(6,intr6,)
933         INTR(7,intr7,)
934         INTR(8,intr8,)
935         INTR(9,intr9,)
936         INTR(10,intr10,)
937         INTR(11,intr11,)
938         INTR(12,intr12,)
939         INTR(13,intr13,)
940         INTR(14,intr14,)
941         INTR(15,intr15,)
942         INTR(16,intr16,)
943         INTR(17,intr17,)
944         INTR(18,intr18,)
945         INTR(19,intr19,)
946         INTR(20,intr20,)
947         INTR(21,intr21,)
948         INTR(22,intr22,)
949         INTR(23,intr23,)
950 MCOUNT_LABEL(eintr)
951
952 /*
953  * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
954  *
955  * - Calls the generic rendezvous action function.
956  */
957         .text
958         SUPERALIGN_TEXT
959         .globl  _Xrendezvous
960 _Xrendezvous:
961         PUSH_FRAME
962         movl    $KDSEL, %eax
963         movl    %ax, %ds                /* use KERNEL data segment */
964         movl    %ax, %es
965         movl    $KPSEL, %eax
966         movl    %ax, %fs                /* per-cpu private data segment */
967
968         call    _smp_rendezvous_action  /* C code does the real work */
969
970         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
971         POP_FRAME
972         iret
973         
974         
975         .data
976 /*
977  * Addresses of interrupt handlers.
978  *  XresumeNN: Resumption addresses for HWIs.
979  */
980         .globl _ihandlers
981 _ihandlers:
982 /*
983  * used by:
984  *  ipl.s:      doreti_unpend
985  */
986         .long   Xresume0,  Xresume1,  Xresume2,  Xresume3 
987         .long   Xresume4,  Xresume5,  Xresume6,  Xresume7
988         .long   Xresume8,  Xresume9,  Xresume10, Xresume11
989         .long   Xresume12, Xresume13, Xresume14, Xresume15 
990         .long   Xresume16, Xresume17, Xresume18, Xresume19
991         .long   Xresume20, Xresume21, Xresume22, Xresume23
992 /*
993  * used by:
994  *  ipl.s:      doreti_unpend
995  *  apic_ipl.s: splz_unpend
996  */
997         .long   _swi_null, swi_net, _swi_null, _swi_null
998         .long   _swi_vm, _swi_null, _softclock, _swi_null
999
1000 imasks:                         /* masks for interrupt handlers */
1001         .space  NHWI*4          /* padding; HWI masks are elsewhere */
1002
1003         .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
1004         .long   SWI_VM_MASK, 0, SWI_CLOCK_MASK, 0
1005
1006 /* active flag for lazy masking */
1007 iactive:
1008         .long   0
1009
1010 #ifdef COUNT_XINVLTLB_HITS
1011         .globl  _xhits
1012 _xhits:
1013         .space  (NCPU * 4), 0   /* per-cpu TLB-shootdown counters (see _Xinvltlb) */
1014 #endif /* COUNT_XINVLTLB_HITS */
1015
1016 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
1017         .globl _stopped_cpus, _started_cpus
1018 _stopped_cpus:
1019         .long   0               /* bitmask of cpus that have stopped */
1020 _started_cpus:
1021         .long   0               /* bitmask of cpus allowed to restart */
1022
1023 #ifdef BETTER_CLOCK
1024         .globl _checkstate_probed_cpus
1025 _checkstate_probed_cpus:
1026         .long   0               /* bitmask: cpus that answered Xcpucheckstate */
1027 #endif /* BETTER_CLOCK */
1028         .globl _checkstate_need_ast
1029 _checkstate_need_ast:
1030         .long   0               /* bitmask: cpus that should take an AST */
1031 _checkstate_pending_ast:
1032         .long   0               /* bitmask: cpus with an AST delivery in progress */
1033         .globl CNAME(forward_irq_misscnt)
1034         .globl CNAME(forward_irq_toodeepcnt)
1035         .globl CNAME(forward_irq_hitcnt)
1036         .globl CNAME(resched_cpus)
1037         .globl CNAME(want_resched_cnt)
1038         .globl CNAME(cpuast_cnt)
1039         .globl CNAME(cpustop_restartfunc)
/* statistics counters and control variables for the IPI handlers above */
1040 CNAME(forward_irq_misscnt):
1041         .long 0
1042 CNAME(forward_irq_hitcnt):
1043         .long 0
1044 CNAME(forward_irq_toodeepcnt):
1045         .long 0
1046 CNAME(resched_cpus):
1047         .long 0                 /* bitmask: cpus that should reschedule */
1048 CNAME(want_resched_cnt):
1049         .long 0
1050 CNAME(cpuast_cnt):
1051         .long 0
1052 CNAME(cpustop_restartfunc):
1053         .long 0                 /* one-shot hook run by cpu 0 in Xcpustop */
1054
1055
1056
1057         .globl  _apic_pin_trigger
1058 _apic_pin_trigger:
1059         .long   0               /* bitmask: level-triggered IO APIC pins -- verify vs io_apic.c */
1060
1061         .text