]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/i386/isa/apic_vector.s
This commit was generated by cvs2svn to compensate for changes in r53142,
[FreeBSD/FreeBSD.git] / sys / i386 / isa / apic_vector.s
1 /*
2  *      from: vector.s, 386BSD 0.1 unknown origin
3  * $FreeBSD$
4  */
5
6
7 #include <machine/apic.h>
8 #include <machine/smp.h>
9
10 #include "i386/isa/intr_machdep.h"
11
12
13 #ifdef FAST_SIMPLELOCK
14
/*
 * GET/REL_FAST_INTR_LOCK bracket the call to a "fast" interrupt handler
 * on SMP.  FAST_SIMPLELOCK variant: spin on the dedicated
 * _fast_intr_lock (taken with _s_lock; released by storing 0 directly).
 */
15 #define GET_FAST_INTR_LOCK                                              \
16         pushl   $_fast_intr_lock ;              /* address of lock */   \
17         call    _s_lock ;                       /* MP-safe */           \
18         addl    $4,%esp
19
20 #define REL_FAST_INTR_LOCK                                              \
21         movl    $0, _fast_intr_lock
22
23 #else /* FAST_SIMPLELOCK */
24
/*
 * Default variant: use the giant ISR lock (_mp_lock) instead, via
 * _get_isrlock / _MPrellock.
 */
25 #define GET_FAST_INTR_LOCK                                              \
26         call    _get_isrlock
27
28 #define REL_FAST_INTR_LOCK                                              \
29         pushl   $_mp_lock ;     /* GIANT_LOCK */                        \
30         call    _MPrellock ;                                            \
31         add     $4, %esp
32
33 #endif /* FAST_SIMPLELOCK */
34
35 /* convert an absolute IRQ# into a bitmask */
36 #define IRQ_BIT(irq_num)        (1 << (irq_num))
37
38 /* make an index into the IO APIC from the IRQ# */
/* (each redirection-table entry is a pair of 32-bit registers, first at 0x10) */
39 #define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
40
41
42 /*
43  * Macros for interrupt entry, call to handler, and exit.
44  */
45
46 #ifdef FAST_WITHOUTCPL
47
48 /*
 * FAST_INTR (FAST_WITHOUTCPL variant): IDT entry for a "fast" interrupt.
 * Saves only the call-used registers plus %ds/%es/%fs, loads the kernel
 * segment selectors, calls the unit's handler while holding the fast
 * interrupt lock, writes the local APIC EOI (store to lapic_eoi), bumps
 * the interrupt counters with locked increments, and irets.  No
 * cpl/ipending bookkeeping is done on this path.
49  */
50 #define FAST_INTR(irq_num, vec_name)                                    \
51         .text ;                                                         \
52         SUPERALIGN_TEXT ;                                               \
53 IDTVEC(vec_name) ;                                                      \
54         pushl   %eax ;          /* save only call-used registers */     \
55         pushl   %ecx ;                                                  \
56         pushl   %edx ;                                                  \
57         pushl   %ds ;                                                   \
58         MAYBE_PUSHL_ES ;                                                \
59         pushl   %fs ;                                                   \
60         movl    $KDSEL,%eax ;                                           \
61         movl    %ax,%ds ;                                               \
62         MAYBE_MOVW_AX_ES ;                                              \
63         movl    $KPSEL,%eax ;                                           \
64         movl    %ax,%fs ;                                               \
65         FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;                      \
66         pushl   _intr_unit + (irq_num) * 4 ;                            \
67         GET_FAST_INTR_LOCK ;                                            \
68         call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
69         REL_FAST_INTR_LOCK ;                                            \
70         addl    $4, %esp ;                                              \
71         movl    $0, lapic_eoi ;                                         \
72         lock ;                                                          \
73         incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
74         movl    _intr_countp + (irq_num) * 4, %eax ;                    \
75         lock ;                                                          \
76         incl    (%eax) ;                                                \
77         MEXITCOUNT ;                                                    \
78         popl    %fs ;                                                   \
79         MAYBE_POPL_ES ;                                                 \
80         popl    %ds ;                                                   \
81         popl    %edx ;                                                  \
82         popl    %ecx ;                                                  \
83         popl    %eax ;                                                  \
84         iret
85
86 #else /* FAST_WITHOUTCPL */
87
/*
 * FAST_INTR (cpl-tracking variant): like the FAST_WITHOUTCPL flavor but,
 * after the handler and EOI, checks whether any unmasked interrupts are
 * pending (_ipending & ~_cpl).  If so and the nesting level allows, it
 * converts the thin frame into a full trap frame and jumps to _doreti;
 * otherwise it pops the thin frame and irets.
 *
 * FIX(review): restored the missing '\' line continuations on the
 * "pushl %fs", "movl $KPSEL, %eax" and "movl %ax, %fs" lines of the
 * fat-frame rebuild path — without them the #define ended early and the
 * remaining instructions fell outside the macro body.
 */
88 #define FAST_INTR(irq_num, vec_name)                                    \
89         .text ;                                                         \
90         SUPERALIGN_TEXT ;                                               \
91 IDTVEC(vec_name) ;                                                      \
92         pushl   %eax ;          /* save only call-used registers */     \
93         pushl   %ecx ;                                                  \
94         pushl   %edx ;                                                  \
95         pushl   %ds ;                                                   \
96         MAYBE_PUSHL_ES ;                                                \
97         pushl   %fs ;                                                   \
98         movl    $KDSEL, %eax ;                                          \
99         movl    %ax, %ds ;                                              \
100         MAYBE_MOVW_AX_ES ;                                              \
101         movl    $KPSEL, %eax ;                                          \
102         movl    %ax, %fs ;                                              \
103         FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;                      \
104         GET_FAST_INTR_LOCK ;                                            \
105         pushl   _intr_unit + (irq_num) * 4 ;                            \
106         call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
107         addl    $4, %esp ;                                              \
108         movl    $0, lapic_eoi ;                                         \
109         lock ;                                                          \
110         incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
111         movl    _intr_countp + (irq_num) * 4,%eax ;                     \
112         lock ;                                                          \
113         incl    (%eax) ;                                                \
114         movl    _cpl, %eax ;    /* unmasking pending HWIs or SWIs? */   \
115         notl    %eax ;                                                  \
116         andl    _ipending, %eax ;                                       \
117         jne     2f ;            /* yes, maybe handle them */            \
118 1: ;                                                                    \
119         MEXITCOUNT ;                                                    \
120         REL_FAST_INTR_LOCK ;                                            \
121         popl    %fs ;                                                   \
122         MAYBE_POPL_ES ;                                                 \
123         popl    %ds ;                                                   \
124         popl    %edx ;                                                  \
125         popl    %ecx ;                                                  \
126         popl    %eax ;                                                  \
127         iret ;                                                          \
128 ;                                                                       \
129         ALIGN_TEXT ;                                                    \
130 2: ;                                                                    \
131         cmpb    $3, _intr_nesting_level ;       /* enough stack? */     \
132         jae     1b ;            /* no, return */                        \
133         movl    _cpl, %eax ;                                            \
134         /* XXX next line is probably unnecessary now. */                \
135         movl    $HWI_MASK|SWI_MASK, _cpl ;      /* limit nesting ... */ \
136         lock ;                                                          \
137         incb    _intr_nesting_level ;   /* ... really limit it ... */   \
138         sti ;                   /* to do this as early as possible */   \
139         popl    %fs ;           /* discard most of thin frame ... */    \
140         MAYBE_POPL_ES ;         /* discard most of thin frame ... */    \
141         popl    %ecx ;          /* ... original %ds ... */              \
142         popl    %edx ;                                                  \
143         xchgl   %eax, 4(%esp) ; /* orig %eax; save cpl */               \
144         pushal ;                /* build fat frame (grrr) ... */        \
145         pushl   %ecx ;          /* ... actually %ds ... */              \
146         pushl   %es ;                                                   \
147         pushl   %fs ;                                                   \
148         movl    $KDSEL, %eax ;                                          \
149         movl    %ax, %es ;                                              \
150         movl    $KPSEL, %eax ;                                          \
151         movl    %ax, %fs ;                                              \
152         movl    (3+8+0)*4(%esp), %ecx ; /* %ecx from thin frame ... */  \
153         movl    %ecx, (3+6)*4(%esp) ;   /* ... to fat frame ... */      \
154         movl    (3+8+1)*4(%esp), %eax ; /* ... cpl from thin frame */   \
155         pushl   %eax ;                                                  \
156         subl    $4, %esp ;      /* junk for unit number */              \
157         MEXITCOUNT ;                                                    \
158         jmp     _doreti
159
160 #endif /** FAST_WITHOUTCPL */
161
162
163 /*
164  * PUSH_FRAME/POP_FRAME: build and unwind a full trap frame — dummy
 * error code and trap type, all general registers (pushal), then the
 * %ds/%es/%fs data and extra segment registers.  POP_FRAME reverses
 * the pushes and discards the two dummy words.
165  */
166 #define PUSH_FRAME                                                      \
167         pushl   $0 ;            /* dummy error code */                  \
168         pushl   $0 ;            /* dummy trap type */                   \
169         pushal ;                                                        \
170         pushl   %ds ;           /* save data and extra segments ... */  \
171         pushl   %es ;                                                   \
172         pushl   %fs
173
174 #define POP_FRAME                                                       \
175         popl    %fs ;                                                   \
176         popl    %es ;                                                   \
177         popl    %ds ;                                                   \
178         popal ;                                                         \
179         addl    $4+4,%esp       /* discard dummy trap type + error code */
180
/* int_to_apicintpin[] entries appear to be 16 bytes: +8 holds the IO APIC
 * address and +12 the redirection-entry index (usage below) — TODO confirm
 * against the struct definition. */
181 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
182 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
183         
/*
 * MASK_IRQ(irq_num): under IMASK_LOCK, set this IRQ's bit in _apic_imen
 * and set IOART_INTMASK in its IO APIC redirection entry (accessed via
 * the index/window register pair), unless it was already masked.
 * Clobbers %eax and %ecx.
 */
184 #define MASK_IRQ(irq_num)                                               \
185         IMASK_LOCK ;                            /* into critical reg */ \
186         testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
187         jne     7f ;                    /* masked, don't mask */        \
188         orl     $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */  \
189         movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
190         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
191         movl    %eax, (%ecx) ;                  /* write the index */   \
192         movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
193         orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
194         movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
195 7: ;                                            /* already masked */    \
196         IMASK_UNLOCK
197 /*
198  * Test to see whether we are handling an edge or level triggered INT.
199  *  Level-triggered INTs must still be masked as we don't clear the source,
200  *  and the EOI cycle would cause redundant INTs to occur.
 *
 *  Clobbers %eax/%ecx via MASK_IRQ when the source is level-triggered.
201  */
202 #define MASK_LEVEL_IRQ(irq_num)                                         \
203         testl   $IRQ_BIT(irq_num), _apic_pin_trigger ;                  \
204         jz      9f ;                            /* edge, don't mask */  \
205         MASK_IRQ(irq_num) ;                                             \
206 9:
207
208
209 #ifdef APIC_INTR_REORDER
/*
 * EOI_IRQ(irq_num): write the local APIC EOI register only when this
 * IRQ's in-service bit is set.  APIC_INTR_REORDER variant: the ISR word
 * address and bitmask are looked up in _apic_isrbit_location[] (8 bytes
 * per IRQ: pointer then mask); clobbers %eax.  Otherwise lapic_isr1 is
 * tested directly and no register is clobbered.
 */
210 #define EOI_IRQ(irq_num)                                                \
211         movl    _apic_isrbit_location + 8 * (irq_num), %eax ;           \
212         movl    (%eax), %eax ;                                          \
213         testl   _apic_isrbit_location + 4 + 8 * (irq_num), %eax ;       \
214         jz      9f ;                            /* not active */        \
215         movl    $0, lapic_eoi ;                                         \
216         APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
217 9:
218
219 #else
220 #define EOI_IRQ(irq_num)                                                \
221         testl   $IRQ_BIT(irq_num), lapic_isr1;                          \
222         jz      9f      ;                       /* not active */        \
223         movl    $0, lapic_eoi;                                          \
224         APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
225 9:
226 #endif
227         
228         
229 /*
230  * Test to see if the source is currently masked, clear if so.
 *  (Mirror of MASK_IRQ: under IMASK_LOCK, clears the _apic_imen bit and
 *  the IOART_INTMASK bit in the IO APIC redirection entry.  Clobbers
 *  %eax and %ecx.)
231  */
232 #define UNMASK_IRQ(irq_num)                                     \
233         IMASK_LOCK ;                            /* into critical reg */ \
234         testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
235         je      7f ;                    /* bit clear, not masked */     \
236         andl    $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */    \
237         movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
238         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
239         movl    %eax,(%ecx) ;                   /* write the index */   \
240         movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
241         andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
242         movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
243 7: ;                                                                    \
244         IMASK_UNLOCK
245
246 #ifdef INTR_SIMPLELOCK
/*
 * ISR-lock hooks used by the INTR() macros.  With INTR_SIMPLELOCK the
 * lock is only taken late (LATELOCK, just before _doreti).  Otherwise
 * ENLOCK try-acquires the ISR lock up front — branching to local label
 * 3f on failure, clobbering %eax — and DELOCK releases it.
 */
247 #define ENLOCK
248 #define DELOCK
249 #define LATELOCK call   _get_isrlock
250 #else
251 #define ENLOCK \
252         ISR_TRYLOCK ;           /* XXX this is going away... */         \
253         testl   %eax, %eax ;                    /* did we get it? */    \
254         jz      3f
255 #define DELOCK  ISR_RELLOCK
256 #define LATELOCK
257 #endif
258
259 #ifdef APIC_INTR_DIAGNOSTIC
260 #ifdef APIC_INTR_DIAGNOSTIC_IRQ
/*
 * log_intr_event: record one diagnostic trace event in the 32768-entry
 * ring buffer apic_itrace_debugbuffer.  Each 16-bit record is
 * (cpuid << 8) | event-id, where the event id is the caller's single
 * stack argument.  Interrupts are disabled and apic_itrace_debuglock is
 * held around the buffer update; caller's eflags are restored on exit.
 * Clobbers %eax, %ecx.
 */
261 log_intr_event:
262         pushf                   /* preserve caller's IF */
263         cli                     /* no interrupts while logging */
264         pushl   $CNAME(apic_itrace_debuglock)
265         call    CNAME(s_lock_np)
266         addl    $4, %esp
267         movl    CNAME(apic_itrace_debugbuffer_idx), %ecx
268         andl    $32767, %ecx    /* index mod 32768 */
269         movl    _cpuid, %eax
270         shll    $8,     %eax
271         orl     8(%esp), %eax   /* event id arg (above eflags + retaddr) */
272         movw    %ax,    CNAME(apic_itrace_debugbuffer)(,%ecx,2)
273         incl    %ecx
274         andl    $32767, %ecx    /* wrap the index */
275         movl    %ecx,   CNAME(apic_itrace_debugbuffer_idx)
276         pushl   $CNAME(apic_itrace_debuglock)
277         call    CNAME(s_unlock_np)
278         addl    $4, %esp
279         popf                    /* restore caller's IF */
280         ret
281         
282
/*
 * APIC_ITRACE(name, irq_num, id): bump the per-IRQ event counter
 * CNAME(name)[irq_num] with a locked increment; when irq_num matches
 * APIC_INTR_DIAGNOSTIC_IRQ, also log the event id via log_intr_event
 * (caller-saved %eax/%ecx/%edx are preserved around the call).
 * Compiles to just the counter bump without APIC_INTR_DIAGNOSTIC_IRQ,
 * and to nothing at all without APIC_INTR_DIAGNOSTIC.
 */
283 #define APIC_ITRACE(name, irq_num, id)                                  \
284         lock ;                                  /* MP-safe */           \
285         incl    CNAME(name) + (irq_num) * 4 ;                           \
286         pushl   %eax ;                                                  \
287         pushl   %ecx ;                                                  \
288         pushl   %edx ;                                                  \
289         movl    $(irq_num), %eax ;                                      \
290         cmpl    $APIC_INTR_DIAGNOSTIC_IRQ, %eax ;                       \
291         jne     7f ;                                                    \
292         pushl   $id ;                                                   \
293         call    log_intr_event ;                                        \
294         addl    $4, %esp ;                                              \
295 7: ;                                                                    \
296         popl    %edx ;                                                  \
297         popl    %ecx ;                                                  \
298         popl    %eax
299 #else
300 #define APIC_ITRACE(name, irq_num, id)                                  \
301         lock ;                                  /* MP-safe */           \
302         incl    CNAME(name) + (irq_num) * 4
303 #endif
304
/* Event ids passed to APIC_ITRACE/log_intr_event. */
305 #define APIC_ITRACE_ENTER 1
306 #define APIC_ITRACE_EOI 2
307 #define APIC_ITRACE_TRYISRLOCK 3
308 #define APIC_ITRACE_GOTISRLOCK 4
309 #define APIC_ITRACE_ENTER2 5
310 #define APIC_ITRACE_LEAVE 6
311 #define APIC_ITRACE_UNMASK 7
312 #define APIC_ITRACE_ACTIVE 8
313 #define APIC_ITRACE_MASKED 9
314 #define APIC_ITRACE_NOISRLOCK 10
315 #define APIC_ITRACE_MASKED2 11
316 #define APIC_ITRACE_SPLZ 12
317 #define APIC_ITRACE_DORETI 13   
318         
319 #else   
320 #define APIC_ITRACE(name, irq_num, id)
321 #endif
322                 
323 #ifdef CPL_AND_CML
324
/*
 * INTR (CPL_AND_CML variant): IDT entry for a normal (non-fast) HWI.
 * Uses the iactive bitmap for lazy masking (btsl; if already active the
 * source is masked, EOId and marked pending), masks+EOIs level-triggered
 * sources, try-acquires the ISR lock (ENLOCK -> 3f on failure), and
 * checks the interrupt against the _cpl and _cml priority masks
 * (-> 2f if blocked).  The Xresume<n> label is the re-entry point used
 * by doreti_unpend.  After the handler: clear iactive/_cil, unmask the
 * source, and exit through _doreti.  When another CPU holds the ISR
 * lock, the IRQ is recorded in _ipending and forwarded via forward_irq.
 */
325 #define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
326         .text ;                                                         \
327         SUPERALIGN_TEXT ;                                               \
328 /* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */  \
329 IDTVEC(vec_name) ;                                                      \
330         PUSH_FRAME ;                                                    \
331         movl    $KDSEL, %eax ;  /* reload with kernel's data segment */ \
332         movl    %ax, %ds ;                                              \
333         movl    %ax, %es ;                                              \
334         movl    $KPSEL, %eax ;                                          \
335         movl    %ax, %fs ;                                              \
336 ;                                                                       \
337         maybe_extra_ipending ;                                          \
338 ;                                                                       \
339         APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;    \
340         lock ;                                  /* MP-safe */           \
341         btsl    $(irq_num), iactive ;           /* lazy masking */      \
342         jc      1f ;                            /* already active */    \
343 ;                                                                       \
344         MASK_LEVEL_IRQ(irq_num) ;                                       \
345         EOI_IRQ(irq_num) ;                                              \
346 0: ;                                                                    \
347         APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
348         ENLOCK ;                                                        \
349 ;                                                                       \
350         APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
351         AVCPL_LOCK ;                            /* MP-safe */           \
352         testl   $IRQ_BIT(irq_num), _cpl ;                               \
353         jne     2f ;                            /* this INT masked */   \
354         testl   $IRQ_BIT(irq_num), _cml ;                               \
355         jne     2f ;                            /* this INT masked */   \
356         orl     $IRQ_BIT(irq_num), _cil ;                               \
357         AVCPL_UNLOCK ;                                                  \
358 ;                                                                       \
359         incb    _intr_nesting_level ;                                   \
360 ;                                                                       \
361   /* entry point used by doreti_unpend for HWIs. */                     \
362 __CONCAT(Xresume,irq_num): ;                                            \
363         FAKE_MCOUNT(13*4(%esp)) ;               /* XXX avoid dbl cnt */ \
364         lock ;  incl    _cnt+V_INTR ;           /* tally interrupts */  \
365         movl    _intr_countp + (irq_num) * 4, %eax ;                    \
366         lock ;  incl    (%eax) ;                                        \
367 ;                                                                       \
368         AVCPL_LOCK ;                            /* MP-safe */           \
369         movl    _cml, %eax ;                                            \
370         pushl   %eax ;                          /* save old _cml */     \
371         orl     _intr_mask + (irq_num) * 4, %eax ;                      \
372         movl    %eax, _cml ;                                            \
373         AVCPL_UNLOCK ;                                                  \
374 ;                                                                       \
375         pushl   _intr_unit + (irq_num) * 4 ;                            \
376         incl    _inside_intr ;                                          \
377         APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;  \
378         sti ;                                                           \
379         call    *_intr_handler + (irq_num) * 4 ;                        \
380         cli ;                                                           \
381         APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
382         decl    _inside_intr ;                                          \
383 ;                                                                       \
384         lock ;  andl $~IRQ_BIT(irq_num), iactive ;                      \
385         lock ;  andl $~IRQ_BIT(irq_num), _cil ;                         \
386         UNMASK_IRQ(irq_num) ;                                           \
387         APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;  \
388         sti ;                           /* doreti repeats cli/sti */    \
389         MEXITCOUNT ;                                                    \
390         LATELOCK ;                                                      \
391         jmp     _doreti ;                                               \
392 ;                                                                       \
393         ALIGN_TEXT ;                                                    \
394 1: ;                                            /* active */            \
395         APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;  \
396         MASK_IRQ(irq_num) ;                                             \
397         EOI_IRQ(irq_num) ;                                              \
398         AVCPL_LOCK ;                            /* MP-safe */           \
399         lock ;                                                          \
400         orl     $IRQ_BIT(irq_num), _ipending ;                          \
401         AVCPL_UNLOCK ;                                                  \
402         lock ;                                                          \
403         btsl    $(irq_num), iactive ;           /* still active */      \
404         jnc     0b ;                            /* retry */             \
405         POP_FRAME ;                                                     \
406         iret ;                                                          \
407 ;                                                                       \
408         ALIGN_TEXT ;                                                    \
409 2: ;                                            /* masked by cpl|cml */ \
410         APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;  \
411         lock ;                                                          \
412         orl     $IRQ_BIT(irq_num), _ipending ;                          \
413         AVCPL_UNLOCK ;                                                  \
414         DELOCK ;                /* XXX this is going away... */         \
415         POP_FRAME ;                                                     \
416         iret ;                                                          \
417         ALIGN_TEXT ;                                                    \
418 3: ;                    /* other cpu has isr lock */                    \
419         APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
420         AVCPL_LOCK ;                            /* MP-safe */           \
421         lock ;                                                          \
422         orl     $IRQ_BIT(irq_num), _ipending ;                          \
423         testl   $IRQ_BIT(irq_num), _cpl ;                               \
424         jne     4f ;                            /* this INT masked */   \
425         testl   $IRQ_BIT(irq_num), _cml ;                               \
426         jne     4f ;                            /* this INT masked */   \
427         orl     $IRQ_BIT(irq_num), _cil ;                               \
428         AVCPL_UNLOCK ;                                                  \
429         call    forward_irq ;   /* forward irq to lock holder */        \
430         POP_FRAME ;                             /* and return */        \
431         iret ;                                                          \
432         ALIGN_TEXT ;                                                    \
433 4: ;                                            /* blocked */           \
434         APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
435         AVCPL_UNLOCK ;                                                  \
436         POP_FRAME ;                             /* and return */        \
437         iret
438
439 #else /* CPL_AND_CML */
440
441
442 #define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
443         .text ;                                                         \
444         SUPERALIGN_TEXT ;                                               \
445 /* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */  \
446 IDTVEC(vec_name) ;                                                      \
447         PUSH_FRAME ;                                                    \
448         movl    $KDSEL, %eax ;  /* reload with kernel's data segment */ \
449         movl    %ax, %ds ;                                              \
450         movl    %ax, %es ;                                              \
451         movl    $KPSEL, %eax ;                                          \
452         movl    %ax, %fs ;                                              \
453 ;                                                                       \
454         maybe_extra_ipending ;                                          \
455 ;                                                                       \
456         APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;    \
457         lock ;                                  /* MP-safe */           \
458         btsl    $(irq_num), iactive ;           /* lazy masking */      \
459         jc      1f ;                            /* already active */    \
460 ;                                                                       \
461         MASK_LEVEL_IRQ(irq_num) ;                                       \
462         EOI_IRQ(irq_num) ;                                              \
463 0: ;                                                                    \
464         APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
465         ISR_TRYLOCK ;           /* XXX this is going away... */         \
466         testl   %eax, %eax ;                    /* did we get it? */    \
467         jz      3f ;                            /* no */                \
468 ;                                                                       \
469         APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
470         AVCPL_LOCK ;                            /* MP-safe */           \
471         testl   $IRQ_BIT(irq_num), _cpl ;                               \
472         jne     2f ;                            /* this INT masked */   \
473         AVCPL_UNLOCK ;                                                  \
474 ;                                                                       \
475         incb    _intr_nesting_level ;                                   \
476 ;                                                                       \
477   /* entry point used by doreti_unpend for HWIs. */                     \
478 __CONCAT(Xresume,irq_num): ;                                            \
479         FAKE_MCOUNT(13*4(%esp)) ;               /* XXX avoid dbl cnt */ \
480         lock ;  incl    _cnt+V_INTR ;           /* tally interrupts */  \
481         movl    _intr_countp + (irq_num) * 4, %eax ;                    \
482         lock ;  incl    (%eax) ;                                        \
483 ;                                                                       \
484         AVCPL_LOCK ;                            /* MP-safe */           \
485         movl    _cpl, %eax ;                                            \
486         pushl   %eax ;                                                  \
487         orl     _intr_mask + (irq_num) * 4, %eax ;                      \
488         movl    %eax, _cpl ;                                            \
489         lock ;                                                          \
490         andl    $~IRQ_BIT(irq_num), _ipending ;                         \
491         AVCPL_UNLOCK ;                                                  \
492 ;                                                                       \
493         pushl   _intr_unit + (irq_num) * 4 ;                            \
494         APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;  \
495         sti ;                                                           \
496         call    *_intr_handler + (irq_num) * 4 ;                        \
497         cli ;                                                           \
498         APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
499 ;                                                                       \
500         lock ;  andl    $~IRQ_BIT(irq_num), iactive ;                   \
501         UNMASK_IRQ(irq_num) ;                                           \
502         APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;  \
503         sti ;                           /* doreti repeats cli/sti */    \
504         MEXITCOUNT ;                                                    \
505         jmp     _doreti ;                                               \
506 ;                                                                       \
507         ALIGN_TEXT ;                                                    \
508 1: ;                                            /* active  */           \
509         APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;  \
510         MASK_IRQ(irq_num) ;                                             \
511         EOI_IRQ(irq_num) ;                                              \
512         AVCPL_LOCK ;                            /* MP-safe */           \
513         lock ;                                                          \
514         orl     $IRQ_BIT(irq_num), _ipending ;                          \
515         AVCPL_UNLOCK ;                                                  \
516         lock ;                                                          \
517         btsl    $(irq_num), iactive ;           /* still active */      \
518         jnc     0b ;                            /* retry */             \
519         POP_FRAME ;                                                     \
520         iret ;          /* XXX:  iactive bit might be 0 now */          \
521         ALIGN_TEXT ;                                                    \
522 2: ;                            /* masked by cpl, leave iactive set */  \
523         APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;  \
524         lock ;                                                          \
525         orl     $IRQ_BIT(irq_num), _ipending ;                          \
526         AVCPL_UNLOCK ;                                                  \
527         ISR_RELLOCK ;           /* XXX this is going away... */         \
528         POP_FRAME ;                                                     \
529         iret ;                                                          \
530         ALIGN_TEXT ;                                                    \
531 3: ;                    /* other cpu has isr lock */                    \
532         APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
533         AVCPL_LOCK ;                            /* MP-safe */           \
534         lock ;                                                          \
535         orl     $IRQ_BIT(irq_num), _ipending ;                          \
536         testl   $IRQ_BIT(irq_num), _cpl ;                               \
537         jne     4f ;                            /* this INT masked */   \
538         AVCPL_UNLOCK ;                                                  \
539         call    forward_irq ;    /* forward irq to lock holder */       \
540         POP_FRAME ;                             /* and return */        \
541         iret ;                                                          \
542         ALIGN_TEXT ;                                                    \
543 4: ;                                            /* blocked */           \
544         APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
545         AVCPL_UNLOCK ;                                                  \
546         POP_FRAME ;                             /* and return */        \
547         iret
548
549 #endif /* CPL_AND_CML */
550
551
552 /*
553  * Handle "spurious INTerrupts".
554  * Notes:
555  *  This is different than the "spurious INTerrupt" generated by an
556  *   8259 PIC for missing INTs.  See the APIC documentation for details.
557  *  This routine should NOT do an 'EOI' cycle.
558  */
559         .text
560         SUPERALIGN_TEXT
561         .globl _Xspuriousint
562 _Xspuriousint:
563
564         /* No EOI cycle used here */
/*
 * Spurious-vector delivery leaves no in-service bit set in the local
 * APIC, so an EOI would be wrong here; no registers are touched and
 * we simply resume the interrupted context.
 */
565
566         iret
567
568
569 /*
570  * Handle TLB shootdowns.
571  */
572         .text
573         SUPERALIGN_TEXT
574         .globl  _Xinvltlb
575 _Xinvltlb:
/* Only %eax is used below, so a full trap frame is not built for this IPI. */
576         pushl   %eax
577
578 #ifdef COUNT_XINVLTLB_HITS
/*
 * Bump the per-CPU hit counter.  %fs is temporarily loaded with the
 * private-page selector to read _cpuid; the "ss" prefix on the incl
 * avoids having to load %ds at all.
 */
579         pushl   %fs
580         movl    $KPSEL, %eax
581         movl    %ax, %fs
582         movl    _cpuid, %eax
583         popl    %fs
584         ss
585         incl    _xhits(,%eax,4)
586 #endif /* COUNT_XINVLTLB_HITS */
587
/* Reloading %cr3 with its own value flushes the (non-global) TLB entries. */
588         movl    %cr3, %eax              /* invalidate the TLB */
589         movl    %eax, %cr3
590
591         ss                              /* stack segment, avoid %ds load */
592         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
593
594         popl    %eax
595         iret
596
597
598 #ifdef BETTER_CLOCK
599
600 /*
601  * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
602  *
603  *  - Stores current cpu state in checkstate_cpustate[cpuid]
604  *      0 == user, 1 == sys, 2 == intr
605  *  - Stores current process in checkstate_curproc[cpuid]
606  *
607  *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
608  *
609  * stack: 0->fs, 4->ds, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
610  */
611
612         .text
613         SUPERALIGN_TEXT
614         .globl _Xcpucheckstate
615         .globl _checkstate_cpustate
616         .globl _checkstate_curproc
617         .globl _checkstate_pc
618 _Xcpucheckstate:
619         pushl   %eax
620         pushl   %ebx            
621         pushl   %ds                     /* save current data segment */
622         pushl   %fs
623
624         movl    $KDSEL, %eax
625         movl    %ax, %ds                /* use KERNEL data segment */
626         movl    $KPSEL, %eax
627         movl    %ax, %fs
628
629         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
630
/*
 * Classify the interrupted context into %ebx:
 *   0 = user, 1 = system, 2 = interrupt (CPL_AND_CML only).
 * After the four pushes above, 16(%esp) = saved %eip, 20(%esp) = saved
 * %cs and 24(%esp) = saved %eflags of the interrupted frame.
 */
631         movl    $0, %ebx                
632         movl    20(%esp), %eax  
633         andl    $3, %eax
634         cmpl    $3, %eax
635         je      1f                      /* CPL 3: user mode */
636         testl   $PSL_VM, 24(%esp)
637         jne     1f                      /* vm86 counts as user too */
638         incl    %ebx                    /* system or interrupt */
639 #ifdef CPL_AND_CML      
640         cmpl    $0, _inside_intr
641         je      1f
642         incl    %ebx                    /* interrupt */
643 #endif
644 1:      
/* Record state, curproc and pc in the per-CPU checkstate arrays. */
645         movl    _cpuid, %eax
646         movl    %ebx, _checkstate_cpustate(,%eax,4)
647         movl    _curproc, %ebx
648         movl    %ebx, _checkstate_curproc(,%eax,4)
649         movl    16(%esp), %ebx
650         movl    %ebx, _checkstate_pc(,%eax,4)
651
652         lock                            /* checkstate_probed_cpus |= (1<<id) */
653         btsl    %eax, _checkstate_probed_cpus
654
655         popl    %fs
656         popl    %ds                     /* restore previous data segment */
657         popl    %ebx
658         popl    %eax
659         iret
660
661 #endif /* BETTER_CLOCK */
662
663 /*
664  * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
665  *
666  *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
667  *
668  *  - We need a better method of triggering asts on other cpus.
669  */
670
671         .text
672         SUPERALIGN_TEXT
673         .globl _Xcpuast
674 _Xcpuast:
675         PUSH_FRAME
676         movl    $KDSEL, %eax
677         movl    %ax, %ds                /* use KERNEL data segment */
678         movl    %ax, %es
679         movl    $KPSEL, %eax
680         movl    %ax, %fs
681
682         movl    _cpuid, %eax
683         lock                            /* checkstate_need_ast &= ~(1<<id) */
684         btrl    %eax, _checkstate_need_ast
685         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
686
/*
 * Set our bit in checkstate_pending_ast; if it was already set an AST
 * is still being delivered on this CPU, so bail out at 1: below.
 */
687         lock
688         btsl    %eax, _checkstate_pending_ast
689         jc      1f
690
691         FAKE_MCOUNT(13*4(%esp))
692
693         /* 
694          * Giant locks do not come cheap.
695          * A lot of cycles are going to be wasted here.
696          */
697         call    _get_isrlock
698
/*
 * Build the frame _doreti expects: saved priority level (cml or cpl)
 * followed by a dummy unit (the "pushl $0" below).
 */
699         AVCPL_LOCK
700 #ifdef CPL_AND_CML
701         movl    _cml, %eax
702 #else
703         movl    _cpl, %eax
704 #endif
705         pushl   %eax
706         movl    $1, _astpending         /* XXX */
707         AVCPL_UNLOCK
708         lock
709         incb    _intr_nesting_level
710         sti
711         
712         pushl   $0
713         
/*
 * Clear our pending-AST bit; if this CPU was flagged in resched_cpus,
 * request a reschedule and count it.
 */
714         movl    _cpuid, %eax
715         lock    
716         btrl    %eax, _checkstate_pending_ast
717         lock    
718         btrl    %eax, CNAME(resched_cpus)
719         jnc     2f
720         movl    $1, CNAME(want_resched)
721         lock
722         incl    CNAME(want_resched_cnt)
723 2:              
724         lock
725         incl    CNAME(cpuast_cnt)
726         MEXITCOUNT
727         jmp     _doreti
728 1:
729         /* We are already in the process of delivering an ast for this CPU */
730         POP_FRAME
731         iret                    
732
733
734 /*
735  *       Executed by a CPU when it receives an XFORWARD_IRQ IPI.
736  */
737
738         .text
739         SUPERALIGN_TEXT
740         .globl _Xforward_irq
741 _Xforward_irq:
742         PUSH_FRAME
743         movl    $KDSEL, %eax
744         movl    %ax, %ds                /* use KERNEL data segment */
745         movl    %ax, %es
746         movl    $KPSEL, %eax
747         movl    %ax, %fs
748
749         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
750
751         FAKE_MCOUNT(13*4(%esp))
752
753         ISR_TRYLOCK
754         testl   %eax,%eax               /* Did we get the lock ? */
755         jz  1f                          /* No */
756
/*
 * Got the ISR lock: handle the forwarded interrupt via _doreti unless
 * we are already nested too deeply (>= 4 levels, see 2: below).
 */
757         lock
758         incl    CNAME(forward_irq_hitcnt)
759         cmpb    $4, _intr_nesting_level
760         jae     2f
761         
/* Build the doreti frame: saved priority level plus a dummy unit. */
762         AVCPL_LOCK
763 #ifdef CPL_AND_CML
764         movl    _cml, %eax
765 #else
766         movl    _cpl, %eax
767 #endif
768         pushl   %eax
769         AVCPL_UNLOCK
770         lock
771         incb    _intr_nesting_level
772         sti
773         
774         pushl   $0
775
776         MEXITCOUNT
777         jmp     _doreti                 /* Handle forwarded interrupt */
778 1:
/* Lock is held elsewhere: re-forward the IRQ to the current holder. */
779         lock
780         incl    CNAME(forward_irq_misscnt)
781         call    forward_irq     /* Oops, we've lost the isr lock */
782         MEXITCOUNT
783         POP_FRAME
784         iret
785 2:
/* Too deeply nested: drop the lock and return without handling. */
786         lock
787         incl    CNAME(forward_irq_toodeepcnt)
788 3:      
789         ISR_RELLOCK
790         MEXITCOUNT
791         POP_FRAME
792         iret
793
794 /*
795  * forward_irq: send an XFORWARD_IRQ IPI to the CPU currently holding
796  * the giant (mp) lock, or to CPU #0 if the lock is free.  No-op when
797  * invltlb_ok or forward_irq_enabled is clear.  Clobbers %eax, %ecx.
798  */
797 forward_irq:
798         MCOUNT
799         cmpl    $0,_invltlb_ok
800         jz      4f
801
802         cmpl    $0, CNAME(forward_irq_enabled)
803         jz      4f
804
/*
 * Pick the destination CPU: the holder encoded in _mp_lock, or CPU #0
 * when the lock is free.  The shrl $24 extracts the holder field from
 * the top byte of the lock word.
 */
805         movl    _mp_lock,%eax
806         cmpl    $FREE_LOCK,%eax
807         jne     1f
808         movl    $0, %eax                /* Pick CPU #0 if noone has lock */
809 1:
810         shrl    $24,%eax
/* Program the ICR high word with the destination APIC id (bits 24-31). */
811         movl    _cpu_num_to_apic_id(,%eax,4),%ecx
812         shll    $24,%ecx
813         movl    lapic_icr_hi, %eax
814         andl    $~APIC_ID_MASK, %eax
815         orl     %ecx, %eax
816         movl    %eax, lapic_icr_hi
817
/* Wait for any previous IPI to finish delivery, then send ours. */
818 2:
819         movl    lapic_icr_lo, %eax
820         andl    $APIC_DELSTAT_MASK,%eax
821         jnz     2b
822         movl    lapic_icr_lo, %eax
823         andl    $APIC_RESV2_MASK, %eax
824         orl     $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
825         movl    %eax, lapic_icr_lo
/* Spin until this IPI has been accepted by the destination. */
826 3:
827         movl    lapic_icr_lo, %eax
828         andl    $APIC_DELSTAT_MASK,%eax
829         jnz     3b
830 4:              
831         ret
832         
833 /*
834  * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
835  *
836  *  - Signals its receipt.
837  *  - Waits for permission to restart.
838  *  - Signals its restart.
839  */
840
841         .text
842         SUPERALIGN_TEXT
843         .globl _Xcpustop
844 _Xcpustop:
/*
 * Save the caller-used registers and segment registers by hand; a full
 * trap frame is not needed, only what the code below clobbers.
 */
845         pushl   %ebp
846         movl    %esp, %ebp
847         pushl   %eax
848         pushl   %ecx
849         pushl   %edx
850         pushl   %ds                     /* save current data segment */
851         pushl   %fs
852
853         movl    $KDSEL, %eax
854         movl    %ax, %ds                /* use KERNEL data segment */
855         movl    $KPSEL, %eax
856         movl    %ax, %fs
857
858         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
859
/* Save this CPU's context into its slot of the stoppcbs[] array. */
860         movl    _cpuid, %eax
861         imull   $PCB_SIZE, %eax
862         leal    CNAME(stoppcbs)(%eax), %eax
863         pushl   %eax
864         call    CNAME(savectx)          /* Save process context */
865         addl    $4, %esp
866         
867                 
868         movl    _cpuid, %eax
869
/* Announce we are stopped, then spin until told to start again. */
870         lock
871         btsl    %eax, _stopped_cpus     /* stopped_cpus |= (1<<id) */
872 1:
873         btl     %eax, _started_cpus     /* while (!(started_cpus & (1<<id))) */
874         jnc     1b
875
876         lock
877         btrl    %eax, _started_cpus     /* started_cpus &= ~(1<<id) */
878         lock
879         btrl    %eax, _stopped_cpus     /* stopped_cpus &= ~(1<<id) */
880
/* Only CPU #0 runs the one-shot restart function, if one is set. */
881         test    %eax, %eax
882         jnz     2f
883
884         movl    CNAME(cpustop_restartfunc), %eax
885         test    %eax, %eax
886         jz      2f
887         movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */
888
/*
 * Indirect call through %eax: AT&T syntax requires the '*' prefix
 * (plain "call %eax" is at best a GAS warning, at worst an error).
 */
889         call    *%eax
890 2:
891         popl    %fs
892         popl    %ds                     /* restore previous data segment */
893         popl    %edx
894         popl    %ecx
895         popl    %eax
896         movl    %ebp, %esp
897         popl    %ebp
898         iret
899
900
/*
 * Instantiate the hardware-interrupt entry points for IRQs 0-23.
 * The MCOUNT_LABEL pair brackets the generated code so the profiler
 * can attribute time spent in these stubs.
 */
901 MCOUNT_LABEL(bintr)
902         FAST_INTR(0,fastintr0)
903         FAST_INTR(1,fastintr1)
904         FAST_INTR(2,fastintr2)
905         FAST_INTR(3,fastintr3)
906         FAST_INTR(4,fastintr4)
907         FAST_INTR(5,fastintr5)
908         FAST_INTR(6,fastintr6)
909         FAST_INTR(7,fastintr7)
910         FAST_INTR(8,fastintr8)
911         FAST_INTR(9,fastintr9)
912         FAST_INTR(10,fastintr10)
913         FAST_INTR(11,fastintr11)
914         FAST_INTR(12,fastintr12)
915         FAST_INTR(13,fastintr13)
916         FAST_INTR(14,fastintr14)
917         FAST_INTR(15,fastintr15)
918         FAST_INTR(16,fastintr16)
919         FAST_INTR(17,fastintr17)
920         FAST_INTR(18,fastintr18)
921         FAST_INTR(19,fastintr19)
922         FAST_INTR(20,fastintr20)
923         FAST_INTR(21,fastintr21)
924         FAST_INTR(22,fastintr22)
925         FAST_INTR(23,fastintr23)
/* IRQ 0 (the clock) additionally marks clkintr_pending on entry. */
926 #define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
927         INTR(0,intr0, CLKINTR_PENDING)
928         INTR(1,intr1,)
929         INTR(2,intr2,)
930         INTR(3,intr3,)
931         INTR(4,intr4,)
932         INTR(5,intr5,)
933         INTR(6,intr6,)
934         INTR(7,intr7,)
935         INTR(8,intr8,)
936         INTR(9,intr9,)
937         INTR(10,intr10,)
938         INTR(11,intr11,)
939         INTR(12,intr12,)
940         INTR(13,intr13,)
941         INTR(14,intr14,)
942         INTR(15,intr15,)
943         INTR(16,intr16,)
944         INTR(17,intr17,)
945         INTR(18,intr18,)
946         INTR(19,intr19,)
947         INTR(20,intr20,)
948         INTR(21,intr21,)
949         INTR(22,intr22,)
950         INTR(23,intr23,)
951 MCOUNT_LABEL(eintr)
952
953 /*
954  * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
955  *
956  * - Calls the generic rendezvous action function.
957  */
958         .text
959         SUPERALIGN_TEXT
960         .globl  _Xrendezvous
961 _Xrendezvous:
962         PUSH_FRAME
963         movl    $KDSEL, %eax
964         movl    %ax, %ds                /* use KERNEL data segment */
965         movl    %ax, %es
966         movl    $KPSEL, %eax
967         movl    %ax, %fs
968
/* The generic C-level rendezvous dispatcher does all the real work. */
969         call    _smp_rendezvous_action
970
971         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
972         POP_FRAME
973         iret
974         
975         
976         .data
977 /*
978  * Addresses of interrupt handlers.
979  *  XresumeNN: Resumption addresses for HWIs.
980  */
981         .globl _ihandlers
982 _ihandlers:
983 /*
984  * used by:
985  *  ipl.s:      doreti_unpend
986  */
/* 24 hardware-interrupt resume addresses, indexed by IRQ number. */
987         .long   Xresume0,  Xresume1,  Xresume2,  Xresume3 
988         .long   Xresume4,  Xresume5,  Xresume6,  Xresume7
989         .long   Xresume8,  Xresume9,  Xresume10, Xresume11
990         .long   Xresume12, Xresume13, Xresume14, Xresume15 
991         .long   Xresume16, Xresume17, Xresume18, Xresume19
992         .long   Xresume20, Xresume21, Xresume22, Xresume23
993 /*
994  * used by:
995  *  ipl.s:      doreti_unpend
996  *  apic_ipl.s: splz_unpend
997  */
/* 8 software-interrupt handler entries following the 24 HWI slots. */
998         .long   _swi_null, swi_net, _swi_null, _swi_null
999         .long   _swi_vm, _swi_null, _softclock, _swi_null
1000
1001 imasks:                         /* masks for interrupt handlers */
1002         .space  NHWI*4          /* padding; HWI masks are elsewhere */
1003
1004         .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
1005         .long   SWI_VM_MASK, 0, SWI_CLOCK_MASK, 0
1006
1007 /* active flag for lazy masking */
1008 iactive:
1009         .long   0
1010
1011 #ifdef COUNT_XINVLTLB_HITS
/* Per-CPU counters of TLB-shootdown IPIs received (see _Xinvltlb). */
1012         .globl  _xhits
1013 _xhits:
1014         .space  (NCPU * 4), 0
1015 #endif /* COUNT_XINVLTLB_HITS */
1016
1017 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
/* Bitmasks of CPU ids, manipulated with lock btsl/btrl in Xcpustop. */
1018         .globl _stopped_cpus, _started_cpus
1019 _stopped_cpus:
1020         .long   0
1021 _started_cpus:
1022         .long   0
1023
1024 #ifdef BETTER_CLOCK
/* Bitmask: CPUs that have answered an Xcpucheckstate probe. */
1025         .globl _checkstate_probed_cpus
1026 _checkstate_probed_cpus:
1027         .long   0       
1028 #endif /* BETTER_CLOCK */
/* Bitmasks of CPUs with ASTs requested / ASTs currently being delivered. */
1029         .globl _checkstate_need_ast
1030 _checkstate_need_ast:
1031         .long   0
1032 _checkstate_pending_ast:
1033         .long   0
/*
 * Statistics counters for the IRQ-forwarding and cross-CPU AST paths
 * (bumped with lock incl in Xforward_irq / Xcpuast), plus the one-shot
 * restart hook consumed by Xcpustop.
 */
1034         .globl CNAME(forward_irq_misscnt)
1035         .globl CNAME(forward_irq_toodeepcnt)
1036         .globl CNAME(forward_irq_hitcnt)
1037         .globl CNAME(resched_cpus)
1038         .globl CNAME(want_resched_cnt)
1039         .globl CNAME(cpuast_cnt)
1040         .globl CNAME(cpustop_restartfunc)
1041 CNAME(forward_irq_misscnt):     
1042         .long 0
1043 CNAME(forward_irq_hitcnt):      
1044         .long 0
1045 CNAME(forward_irq_toodeepcnt):
1046         .long 0
1047 CNAME(resched_cpus):
1048         .long 0
1049 CNAME(want_resched_cnt):
1050         .long 0
1051 CNAME(cpuast_cnt):
1052         .long 0
1053 CNAME(cpustop_restartfunc):
1054         .long 0
1055                 
1056
1057
/* Bitmask of IO APIC pins; presumably 1 = level-triggered — TODO confirm against io_apic setup code. */
1058         .globl  _apic_pin_trigger
1059 _apic_pin_trigger:
1060         .long   0
1061
1062         .text