/*
 *      from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD$
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)        (1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
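
/*
 * Each I/O APIC redirection table entry is 64 bits wide and occupies two
 * 32-bit registers starting at register index 0x10, so REDTBL_IDX(5), for
 * example, is 0x10 + 5 * 2 = 0x1a, the low dword of entry 5.
 */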

/*
 * Macros to build and tear down an interrupt stack frame.
 */
#define PUSH_FRAME                                                      \
        pushl   $0 ;            /* dummy error code */                  \
        pushl   $0 ;            /* dummy trap type */                   \
        pushal ;                                                        \
        pushl   %ds ;           /* save data and extra segments ... */  \
        pushl   %es ;                                                   \
        pushl   %fs

#define POP_FRAME                                                       \
        popl    %fs ;                                                   \
        popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popal ;                                                         \
        addl    $4+4,%esp
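
/*
 * PUSH_FRAME lays the stack out the way doreti expects: the two dummy
 * words stand in for the trap type and error code that a real trap would
 * push, followed by the eight pushal registers and %ds/%es/%fs.  That is
 * why FAKE_MCOUNT below uses 13*4(%esp) (3 + 8 + 2 dwords) to reach the
 * saved %eip, and why POP_FRAME finishes with "addl $4+4,%esp" to discard
 * the two dummies.
 */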

/*
 * Macros for interrupt entry, call to handler, and exit.
 */

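/*
 * A fast interrupt switches to the kernel segments, calls the handler
 * directly from this frame with interrupts still disabled, EOIs the
 * local APIC, bumps the statistics, and leaves through doreti.
 */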
#define FAST_INTR(irq_num, vec_name)                                    \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        movl    $KDSEL,%eax ;                                           \
        mov     %ax,%ds ;                                               \
        mov     %ax,%es ;                                               \
        movl    $KPSEL,%eax ;                                           \
        mov     %ax,%fs ;                                               \
        FAKE_MCOUNT(13*4(%esp)) ;                                       \
        movl    PCPU(CURPROC),%ebx ;                                    \
        incl    P_INTR_NESTING_LEVEL(%ebx) ;                            \
        pushl   _intr_unit + (irq_num) * 4 ;                            \
        call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
        addl    $4, %esp ;                                              \
        movl    $0, _lapic+LA_EOI ;                                     \
        lock ;                                                          \
        incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
        movl    _intr_countp + (irq_num) * 4, %eax ;                    \
        lock ;                                                          \
        incl    (%eax) ;                                                \
        decl    P_INTR_NESTING_LEVEL(%ebx) ;                            \
        MEXITCOUNT ;                                                    \
        jmp     _doreti

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

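/*
 * Each int_to_apicintpin[] entry is 16 bytes; the word at offset 8 holds
 * the I/O APIC register-select address for this pin and the word at
 * offset 12 the index of its redirection entry.  The I/O APIC is accessed
 * indirectly: write the register index to the select register, then read
 * or write the value through the 32-bit window register.
 */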
#define MASK_IRQ(irq_num)                                               \
        IMASK_LOCK ;                            /* into critical reg */ \
        testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
        jne     7f ;                    /* masked, don't mask */        \
        orl     $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */  \
        movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax, (%ecx) ;                  /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
        orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
        movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
7: ;                                            /* already masked */    \
        IMASK_UNLOCK

/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)                                         \
        testl   $IRQ_BIT(irq_num), _apic_pin_trigger ;                  \
        jz      9f ;                            /* edge, don't mask */  \
        MASK_IRQ(irq_num) ;                                             \
9:


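/*
 * EOI_IRQ only writes the local APIC's EOI register when the APIC shows
 * this interrupt in service, so we never acknowledge on behalf of some
 * other in-service interrupt.  With APIC_INTR_REORDER, each 8-byte
 * apic_isrbit_location[] entry caches a pointer to the ISR dword that
 * holds this IRQ's bit and the mask to test it with.
 */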
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)                                                \
        movl    _apic_isrbit_location + 8 * (irq_num), %eax ;           \
        movl    (%eax), %eax ;                                          \
        testl   _apic_isrbit_location + 4 + 8 * (irq_num), %eax ;       \
        jz      9f ;                            /* not active */        \
        movl    $0, _lapic+LA_EOI ;                                     \
9:

#else
#define EOI_IRQ(irq_num)                                                \
        testl   $IRQ_BIT(irq_num), _lapic+LA_ISR1;                      \
        jz      9f      ;                       /* not active */        \
        movl    $0, _lapic+LA_EOI;                                      \
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)                                             \
        IMASK_LOCK ;                            /* into critical reg */ \
        testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
        je      7f ;                    /* bit clear, not masked */     \
        andl    $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */    \
        movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax, (%ecx) ;                  /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
        andl    $~IOART_INTMASK, %eax ;         /* clear the mask */    \
        movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
7: ;                                            /* already unmasked */  \
        IMASK_UNLOCK

/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
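/*
 * A slow interrupt masks a level-triggered source, EOIs the local APIC,
 * and hands the IRQ number to sched_ithd(), which wakes the interrupt
 * thread that will run the handlers later; we then unwind through doreti
 * like any other interrupt.
 */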
#define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */                \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        movl    $KDSEL, %eax ;  /* reload with kernel's data segment */ \
        mov     %ax, %ds ;                                              \
        mov     %ax, %es ;                                              \
        movl    $KPSEL, %eax ;                                          \
        mov     %ax, %fs ;                                              \
;                                                                       \
        maybe_extra_ipending ;                                          \
;                                                                       \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        EOI_IRQ(irq_num) ;                                              \
0: ;                                                                    \
        movl    PCPU(CURPROC),%ebx ;                                    \
        incl    P_INTR_NESTING_LEVEL(%ebx) ;                            \
;                                                                       \
  /* entry point used by doreti_unpend for HWIs. */                     \
__CONCAT(Xresume,irq_num): ;                                            \
        FAKE_MCOUNT(13*4(%esp)) ;               /* XXX avoid dbl cnt */ \
        pushl   $irq_num;                       /* pass the IRQ */      \
        sti ;                                                           \
        call    _sched_ithd ;                                           \
        addl    $4, %esp ;              /* discard the parameter */     \
;                                                                       \
        decl    P_INTR_NESTING_LEVEL(%ebx) ;                            \
        MEXITCOUNT ;                                                    \
        jmp     _doreti

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
        .text
        SUPERALIGN_TEXT
        .globl _Xspuriousint
_Xspuriousint:

        /* No EOI cycle used here */

        iret


/*
 * Handle TLB shootdowns.
 */
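/*
 * Reloading %cr3 with its current value flushes every non-global TLB
 * entry, which is all the invalidation we need here.  The "ss" prefixes
 * let us reference kernel data through the stack segment so we don't
 * have to save and reload %ds.
 */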
        .text
        SUPERALIGN_TEXT
        .globl  _Xinvltlb
_Xinvltlb:
        pushl   %eax

#ifdef COUNT_XINVLTLB_HITS
        pushl   %fs
        movl    $KPSEL, %eax
        mov     %ax, %fs
        movl    PCPU(CPUID), %eax
        popl    %fs
        ss
        incl    _xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

        movl    %cr3, %eax              /* invalidate the TLB */
        movl    %eax, %cr3

        ss                              /* stack segment, avoid %ds load */
        movl    $0, _lapic+LA_EOI       /* End Of Interrupt to APIC */

        popl    %eax
        iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xcpucheckstate
        .globl _checkstate_cpustate
        .globl _checkstate_curproc
        .globl _checkstate_pc
_Xcpucheckstate:
        pushl   %eax
        pushl   %ebx
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, _lapic+LA_EOI       /* End Of Interrupt to APIC */

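        /*
         * Classify what we interrupted: the low two bits of the saved %cs
         * are the privilege level and PSL_VM in the saved %eflags marks
         * vm86 mode; both count as user (0), anything else as system (1).
         */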
        movl    $0, %ebx
        movl    20(%esp), %eax
        andl    $3, %eax
        cmpl    $3, %eax
        je      1f
        testl   $PSL_VM, 24(%esp)
        jne     1f
        incl    %ebx                    /* system or interrupt */
1:
        movl    PCPU(CPUID), %eax
        movl    %ebx, _checkstate_cpustate(,%eax,4)
        movl    PCPU(CURPROC), %ebx
        movl    %ebx, _checkstate_curproc(,%eax,4)

        movl    16(%esp), %ebx
        movl    %ebx, _checkstate_pc(,%eax,4)

        lock                            /* checkstate_probed_cpus |= (1<<id) */
        btsl    %eax, _checkstate_probed_cpus

        popl    %fs
        popl    %ds                     /* restore previous data segment */
        popl    %ebx
        popl    %eax
        iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xcpuast
_Xcpuast:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    PCPU(CPUID), %eax
        lock                            /* checkstate_need_ast &= ~(1<<id) */
        btrl    %eax, _checkstate_need_ast
        movl    $0, _lapic+LA_EOI       /* End Of Interrupt to APIC */

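        /*
         * btsl both sets our bit in checkstate_pending_ast and tells us,
         * via the carry flag, whether it was already set; if an AST for
         * this CPU is already being delivered we simply drop the IPI.
         */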
        lock
        btsl    %eax, _checkstate_pending_ast
        jc      1f

        FAKE_MCOUNT(13*4(%esp))

        orl     $AST_PENDING, PCPU(ASTPENDING)  /* XXX */
        movl    PCPU(CURPROC),%ebx
        incl    P_INTR_NESTING_LEVEL(%ebx)
        sti

        movl    PCPU(CPUID), %eax
        lock
        btrl    %eax, _checkstate_pending_ast
        lock
        btrl    %eax, CNAME(resched_cpus)
        jnc     2f
        orl     $AST_PENDING+AST_RESCHED, PCPU(ASTPENDING)
        lock
        incl    CNAME(want_resched_cnt)
2:
        lock
        incl    CNAME(cpuast_cnt)
        decl    P_INTR_NESTING_LEVEL(%ebx)
        MEXITCOUNT
        jmp     _doreti
1:
        /* We are already in the process of delivering an ast for this CPU */
        POP_FRAME
        iret

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xcpustop
_Xcpustop:
        pushl   %ebp
        movl    %esp, %ebp
        pushl   %eax
        pushl   %ecx
        pushl   %edx
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, _lapic+LA_EOI       /* End Of Interrupt to APIC */

        movl    PCPU(CPUID), %eax
        imull   $PCB_SIZE, %eax
        leal    CNAME(stoppcbs)(%eax), %eax
        pushl   %eax
        call    CNAME(savectx)          /* Save process context */
        addl    $4, %esp

        movl    PCPU(CPUID), %eax

        lock
        btsl    %eax, _stopped_cpus     /* stopped_cpus |= (1<<id) */
1:
        btl     %eax, _started_cpus     /* while (!(started_cpus & (1<<id))) */
        jnc     1b

        lock
        btrl    %eax, _started_cpus     /* started_cpus &= ~(1<<id) */
        lock
        btrl    %eax, _stopped_cpus     /* stopped_cpus &= ~(1<<id) */

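        /*
         * %eax still holds the CPU id: only CPU 0 runs the optional
         * restart hook, and cpustop_restartfunc is cleared first so it
         * fires only once per restart.
         */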
        test    %eax, %eax
        jnz     2f

        movl    CNAME(cpustop_restartfunc), %eax
        test    %eax, %eax
        jz      2f
        movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */

        call    *%eax
2:
        popl    %fs
        popl    %ds                     /* restore previous data segment */
        popl    %edx
        popl    %ecx
        popl    %eax
        movl    %ebp, %esp
        popl    %ebp
        iret


MCOUNT_LABEL(bintr)
        FAST_INTR(0,fastintr0)
        FAST_INTR(1,fastintr1)
        FAST_INTR(2,fastintr2)
        FAST_INTR(3,fastintr3)
        FAST_INTR(4,fastintr4)
        FAST_INTR(5,fastintr5)
        FAST_INTR(6,fastintr6)
        FAST_INTR(7,fastintr7)
        FAST_INTR(8,fastintr8)
        FAST_INTR(9,fastintr9)
        FAST_INTR(10,fastintr10)
        FAST_INTR(11,fastintr11)
        FAST_INTR(12,fastintr12)
        FAST_INTR(13,fastintr13)
        FAST_INTR(14,fastintr14)
        FAST_INTR(15,fastintr15)
        FAST_INTR(16,fastintr16)
        FAST_INTR(17,fastintr17)
        FAST_INTR(18,fastintr18)
        FAST_INTR(19,fastintr19)
        FAST_INTR(20,fastintr20)
        FAST_INTR(21,fastintr21)
        FAST_INTR(22,fastintr22)
        FAST_INTR(23,fastintr23)
        FAST_INTR(24,fastintr24)
        FAST_INTR(25,fastintr25)
        FAST_INTR(26,fastintr26)
        FAST_INTR(27,fastintr27)
        FAST_INTR(28,fastintr28)
        FAST_INTR(29,fastintr29)
        FAST_INTR(30,fastintr30)
        FAST_INTR(31,fastintr31)
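/*
 * IRQ 0 (the clock) passes CLKINTR_PENDING as its maybe_extra_ipending
 * argument so the clock code can see that a tick is waiting to be
 * serviced; every other IRQ passes nothing.
 */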
#define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
        INTR(0,intr0, CLKINTR_PENDING)
        INTR(1,intr1,)
        INTR(2,intr2,)
        INTR(3,intr3,)
        INTR(4,intr4,)
        INTR(5,intr5,)
        INTR(6,intr6,)
        INTR(7,intr7,)
        INTR(8,intr8,)
        INTR(9,intr9,)
        INTR(10,intr10,)
        INTR(11,intr11,)
        INTR(12,intr12,)
        INTR(13,intr13,)
        INTR(14,intr14,)
        INTR(15,intr15,)
        INTR(16,intr16,)
        INTR(17,intr17,)
        INTR(18,intr18,)
        INTR(19,intr19,)
        INTR(20,intr20,)
        INTR(21,intr21,)
        INTR(22,intr22,)
        INTR(23,intr23,)
        INTR(24,intr24,)
        INTR(25,intr25,)
        INTR(26,intr26,)
        INTR(27,intr27,)
        INTR(28,intr28,)
        INTR(29,intr29,)
        INTR(30,intr30,)
        INTR(31,intr31,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
        .text
        SUPERALIGN_TEXT
        .globl  _Xrendezvous
_Xrendezvous:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        call    _smp_rendezvous_action

        movl    $0, _lapic+LA_EOI       /* End Of Interrupt to APIC */
        POP_FRAME
        iret


        .data

#ifdef COUNT_XINVLTLB_HITS
        .globl  _xhits
_xhits:
        .space  (NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
        .globl _stopped_cpus, _started_cpus
_stopped_cpus:
        .long   0
_started_cpus:
        .long   0

#ifdef BETTER_CLOCK
        .globl _checkstate_probed_cpus
_checkstate_probed_cpus:
        .long   0
#endif /* BETTER_CLOCK */
        .globl _checkstate_need_ast
_checkstate_need_ast:
        .long   0
_checkstate_pending_ast:
        .long   0
        .globl CNAME(forward_irq_misscnt)
        .globl CNAME(forward_irq_toodeepcnt)
        .globl CNAME(forward_irq_hitcnt)
        .globl CNAME(resched_cpus)
        .globl CNAME(want_resched_cnt)
        .globl CNAME(cpuast_cnt)
        .globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
        .long 0
CNAME(forward_irq_hitcnt):
        .long 0
CNAME(forward_irq_toodeepcnt):
        .long 0
CNAME(resched_cpus):
        .long 0
CNAME(want_resched_cnt):
        .long 0
CNAME(cpuast_cnt):
        .long 0
CNAME(cpustop_restartfunc):
        .long 0

        .globl  _apic_pin_trigger
_apic_pin_trigger:
        .long   0

        .text