/*
 *      from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD$
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)        (1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))

/*
 * Trap frame push/pop helpers used by the interrupt and IPI entry
 * points in this file.
 */
#define PUSH_FRAME                                                      \
        pushl   $0 ;            /* dummy error code */                  \
        pushl   $0 ;            /* dummy trap type */                   \
        pushal ;                                                        \
        pushl   %ds ;           /* save data and extra segments ... */  \
        pushl   %es ;                                                   \
        pushl   %fs

#define POP_FRAME                                                       \
        popl    %fs ;                                                   \
        popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popal ;                                                         \
        addl    $4+4,%esp

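/*
 * PUSH_FRAME builds the usual trap frame layout: a dummy error code and
 * trap type, the eight general registers saved by pushal, and
 * %ds/%es/%fs, i.e. 13 dwords on top of the hardware supplied
 * %eip/%cs/%eflags.  This is why the entry points below refer to the
 * interrupted %eip as 13*4(%esp), and why POP_FRAME ends with
 * "addl $4+4,%esp" to discard the two dummy words.
 */
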
/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define FAST_INTR(irq_num, vec_name)                                    \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        movl    $KDSEL,%eax ;                                           \
        mov     %ax,%ds ;                                               \
        mov     %ax,%es ;                                               \
        movl    $KPSEL,%eax ;                                           \
        mov     %ax,%fs ;                                               \
        FAKE_MCOUNT(13*4(%esp)) ;                                       \
        incb    _intr_nesting_level ;                                   \
        pushl   _intr_unit + (irq_num) * 4 ;                            \
        call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
        addl    $4, %esp ;                                              \
        movl    $0, lapic_eoi ;                                         \
        lock ;                                                          \
        incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
        movl    _intr_countp + (irq_num) * 4, %eax ;                    \
        lock ;                                                          \
        incl    (%eax) ;                                                \
        MEXITCOUNT ;                                                    \
        jmp     doreti_next

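/*
 * Note on FAST_INTR: the handler is called directly from the interrupt
 * gate with interrupts still disabled, with only its unit argument
 * pushed from the _intr_unit[] table.  The local APIC EOI is written as
 * soon as the handler returns; the statistics updates (_cnt+V_INTR and
 * the per-IRQ counter pointed to by _intr_countp[]) are done afterwards
 * with locked increments since other CPUs may touch them concurrently.
 * The exit goes through doreti_next so pending software interrupts and
 * ASTs are processed on the way out.
 */
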
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num)                                               \
        IMASK_LOCK ;                            /* into critical reg */ \
        testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
        jne     7f ;                    /* masked, don't mask */        \
        orl     $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */  \
        movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax, (%ecx) ;                  /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
        orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
        movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
7: ;                                            /* already masked */    \
        IMASK_UNLOCK
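
/*
 * Note on MASK_IRQ: the I/O APIC is programmed through an index/data
 * pair, so the redirection entry index from REDIRIDX() is first written
 * to the index register at the base address from IOAPICADDR(), and the
 * low 32 bits of the entry are then read, modified and written back
 * through the data window at offset IOAPIC_WINDOW.  The offsets 8 and
 * 12 above pick the I/O APIC address and redirection index out of the
 * 16 byte int_to_apicintpin[] entries.  _apic_imen caches the mask
 * bits, so an already-masked source is not touched again.
 */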
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)                                         \
        testl   $IRQ_BIT(irq_num), _apic_pin_trigger ;                  \
        jz      9f ;                            /* edge, don't mask */  \
        MASK_IRQ(irq_num) ;                                             \
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)                                                \
        movl    _apic_isrbit_location + 8 * (irq_num), %eax ;           \
        movl    (%eax), %eax ;                                          \
        testl   _apic_isrbit_location + 4 + 8 * (irq_num), %eax ;       \
        jz      9f ;                            /* not active */        \
        movl    $0, lapic_eoi ;                                         \
        APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
9:

#else
#define EOI_IRQ(irq_num)                                                \
        testl   $IRQ_BIT(irq_num), lapic_isr1;                          \
        jz      9f      ;                       /* not active */        \
        movl    $0, lapic_eoi;                                          \
        APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
9:
#endif
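
/*
 * Note on EOI_IRQ: the non-reordered variant tests bit irq_num of
 * lapic_isr1, the in-service register word covering vectors 32-63,
 * which assumes the hardware interrupt vectors are based at 0x20 so
 * that IRQ n maps to bit n of that word.  The EOI is only written when
 * the source really is in service; the APIC_INTR_REORDER variant makes
 * the same test through the precomputed _apic_isrbit_location table.
 */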


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)                                     \
        IMASK_LOCK ;                            /* into critical reg */ \
        testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
        je      7f ;                    /* bit clear, not masked */     \
        andl    $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */    \
        movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax,(%ecx) ;                   /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
        andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
        movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
7: ;                                                                    \
        IMASK_UNLOCK

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
        pushf
        cli
        pushl   $CNAME(apic_itrace_debuglock)
        call    CNAME(s_lock_np)
        addl    $4, %esp
        movl    CNAME(apic_itrace_debugbuffer_idx), %ecx
        andl    $32767, %ecx
        movl    _cpuid, %eax
        shll    $8,     %eax
        orl     8(%esp), %eax
        movw    %ax,    CNAME(apic_itrace_debugbuffer)(,%ecx,2)
        incl    %ecx
        andl    $32767, %ecx
        movl    %ecx,   CNAME(apic_itrace_debugbuffer_idx)
        pushl   $CNAME(apic_itrace_debuglock)
        call    CNAME(s_unlock_np)
        addl    $4, %esp
        popf
        ret
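
/*
 * log_intr_event records 16 bit entries of the form (cpuid << 8 | event
 * id) into the 32768 entry apic_itrace_debugbuffer ring, advancing
 * apic_itrace_debugbuffer_idx modulo 32768.  The buffer is shared by
 * all CPUs, so the update is done with interrupts disabled while
 * holding apic_itrace_debuglock.
 */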


#define APIC_ITRACE(name, irq_num, id)                                  \
        lock ;                                  /* MP-safe */           \
        incl    CNAME(name) + (irq_num) * 4 ;                           \
        pushl   %eax ;                                                  \
        pushl   %ecx ;                                                  \
        pushl   %edx ;                                                  \
        movl    $(irq_num), %eax ;                                      \
        cmpl    $APIC_INTR_DIAGNOSTIC_IRQ, %eax ;                       \
        jne     7f ;                                                    \
        pushl   $id ;                                                   \
        call    log_intr_event ;                                        \
        addl    $4, %esp ;                                              \
7: ;                                                                    \
        popl    %edx ;                                                  \
        popl    %ecx ;                                                  \
        popl    %eax
#else
#define APIC_ITRACE(name, irq_num, id)                                  \
        lock ;                                  /* MP-safe */           \
        incl    CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif

/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */  \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        movl    $KDSEL, %eax ;  /* reload with kernel's data segment */ \
        mov     %ax, %ds ;                                              \
        mov     %ax, %es ;                                              \
        movl    $KPSEL, %eax ;                                          \
        mov     %ax, %fs ;                                              \
;                                                                       \
        maybe_extra_ipending ;                                          \
;                                                                       \
        APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;    \
;                                                                       \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        EOI_IRQ(irq_num) ;                                              \
0: ;                                                                    \
        incb    _intr_nesting_level ;                                   \
;                                                                       \
  /* entry point used by doreti_unpend for HWIs. */                     \
__CONCAT(Xresume,irq_num): ;                                            \
        FAKE_MCOUNT(13*4(%esp)) ;               /* XXX avoid dbl cnt */ \
        pushl   $irq_num;                       /* pass the IRQ */      \
        APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;  \
        sti ;                                                           \
        call    _sched_ithd ;                                           \
        addl    $4, %esp ;              /* discard the parameter */     \
        APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
;                                                                       \
        MEXITCOUNT ;                                                    \
        jmp     doreti_next

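/*
 * Note on INTR: a threaded interrupt only masks the source (if it is
 * level triggered), issues the EOI, bumps the nesting level and calls
 * _sched_ithd with the IRQ number to wake the interrupt thread; the
 * handler itself runs later in thread context.  The Xresume label in
 * the middle of the macro is the entry point doreti_unpend uses to
 * replay a hardware interrupt that was left pending, which is why the
 * actual work starts there rather than at the IDT entry.
 */
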
/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
        .text
        SUPERALIGN_TEXT
        .globl _Xspuriousint
_Xspuriousint:

        /* No EOI cycle used here */

        iret

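/*
 * No EOI is done above because the local APIC does not set the ISR bit
 * for its spurious vector; issuing an EOI here could acknowledge some
 * other interrupt that really is in service.
 */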

/*
 * Handle TLB shootdowns.
 */
        .text
        SUPERALIGN_TEXT
        .globl  _Xinvltlb
_Xinvltlb:
        pushl   %eax

#ifdef COUNT_XINVLTLB_HITS
        pushl   %fs
        movl    $KPSEL, %eax
        mov     %ax, %fs
        movl    _cpuid, %eax
        popl    %fs
        ss
        incl    _xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

        movl    %cr3, %eax              /* invalidate the TLB */
        movl    %eax, %cr3

        ss                              /* stack segment, avoid %ds load */
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        popl    %eax
        iret

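/*
 * Note on Xinvltlb: writing %cr3 back to itself flushes all non-global
 * TLB entries on the receiving CPU, which is the whole point of the
 * shootdown IPI.  The ss: overrides let the handler run without
 * reloading %ds, so only %eax needs to be saved and restored.
 */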

#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->fs, 4->ds, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xcpucheckstate
        .globl _checkstate_cpustate
        .globl _checkstate_curproc
        .globl _checkstate_pc
_Xcpucheckstate:
        pushl   %eax
        pushl   %ebx
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        movl    $0, %ebx
        movl    20(%esp), %eax
        andl    $3, %eax
        cmpl    $3, %eax
        je      1f
        testl   $PSL_VM, 24(%esp)
        jne     1f
        incl    %ebx                    /* system or interrupt */
1:
        movl    _cpuid, %eax
        movl    %ebx, _checkstate_cpustate(,%eax,4)
        movl    _curproc, %ebx
        movl    %ebx, _checkstate_curproc(,%eax,4)
        movl    16(%esp), %ebx
        movl    %ebx, _checkstate_pc(,%eax,4)

        lock                            /* checkstate_probed_cpus |= (1<<id) */
        btsl    %eax, _checkstate_probed_cpus

        popl    %fs
        popl    %ds                     /* restore previous data segment */
        popl    %ebx
        popl    %eax
        iret
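
/*
 * The privilege test above looks at the saved %cs (20(%esp)) and
 * %eflags (24(%esp)): an RPL of 3 or a set PSL_VM bit means the CPU was
 * interrupted in user (or vm86) mode and %ebx stays 0; otherwise it is
 * set to 1 for kernel mode.  The result, the current process and the
 * interrupted %eip are then published per-CPU, and the bit set in
 * checkstate_probed_cpus tells the requesting CPU that this snapshot is
 * complete.
 */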

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xcpuast
_Xcpuast:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    _cpuid, %eax
        lock                            /* checkstate_need_ast &= ~(1<<id) */
        btrl    %eax, _checkstate_need_ast
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        lock
        btsl    %eax, _checkstate_pending_ast
        jc      1f

        FAKE_MCOUNT(13*4(%esp))

        orl     $AST_PENDING, _astpending       /* XXX */
        incb    _intr_nesting_level
        sti

        movl    _cpuid, %eax
        lock
        btrl    %eax, _checkstate_pending_ast
        lock
        btrl    %eax, CNAME(resched_cpus)
        jnc     2f
        orl     $AST_PENDING+AST_RESCHED,_astpending
        lock
        incl    CNAME(want_resched_cnt)
2:
        lock
        incl    CNAME(cpuast_cnt)
        MEXITCOUNT
        jmp     doreti_next
1:
        /* We are already in the process of delivering an ast for this CPU */
        POP_FRAME
        iret

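/*
 * Note on Xcpuast: the btsl on checkstate_pending_ast is the re-entry
 * guard; if the bit was already set, another AST IPI is still being
 * delivered on this CPU and the handler just returns.  Otherwise
 * AST_PENDING is requested, and if this CPU's bit is also found in
 * resched_cpus an AST_RESCHED is requested as well, before leaving
 * through doreti_next where the AST is actually serviced.
 */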

/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xforward_irq
_Xforward_irq:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        FAKE_MCOUNT(13*4(%esp))

        lock
        incl    CNAME(forward_irq_hitcnt)
        cmpb    $4, _intr_nesting_level
        jae     1f

        incb    _intr_nesting_level
        sti

        MEXITCOUNT
        jmp     doreti_next             /* Handle forwarded interrupt */
1:
        lock
        incl    CNAME(forward_irq_toodeepcnt)
        MEXITCOUNT
        POP_FRAME
        iret

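/*
 * Note on Xforward_irq: the IPI itself carries no interrupt number; it
 * only forces the target CPU through doreti_next, which picks up and
 * handles whatever interrupts were left pending for it.  If the CPU is
 * already nested four levels deep the request is counted in
 * forward_irq_toodeepcnt and dropped; the pending work is picked up
 * when the outer doreti runs.
 */
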
#if 0
/*
 * Forward an interrupt to another CPU by sending it an XFORWARD_IRQ IPI.
 */
forward_irq:
        MCOUNT
        cmpl    $0,_invltlb_ok
        jz      4f

        cmpl    $0, CNAME(forward_irq_enabled)
        jz      4f

/* XXX - this is broken now, because mp_lock doesn't exist
        movl    _mp_lock,%eax
        cmpl    $FREE_LOCK,%eax
        jne     1f
 */
        movl    $0, %eax                /* Pick CPU #0 if no one has the lock */
1:
        shrl    $24,%eax
        movl    _cpu_num_to_apic_id(,%eax,4),%ecx
        shll    $24,%ecx
        movl    lapic_icr_hi, %eax
        andl    $~APIC_ID_MASK, %eax
        orl     %ecx, %eax
        movl    %eax, lapic_icr_hi

2:
        movl    lapic_icr_lo, %eax
        andl    $APIC_DELSTAT_MASK,%eax
        jnz     2b
        movl    lapic_icr_lo, %eax
        andl    $APIC_RESV2_MASK, %eax
        orl     $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
        movl    %eax, lapic_icr_lo
3:
        movl    lapic_icr_lo, %eax
        andl    $APIC_DELSTAT_MASK,%eax
        jnz     3b
4:
        ret
#endif

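/*
 * The disabled forward_irq code above shows the usual ICR programming
 * sequence for sending the IPI: the target's APIC ID (looked up in
 * _cpu_num_to_apic_id[]) goes into the destination field of
 * lapic_icr_hi, the delivery status is polled until idle, the
 * fixed-delivery vector XFORWARD_IRQ_OFFSET is written to lapic_icr_lo,
 * and the status is polled again before returning.
 */
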
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xcpustop
_Xcpustop:
        pushl   %ebp
        movl    %esp, %ebp
        pushl   %eax
        pushl   %ecx
        pushl   %edx
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        movl    _cpuid, %eax
        imull   $PCB_SIZE, %eax
        leal    CNAME(stoppcbs)(%eax), %eax
        pushl   %eax
        call    CNAME(savectx)          /* Save process context */
        addl    $4, %esp

        movl    _cpuid, %eax

        lock
        btsl    %eax, _stopped_cpus     /* stopped_cpus |= (1<<id) */
1:
        btl     %eax, _started_cpus     /* while (!(started_cpus & (1<<id))) */
        jnc     1b

        lock
        btrl    %eax, _started_cpus     /* started_cpus &= ~(1<<id) */
        lock
        btrl    %eax, _stopped_cpus     /* stopped_cpus &= ~(1<<id) */

        test    %eax, %eax
        jnz     2f

        movl    CNAME(cpustop_restartfunc), %eax
        test    %eax, %eax
        jz      2f
        movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */

        call    *%eax
2:
        popl    %fs
        popl    %ds                     /* restore previous data segment */
        popl    %edx
        popl    %ecx
        popl    %eax
        movl    %ebp, %esp
        popl    %ebp
        iret

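/*
 * Note on Xcpustop: each stopped CPU saves its context into
 * stoppcbs[cpuid] via savectx(), advertises itself in stopped_cpus and
 * then spins until its bit shows up in started_cpus, at which point it
 * clears both bits and resumes.  Only CPU 0 (%eax == 0 after the btrl
 * pair) checks cpustop_restartfunc, clearing it before the call so the
 * hook runs exactly once.
 */
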

MCOUNT_LABEL(bintr)
        FAST_INTR(0,fastintr0)
        FAST_INTR(1,fastintr1)
        FAST_INTR(2,fastintr2)
        FAST_INTR(3,fastintr3)
        FAST_INTR(4,fastintr4)
        FAST_INTR(5,fastintr5)
        FAST_INTR(6,fastintr6)
        FAST_INTR(7,fastintr7)
        FAST_INTR(8,fastintr8)
        FAST_INTR(9,fastintr9)
        FAST_INTR(10,fastintr10)
        FAST_INTR(11,fastintr11)
        FAST_INTR(12,fastintr12)
        FAST_INTR(13,fastintr13)
        FAST_INTR(14,fastintr14)
        FAST_INTR(15,fastintr15)
        FAST_INTR(16,fastintr16)
        FAST_INTR(17,fastintr17)
        FAST_INTR(18,fastintr18)
        FAST_INTR(19,fastintr19)
        FAST_INTR(20,fastintr20)
        FAST_INTR(21,fastintr21)
        FAST_INTR(22,fastintr22)
        FAST_INTR(23,fastintr23)
#define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
        INTR(0,intr0, CLKINTR_PENDING)
        INTR(1,intr1,)
        INTR(2,intr2,)
        INTR(3,intr3,)
        INTR(4,intr4,)
        INTR(5,intr5,)
        INTR(6,intr6,)
        INTR(7,intr7,)
        INTR(8,intr8,)
        INTR(9,intr9,)
        INTR(10,intr10,)
        INTR(11,intr11,)
        INTR(12,intr12,)
        INTR(13,intr13,)
        INTR(14,intr14,)
        INTR(15,intr15,)
        INTR(16,intr16,)
        INTR(17,intr17,)
        INTR(18,intr18,)
        INTR(19,intr19,)
        INTR(20,intr20,)
        INTR(21,intr21,)
        INTR(22,intr22,)
        INTR(23,intr23,)
MCOUNT_LABEL(eintr)
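
/*
 * The expansions above instantiate the fast and threaded entry points
 * (via IDTVEC, i.e. _Xfastintr0-23 and _Xintr0-23) for the 24 I/O APIC
 * pins.  The extra CLKINTR_PENDING argument on IRQ 0 sets
 * clkintr_pending, which the clock code uses to detect a clock
 * interrupt that has been taken but not yet serviced.
 */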

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
        .text
        SUPERALIGN_TEXT
        .globl  _Xrendezvous
_Xrendezvous:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        call    _smp_rendezvous_action

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
        POP_FRAME
        iret


        .data
#if 0
/* active flag for lazy masking */
iactive:
        .long   0
#endif

#ifdef COUNT_XINVLTLB_HITS
        .globl  _xhits
_xhits:
        .space  (NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
        .globl _stopped_cpus, _started_cpus
_stopped_cpus:
        .long   0
_started_cpus:
        .long   0

#ifdef BETTER_CLOCK
        .globl _checkstate_probed_cpus
_checkstate_probed_cpus:
        .long   0
#endif /* BETTER_CLOCK */
        .globl _checkstate_need_ast
_checkstate_need_ast:
        .long   0
_checkstate_pending_ast:
        .long   0
        .globl CNAME(forward_irq_misscnt)
        .globl CNAME(forward_irq_toodeepcnt)
        .globl CNAME(forward_irq_hitcnt)
        .globl CNAME(resched_cpus)
        .globl CNAME(want_resched_cnt)
        .globl CNAME(cpuast_cnt)
        .globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
        .long 0
CNAME(forward_irq_hitcnt):
        .long 0
CNAME(forward_irq_toodeepcnt):
        .long 0
CNAME(resched_cpus):
        .long 0
CNAME(want_resched_cnt):
        .long 0
CNAME(cpuast_cnt):
        .long 0
CNAME(cpustop_restartfunc):
        .long 0


        .globl  _apic_pin_trigger
_apic_pin_trigger:
        .long   0

        .text