/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005-2006 Kip Macy
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>

#include <machine/cpufunc.h>
#include <machine/intr_machdep.h>
#include <machine/xen/xen-os.h>
#include <machine/xen/xen_intr.h>
#include <machine/xen/synch_bitops.h>
#include <machine/xen/evtchn.h>
#include <machine/xen/hypervisor.h>

/*
 * Linux helper functions that came along with the port;
 * XXX rename and move these elsewhere.
 */

static inline int find_first_bit(const unsigned long *addr, unsigned size)
{
        int d0, d1;
        int res;

        /* This looks at memory. Mark it volatile to tell gcc not to move it around */
        __asm__ __volatile__(
                "xorl %%eax,%%eax\n\t"
                "repe; scasl\n\t"
                "jz 1f\n\t"
                "leal -4(%%edi),%%edi\n\t"
                "bsfl (%%edi),%%eax\n"
                "1:\tsubl %%ebx,%%edi\n\t"
                "shll $3,%%edi\n\t"
                "addl %%edi,%%eax"
                :"=a" (res), "=&c" (d0), "=&D" (d1)
                :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
        return res;
}

#define min_t(type,x,y) \
        ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
static inline int __first_cpu(const xen_cpumask_t *srcp, int nbits)
{
        return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
}

static inline unsigned long __ffs(unsigned long word)
{
        __asm__("bsfl %1,%0"
                :"=r" (word)
                :"rm" (word));
        return word;
}
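
/*
 * Illustrative sketch (never compiled): __ffs() returns the index of the
 * least significant set bit, and find_first_bit() extends the same search
 * across a multi-word bitmap, returning `size' if no bit is set.
 */
#if 0
static void
bit_helper_example(void)
{
        unsigned long word = 0x28;              /* bits 3 and 5 set */
        unsigned long map[2] = { 0, 0x10 };     /* bit 36 set, with 32-bit longs */

        KASSERT(__ffs(word) == 3, ("__ffs"));
        KASSERT(find_first_bit(map, 64) == 36, ("find_first_bit"));
}
#endif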

static struct mtx irq_mapping_update_lock;
static struct xenpic *xp;
struct xenpic_intsrc {
        struct intsrc     xp_intsrc;
        uint8_t           xp_vector;
        boolean_t         xp_masked;
};

struct xenpic {
        struct pic           *xp_dynirq_pic;
        struct pic           *xp_pirq_pic;
        uint16_t             xp_numintr;
        struct xenpic_intsrc xp_pins[0];
};

#define TODO            printf("%s: not implemented!\n", __func__)

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static uint32_t irq_info[NR_IRQS];
/* Binding types. */
enum {
        IRQT_UNBOUND,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_LOCAL_PORT,
        IRQT_CALLER_PORT
};

/* Constructor for packed IRQ information. */
#define mk_irq_info(type, index, evtchn)                                \
        (((uint32_t)(type) << 24) | ((uint32_t)(index) << 16) | (uint32_t)(evtchn))
/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND     mk_irq_info(IRQT_UNBOUND, 0, 0)
/* Accessor macros for packed IRQ information. */
#define evtchn_from_irq(irq) ((uint16_t)(irq_info[irq]))
#define index_from_irq(irq)  ((uint8_t)(irq_info[irq] >> 16))
#define type_from_irq(irq)   ((uint8_t)(irq_info[irq] >> 24))
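
/*
 * Worked example of the packed encoding (illustrative only): a VIRQ
 * binding for VIRQ 1 on event channel 5 is stored as
 * mk_irq_info(IRQT_VIRQ, 1, 5) == (2 << 24) | (1 << 16) | 5; the
 * accessors above recover each field by shifting and truncating.
 */
#if 0
        uint32_t info = mk_irq_info(IRQT_VIRQ, 1, 5);

        /* type_from_irq():   (uint8_t)(info >> 24) == IRQT_VIRQ */
        /* index_from_irq():  (uint8_t)(info >> 16) == 1 */
        /* evtchn_from_irq(): (uint16_t)info == 5 */
#endif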

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[howmany(NR_PIRQS, BITS_PER_LONG)];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

#define VALID_EVTCHN(_chn) ((_chn) != 0)

#ifdef CONFIG_SMP

static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];

#define active_evtchns(cpu,sh,idx)              \
        ((sh)->evtchn_pending[idx] &            \
         cpu_evtchn_mask[cpu][idx] &            \
         ~(sh)->evtchn_mask[idx])

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
        set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        /* By default all event channels notify CPU#0. */
        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

#define cpu_from_evtchn(evtchn)         (cpu_evtchn[evtchn])

#else

#define active_evtchns(cpu,sh,idx)              \
        ((sh)->evtchn_pending[idx] &            \
         ~(sh)->evtchn_mask[idx])
#define bind_evtchn_to_cpu(chn,cpu)     ((void)0)
#define init_evtchn_cpu_bindings()      ((void)0)
#define cpu_from_evtchn(evtchn)         (0)

#endif

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
        (void)HYPERVISOR_xen_version(0, NULL);
}

void
evtchn_do_upcall(struct trapframe *frame)
{
        unsigned long  l1, l2;
        unsigned int   l1i, l2i, port;
        int            irq, cpu;
        shared_info_t *s;
        vcpu_info_t   *vcpu_info;

        cpu = smp_processor_id();
        s = HYPERVISOR_shared_info;
        vcpu_info = &s->vcpu_info[cpu];

        vcpu_info->evtchn_upcall_pending = 0;

        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
        l1 = xen_xchg(&vcpu_info->evtchn_pending_sel, 0);

        while (l1 != 0) {
                l1i = __ffs(l1);
                l1 &= ~(1UL << l1i);

                while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
                        l2i = __ffs(l2);

                        port = (l1i * BITS_PER_LONG) + l2i;
                        if ((irq = evtchn_to_irq[port]) != -1) {
                                struct intsrc *isrc = intr_lookup_source(irq);

                                /* Mask and clear the event to ack it. */
                                mask_evtchn(port);
                                clear_evtchn(port);

                                intr_execute_handlers(isrc, frame);
                        } else {
                                evtchn_device_upcall(port);
                        }
                }
        }
}
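
/*
 * Worked example of the two-level decode above (illustrative): with
 * 32-bit longs, a pending port 35 sets bit 1 of evtchn_pending_sel
 * (l1i == 1) and bit 3 of evtchn_pending[1] (l2i == 3), so the loop
 * recovers port = 1 * BITS_PER_LONG + 3 == 35.
 */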

void
ipi_pcpu(unsigned int cpu, int vector)
{
        int irq = per_cpu(ipi_to_irq, cpu)[vector];

        notify_remote_via_irq(irq);
}

static int
find_unbound_irq(void)
{
        int dynirq, irq;

        for (dynirq = 0; dynirq < NR_IRQS; dynirq++) {
                irq = dynirq_to_irq(dynirq);
                if (irq_bindcount[irq] == 0)
                        break;
        }

        if (dynirq == NR_IRQS)
                panic("No available IRQ to bind to: increase NR_IRQS!\n");

        return (irq);
}

static int
bind_caller_port_to_irq(unsigned int caller_port)
{
        int irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = evtchn_to_irq[caller_port]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                evtchn_to_irq[caller_port] = irq;
                irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
        }

        irq_bindcount[irq]++;

 out:
        mtx_unlock_spin(&irq_mapping_update_lock);
        return irq;
}

static int
bind_local_port_to_irq(unsigned int local_port)
{
        int irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        PANIC_IF(evtchn_to_irq[local_port] != -1);

        if ((irq = find_unbound_irq()) < 0) {
                struct evtchn_close close = { .port = local_port };

                PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_close, &close));
                goto out;
        }

        evtchn_to_irq[local_port] = irq;
        irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
        irq_bindcount[irq]++;

 out:
        mtx_unlock_spin(&irq_mapping_update_lock);
        return irq;
}

static int
bind_listening_port_to_irq(unsigned int remote_domain)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom        = DOMID_SELF;
        alloc_unbound.remote_dom = remote_domain;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);

        return err ? : bind_local_port_to_irq(alloc_unbound.port);
}

static int
bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
    unsigned int remote_port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom  = remote_domain;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);

        return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
}

static int
bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                        &bind_virq) != 0);

                evtchn = bind_virq.port;

                irq = find_unbound_irq();
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

        mtx_unlock_spin(&irq_mapping_update_lock);

        return irq;
}

static int
bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_ipi.vcpu = cpu;
                PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0);
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;
 out:
        mtx_unlock_spin(&irq_mapping_update_lock);

        return irq;
}

void
unbind_from_irq(int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0);

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))[index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))[index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;
        }

        mtx_unlock_spin(&irq_mapping_update_lock);
}

int
bind_caller_port_to_irqhandler(unsigned int caller_port,
                          const char *devname,
                          driver_intr_t handler,
                          void *arg,
                          unsigned long irqflags,
                          void **cookiep)
{
        unsigned int irq;
        int retval;

        irq = bind_caller_port_to_irq(caller_port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        retval = intr_add_handler(devname, irq, NULL, handler, arg, irqflags, cookiep);
        if (retval != 0) {
                unbind_from_irq(irq);
                return -retval;
        }

        return irq;
}

int
bind_listening_port_to_irqhandler(
                          unsigned int remote_domain,
                          const char *devname,
                          driver_intr_t handler,
                          void *arg,
                          unsigned long irqflags,
                          void **cookiep)
{
        unsigned int irq;
        int retval;

        irq = bind_listening_port_to_irq(remote_domain);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        retval = intr_add_handler(devname, irq, NULL, handler, arg, irqflags, cookiep);
        if (retval != 0) {
                unbind_from_irq(irq);
                return -retval;
        }

        return irq;
}

int
bind_interdomain_evtchn_to_irqhandler(
                        unsigned int remote_domain,
                        unsigned int remote_port,
                        const char *devname,
                        driver_filter_t filter,
                        driver_intr_t handler,
                        unsigned long irqflags)
{
        unsigned int irq;
        int retval;

        irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        retval = intr_add_handler(devname, irq, filter, handler, NULL, irqflags, NULL);
        if (retval != 0) {
                unbind_from_irq(irq);
                return -retval;
        }

        return irq;
}

int
bind_virq_to_irqhandler(unsigned int virq,
                        unsigned int cpu,
                        const char *devname,
                        driver_filter_t filter,
                        driver_intr_t handler,
                        unsigned long irqflags)
{
        unsigned int irq;
        int retval;

        irq = bind_virq_to_irq(virq, cpu);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        retval = intr_add_handler(devname, irq, filter, handler, NULL, irqflags, NULL);
        if (retval != 0) {
                unbind_from_irq(irq);
                return -retval;
        }

        return irq;
}

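/*
 * Usage sketch (illustrative, never compiled): binding a timer VIRQ on
 * CPU 0.  `my_timer_filter' is a hypothetical driver_filter_t supplied
 * by the caller; VIRQ_TIMER comes from the Xen interface headers.
 */
#if 0
        int irq;

        irq = bind_virq_to_irqhandler(VIRQ_TIMER, 0, "mytimer",
            my_timer_filter, NULL, INTR_TYPE_CLK);
        if (irq < 0)
                printf("failed to bind VIRQ_TIMER: %d\n", irq);
#endif
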
int
bind_ipi_to_irqhandler(unsigned int ipi,
                       unsigned int cpu,
                       const char *devname,
                       driver_intr_t handler,
                       unsigned long irqflags)
{
        unsigned int irq;
        int retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        retval = intr_add_handler(devname, irq, NULL, handler, NULL, irqflags, NULL);
        if (retval != 0) {
                unbind_from_irq(irq);
                return -retval;
        }

        return irq;
}

void
unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        if (dev_id)
                intr_remove_handler(dev_id); /* XXX */
        unbind_from_irq(irq);
}
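
/*
 * Usage sketch (illustrative, never compiled): a backend driver
 * allocating an unbound port for a peer domain and tearing the binding
 * down again.  `my_intr', `sc' and `remote_domid' are hypothetical
 * caller-supplied names.
 */
#if 0
        void *cookie;
        int irq;

        irq = bind_listening_port_to_irqhandler(remote_domid, "mydev",
            my_intr, sc, INTR_TYPE_NET, &cookie);
        /* ... exchange notifications over the channel ... */
        unbind_from_irqhandler(irq, cookie);
#endif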

#if 0
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void
rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn;

        mtx_lock_spin(&irq_mapping_update_lock);

        evtchn = evtchn_from_irq(irq);
        if (!VALID_EVTCHN(evtchn)) {
                mtx_unlock_spin(&irq_mapping_update_lock);
                return;
        }

        /* Send future instances of this interrupt to the other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void set_affinity_irq(unsigned irq, xen_cpumask_t dest)
{
        unsigned tcpu = first_cpu(dest);
        rebind_irq_to_cpu(irq, tcpu);
}
#endif

/*
 * Interface to generic handling in intr_machdep.c
 */

/*------------ interrupt handling --------------------------------------*/

static void     xenpic_dynirq_enable_source(struct intsrc *isrc);
static void     xenpic_dynirq_disable_source(struct intsrc *isrc, int);
static void     xenpic_dynirq_eoi_source(struct intsrc *isrc);
static void     xenpic_dynirq_enable_intr(struct intsrc *isrc);
static void     xenpic_dynirq_disable_intr(struct intsrc *isrc);

static void     xenpic_pirq_enable_source(struct intsrc *isrc);
static void     xenpic_pirq_disable_source(struct intsrc *isrc, int);
static void     xenpic_pirq_eoi_source(struct intsrc *isrc);
static void     xenpic_pirq_enable_intr(struct intsrc *isrc);
static void     xenpic_pirq_disable_intr(struct intsrc *isrc);

static int      xenpic_vector(struct intsrc *isrc);
static int      xenpic_source_pending(struct intsrc *isrc);
static void     xenpic_suspend(struct pic* pic);
static void     xenpic_resume(struct pic* pic);
static void     xenpic_assign_cpu(struct intsrc *, u_int apic_id);

struct pic xenpic_dynirq_template  =  {
        .pic_enable_source      =       xenpic_dynirq_enable_source,
        .pic_disable_source     =       xenpic_dynirq_disable_source,
        .pic_eoi_source         =       xenpic_dynirq_eoi_source,
        .pic_enable_intr        =       xenpic_dynirq_enable_intr,
        .pic_disable_intr       =       xenpic_dynirq_disable_intr,
        .pic_vector             =       xenpic_vector,
        .pic_source_pending     =       xenpic_source_pending,
        .pic_suspend            =       xenpic_suspend,
        .pic_resume             =       xenpic_resume
};

struct pic xenpic_pirq_template  =  {
        .pic_enable_source      =       xenpic_pirq_enable_source,
        .pic_disable_source     =       xenpic_pirq_disable_source,
        .pic_eoi_source         =       xenpic_pirq_eoi_source,
        .pic_enable_intr        =       xenpic_pirq_enable_intr,
        .pic_disable_intr       =       xenpic_pirq_disable_intr,
        .pic_vector             =       xenpic_vector,
        .pic_source_pending     =       xenpic_source_pending,
        .pic_suspend            =       xenpic_suspend,
        .pic_resume             =       xenpic_resume,
        .pic_assign_cpu         =       xenpic_assign_cpu
};

static void
xenpic_dynirq_enable_source(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;

        mtx_lock_spin(&irq_mapping_update_lock);
        if (xp->xp_masked) {
                irq = xenpic_vector(isrc);
                unmask_evtchn(evtchn_from_irq(irq));
                xp->xp_masked = FALSE;
        }
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_disable_source(struct intsrc *isrc, int foo)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;

        mtx_lock_spin(&irq_mapping_update_lock);
        if (!xp->xp_masked) {
                irq = xenpic_vector(isrc);
                mask_evtchn(evtchn_from_irq(irq));
                xp->xp_masked = TRUE;
        }
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_enable_intr(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        xp->xp_masked = 0;
        irq = xenpic_vector(isrc);
        unmask_evtchn(evtchn_from_irq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_disable_intr(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        xp->xp_masked = 1;
        irq = xenpic_vector(isrc);
        mask_evtchn(evtchn_from_irq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_eoi_source(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        xp->xp_masked = 0;
        irq = xenpic_vector(isrc);
        unmask_evtchn(evtchn_from_irq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static int
xenpic_vector(struct intsrc *isrc)
{
        struct xenpic_intsrc *pin;

        pin = (struct xenpic_intsrc *)isrc;
        /* printf("xenpic_vector(): isrc=%p,vector=%u\n", pin, pin->xp_vector); */

        return (pin->xp_vector);
}

static int
xenpic_source_pending(struct intsrc *isrc)
{
        struct xenpic_intsrc *pin = (struct xenpic_intsrc *)isrc;

        /* XXXEN: TODO */
        printf("xenpic_source_pending(): vector=%x,masked=%x\n",
            pin->xp_vector, pin->xp_masked);

/*      notify_remote_via_evtchn(pin->xp_vector); // XXX RS: Is this correct? */
        return 0;
}

static void
xenpic_suspend(struct pic* pic)
{
        TODO;
}

static void
xenpic_resume(struct pic* pic)
{
        TODO;
}

static void
xenpic_assign_cpu(struct intsrc *isrc, u_int apic_id)
{
        TODO;
}

void
notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}

/* Required for support of physical devices. */
static inline void
pirq_unmask_notify(int pirq)
{
        struct physdev_eoi eoi = { .irq = pirq };

        if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
                (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
        }
}

static inline void
pirq_query_unmask(int pirq)
{
        struct physdev_irq_status_query irq_status_query;

        irq_status_query.irq = pirq;
        (void)HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY, &irq_status_query);
        clear_bit(pirq, &pirq_needs_unmask_notify[0]);
        if (irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
                set_bit(pirq, &pirq_needs_unmask_notify[0]);
}
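
/*
 * Sketch of the PIRQ flow (illustrative): after a PIRQ's handler runs,
 * the eoi/enable paths below unmask the event channel and, when Xen has
 * flagged the PIRQ in pirq_needs_unmask_notify[], issue a PHYSDEVOP_eoi
 * hypercall so the hypervisor can unmask the physical interrupt line.
 */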

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (intr_lookup_source(_irq) == NULL)

static void
xenpic_pirq_enable_intr(struct intsrc *isrc)
{
        struct evtchn_bind_pirq bind_pirq;
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq  = irq;
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;

        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
                if (!probing_irq(irq)) /* Some failures are expected when probing. */
                        printf("Failed to obtain physical IRQ %d\n", irq);
                mtx_unlock_spin(&irq_mapping_update_lock);
                return;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq_to_pirq(irq));

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_disable_intr(struct intsrc *isrc)
{
        unsigned int irq;
        int evtchn;
        struct evtchn_close close;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        mask_evtchn(evtchn);

        close.port = evtchn;
        PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0);

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        irq_info[irq] = IRQ_UNBOUND;
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_enable_source(struct intsrc *isrc)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_disable_source(struct intsrc *isrc, int eoi)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        mask_evtchn(evtchn);
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_eoi_source(struct intsrc *isrc)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

int
irq_to_evtchn_port(int irq)
{
        return evtchn_from_irq(irq);
}

void
mask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;

        synch_set_bit(port, &s->evtchn_mask[0]);
}

void
unmask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        unsigned int cpu = smp_processor_id();
        vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };

                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
                return;
        }

        synch_clear_bit(port, &s->evtchn_mask[0]);

        /*
         * The following is basically the equivalent of 'hw_resend_irq'. Just
         * like a real IO-APIC we 'lose the interrupt edge' if the channel is
         * masked.
         */
        if (synch_test_bit(port, &s->evtchn_pending[0]) &&
            !synch_test_and_set_bit(port / BITS_PER_LONG,
                                    &vcpu_info->evtchn_pending_sel)) {
                vcpu_info->evtchn_upcall_pending = 1;
                if (!vcpu_info->evtchn_upcall_mask)
                        force_evtchn_callback();
        }
}

/* Re-establish VIRQ and IPI bindings after a save/restore. */
void irq_resume(void)
{
        int cpu, pirq, virq, ipi, irq, evtchn;

        struct evtchn_bind_virq bind_virq;
        struct evtchn_bind_ipi bind_ipi;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* Check that no PIRQs are still bound. */
        for (pirq = 0; pirq < NR_PIRQS; pirq++)
                PANIC_IF(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);

        /* Secondary CPUs must have no VIRQ or IPI bindings. */
        for (cpu = 1; cpu < NR_CPUS; cpu++) {
                for (virq = 0; virq < NR_VIRQS; virq++)
                        PANIC_IF(per_cpu(virq_to_irq, cpu)[virq] != -1);
                for (ipi = 0; ipi < NR_IPIS; ipi++)
                        PANIC_IF(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
        }

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < NR_IRQS; irq++)
                irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        /* Primary CPU: rebind VIRQs automatically. */
        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
                        continue;

                PANIC_IF(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = 0;
                PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq) != 0);
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }

        /* Primary CPU: rebind IPIs automatically. */
        for (ipi = 0; ipi < NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
                        continue;

                PANIC_IF(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = 0;
                PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0);
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

static void
evtchn_init(void *dummy __unused)
{
        int i, cpu;
        struct xenpic_intsrc *pin, *tpin;

        /* No VIRQ or IPI bindings. */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                for (i = 0; i < NR_VIRQS; i++)
                        per_cpu(virq_to_irq, cpu)[i] = -1;
                for (i = 0; i < NR_IPIS; i++)
                        per_cpu(ipi_to_irq, cpu)[i] = -1;
        }

        /* No event-channel -> IRQ mappings. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                evtchn_to_irq[i] = -1;
                mask_evtchn(i); /* No event channels are 'live' right now. */
        }

        /* No IRQ -> event-channel mappings. */
        for (i = 0; i < NR_IRQS; i++)
                irq_info[i] = IRQ_UNBOUND;

        xp = malloc(sizeof(struct xenpic) + NR_IRQS*sizeof(struct xenpic_intsrc),
                    M_DEVBUF, M_WAITOK);

        xp->xp_dynirq_pic = &xenpic_dynirq_template;
        xp->xp_pirq_pic = &xenpic_pirq_template;
        xp->xp_numintr = NR_IRQS;
        bzero(xp->xp_pins, sizeof(struct xenpic_intsrc) * NR_IRQS);

        /* We need to register our PICs beforehand. */
        if (intr_register_pic(&xenpic_pirq_template))
                panic("XEN: intr_register_pic() failure");
        if (intr_register_pic(&xenpic_dynirq_template))
                panic("XEN: intr_register_pic() failure");

        /*
         * Initialize the dynamic IRQs: we initialize the structures, but
         * we do not bind them (bind_caller_port_to_irqhandler() and
         * friends do this).
         */
        pin = xp->xp_pins;
        for (i = 0; i < NR_DYNIRQS; i++) {
                /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
                irq_bindcount[dynirq_to_irq(i)] = 0;

                tpin = &pin[dynirq_to_irq(i)];
                tpin->xp_intsrc.is_pic = xp->xp_dynirq_pic;
                tpin->xp_vector = dynirq_to_irq(i);
        }

        /*
         * Now, we go ahead and claim every PIRQ there is.
         */
        pin = xp->xp_pins;
        for (i = 0; i < NR_PIRQS; i++) {
                /* PIRQ space is currently unbound. Zero the refcnts. */
                irq_bindcount[pirq_to_irq(i)] = 0;

#ifdef RTC_IRQ
                /* If not domain 0, force our RTC driver to fail its probe. */
                if ((i == RTC_IRQ) &&
                    !(xen_start_info->flags & SIF_INITDOMAIN))
                        continue;
#endif
                tpin = &pin[pirq_to_irq(i)];
                tpin->xp_intsrc.is_pic = xp->xp_pirq_pic;
                tpin->xp_vector = pirq_to_irq(i);
        }
}

SYSINIT(evtchn_init, SI_SUB_INTR, SI_ORDER_ANY, evtchn_init, NULL);

/*
 * irq_mapping_update_lock: in order to allow an interrupt to occur in a
 * critical section, to set pcpu->ipending (etc.) properly, we must be able
 * to get the icu lock, so it can't be under witness.
 */
MTX_SYSINIT(irq_mapping_update_lock, &irq_mapping_update_lock, "xp", MTX_SPIN);