/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005-2006 Kip Macy
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

#include <machine/cpufunc.h>
#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xen_intr.h>
#include <machine/xen/synch_bitops.h>
#include <machine/xen/evtchn.h>
#include <machine/xen/hypervisor.h>

/*
 * Linux helper functions that were pulled in with the port.
 * XXX: rename and move.
 */

static inline int find_first_bit(const unsigned long *addr, unsigned size)
{
        int d0, d1;
        int res;

        /* This looks at memory. Mark it volatile to tell gcc not to move it around */
        __asm__ __volatile__(
                "xorl %%eax,%%eax\n\t"
                "repe; scasl\n\t"
                "jz 1f\n\t"
                "leal -4(%%edi),%%edi\n\t"
                "bsfl (%%edi),%%eax\n"
                "1:\tsubl %%ebx,%%edi\n\t"
                "shll $3,%%edi\n\t"
                "addl %%edi,%%eax"
                :"=a" (res), "=&c" (d0), "=&D" (d1)
                :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
        return res;
}

#define min_t(type,x,y) \
        ({ type __x = (x); type __y = (y); __x < __y ? __x : __y; })
#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
static inline int __first_cpu(const xen_cpumask_t *srcp, int nbits)
{
        return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
}

static inline unsigned long __ffs(unsigned long word)
{
        __asm__("bsfl %1,%0"
                :"=r" (word)
                :"rm" (word));
        return word;
}
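
/*
 * Illustrative note (added commentary, not from the original sources):
 * find_first_bit() returns the index of the least-significant set bit
 * in a bitmap of 'size' bits, scanning 32 bits at a time, and __ffs()
 * does the same for a single non-zero word.  For example, with
 * bits[] = { 0x0, 0x8 }, find_first_bit(bits, 64) == 35, and
 * __ffs(0x18) == 3.  When no bit is set, find_first_bit() returns
 * 'size' rounded up to a multiple of 32, which is why __first_cpu()
 * clamps the result with min_t().
 */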

static struct mtx irq_mapping_update_lock;
static struct xenpic *xp;
struct xenpic_intsrc {
        struct intsrc     xp_intsrc;
        uint8_t           xp_vector;
        boolean_t         xp_masked;
};

struct xenpic {
        struct pic           *xp_dynirq_pic;
        struct pic           *xp_pirq_pic;
        uint16_t             xp_numintr;
        struct xenpic_intsrc xp_pins[0];
};

#define TODO            printf("%s: not implemented!\n", __func__)

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static uint32_t irq_info[NR_IRQS];

/* Binding types. */
enum {
        IRQT_UNBOUND,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_LOCAL_PORT,
        IRQT_CALLER_PORT,
        _IRQT_COUNT
};

#define _IRQT_BITS 4
#define _EVTCHN_BITS 12
#define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)

/* Constructor for packed IRQ information. */
static inline uint32_t
mk_irq_info(uint32_t type, uint32_t index, uint32_t evtchn)
{

        return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
}
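
/*
 * Illustrative note (added commentary): an irq_info[] entry packs the
 * binding type into bits 31..28, the type-specific index into bits
 * 27..12, and the event-channel port into bits 11..0.  For example,
 * mk_irq_info(IRQT_VIRQ, 1, 0x123) == (2 << 28) | (1 << 12) | 0x123
 * == 0x20001123.
 */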

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND     mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int evtchn_from_irq(int irq)
{
        return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
}

static inline unsigned int index_from_irq(int irq)
{
        return (irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
}

static inline unsigned int type_from_irq(int irq)
{
        return irq_info[irq] >> (32 - _IRQT_BITS);
}

/* IRQ <-> VIRQ mapping. */

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#ifdef SMP
#error "NR_IPIS not defined"
#endif
#define NR_IPIS 1
#endif

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[howmany(NR_PIRQS, BITS_PER_LONG)];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

#define VALID_EVTCHN(_chn) ((_chn) != 0)

#ifdef SMP

static uint8_t cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];

#define active_evtchns(cpu,sh,idx)              \
        ((sh)->evtchn_pending[idx] &            \
         cpu_evtchn_mask[cpu][idx] &            \
         ~(sh)->evtchn_mask[idx])

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
        set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        /* By default all event channels notify CPU#0. */
        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

#define cpu_from_evtchn(evtchn)         (cpu_evtchn[evtchn])

#else

#define active_evtchns(cpu,sh,idx)              \
        ((sh)->evtchn_pending[idx] &            \
         ~(sh)->evtchn_mask[idx])
#define bind_evtchn_to_cpu(chn,cpu)     ((void)0)
#define init_evtchn_cpu_bindings()      ((void)0)
#define cpu_from_evtchn(evtchn)         (0)

#endif
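
/*
 * Worked example (added commentary): on a 32-bit guest with event
 * channel 35 pending, unmasked, and routed to 'cpu',
 * active_evtchns(cpu, s, 1) has bit 3 set: evtchn_pending[1] supplies
 * the pending bit, cpu_evtchn_mask[cpu][1] keeps only channels bound
 * to this CPU, and ~evtchn_mask[1] drops masked channels.
 */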

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
        (void)HYPERVISOR_xen_version(0, NULL);
}

void
evtchn_do_upcall(struct intrframe *frame)
{
        unsigned long  l1, l2;
        unsigned int   l1i, l2i, port;
        int            irq, cpu;
        shared_info_t *s;
        vcpu_info_t   *vcpu_info;

        cpu = smp_processor_id();
        s = HYPERVISOR_shared_info;
        vcpu_info = &s->vcpu_info[cpu];

        vcpu_info->evtchn_upcall_pending = 0;

        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
        l1 = xen_xchg(&vcpu_info->evtchn_pending_sel, 0);

        while (l1 != 0) {
                l1i = __ffs(l1);
                l1 &= ~(1UL << l1i);

                while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
                        l2i = __ffs(l2);

                        port = (l1i * BITS_PER_LONG) + l2i;
                        if ((irq = evtchn_to_irq[port]) != -1) {
                                struct intsrc *isrc = intr_lookup_source(irq);
                                /*
                                 * Mask and clear the channel first: this is
                                 * the "ack" for an edge-triggered source.
                                 */
                                mask_evtchn(port);
                                clear_evtchn(port);

                                intr_execute_handlers(isrc, frame);
                        } else {
                                evtchn_device_upcall(port);
                        }
                }
        }
}
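
/*
 * Note (added commentary): the scan above is two-level.  Bit i of the
 * per-VCPU selector word l1 means "word i of evtchn_pending[] may hold
 * pending ports"; l2 then walks the active ports within that word, so
 * port = l1i * BITS_PER_LONG + l2i.  Because each port is masked and
 * cleared before its handler runs, an event arriving mid-handler is
 * latched in evtchn_pending[] rather than lost.
 */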

void
ipi_pcpu(unsigned int cpu, int vector)
{
        int irq;

        irq = per_cpu(ipi_to_irq, cpu)[vector];

        notify_remote_via_irq(irq);
}
266
static int
find_unbound_irq(void)
{
        int dynirq, irq;

        for (dynirq = 0; dynirq < NR_IRQS; dynirq++) {
                irq = dynirq_to_irq(dynirq);
                if (irq_bindcount[irq] == 0)
                        break;
        }

        if (dynirq == NR_IRQS)
                panic("No available IRQ to bind to: increase NR_IRQS!\n");

        return (irq);
}
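
/*
 * Note (assumption): dynirq_to_irq() and pirq_to_irq() come from the
 * Xen interrupt headers and are assumed to map the dynamic and
 * physical IRQ sub-ranges onto disjoint slices of 0..NR_IRQS-1, so
 * the linear search above only ever hands out dynamic IRQs.
 */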

static int
bind_caller_port_to_irq(unsigned int caller_port)
{
        int irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = evtchn_to_irq[caller_port]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                evtchn_to_irq[caller_port] = irq;
                irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
        }

        irq_bindcount[irq]++;

 out:
        mtx_unlock_spin(&irq_mapping_update_lock);
        return irq;
}

static int
bind_local_port_to_irq(unsigned int local_port)
{
        int irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        PANIC_IF(evtchn_to_irq[local_port] != -1);

        if ((irq = find_unbound_irq()) < 0) {
                struct evtchn_close close = { .port = local_port };
                PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_close, &close));

                goto out;
        }

        evtchn_to_irq[local_port] = irq;
        irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
        irq_bindcount[irq]++;

 out:
        mtx_unlock_spin(&irq_mapping_update_lock);
        return irq;
}

static int
bind_listening_port_to_irq(unsigned int remote_domain)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom        = DOMID_SELF;
        alloc_unbound.remote_dom = remote_domain;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);

        return err ? : bind_local_port_to_irq(alloc_unbound.port);
}
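
/*
 * Note (added commentary): "err ? : expr" is the GNU C conditional
 * with an omitted middle operand; it evaluates to err when err is
 * non-zero and to expr otherwise, so a hypercall failure is returned
 * as-is and success falls through to the IRQ binding.  The same idiom
 * is used by bind_interdomain_evtchn_to_irq() below.
 */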

static int
bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
    unsigned int remote_port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom  = remote_domain;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);

        return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
}

static int
bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                        &bind_virq) != 0);

                evtchn = bind_virq.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;
out:
        mtx_unlock_spin(&irq_mapping_update_lock);

        return irq;
}

extern int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu);

int
bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_ipi.vcpu = cpu;
                PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0);
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }
        irq_bindcount[irq]++;
out:
        mtx_unlock_spin(&irq_mapping_update_lock);

        return irq;
}

void
unbind_from_irq(int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0);

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))[index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))[index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;
        }

        mtx_unlock_spin(&irq_mapping_update_lock);
}
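
/*
 * Note (added commentary): unbind_from_irq() drops one reference;
 * only when the last binding goes away is the port closed with
 * EVTCHNOP_close and the slot returned to IRQ_UNBOUND, matching the
 * irq_bindcount[]++ taken on each bind_*_to_irq() path.
 */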

int
bind_caller_port_to_irqhandler(unsigned int caller_port,
                          const char *devname,
                          driver_intr_t handler,
                          void *arg,
                          unsigned long irqflags,
                          void **cookiep)
{
        unsigned int irq;
        int retval;

        irq = bind_caller_port_to_irq(caller_port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        retval = intr_add_handler(devname, irq, handler, arg, irqflags, cookiep);
        if (retval != 0) {
                unbind_from_irq(irq);
                return -retval;
        }

        return irq;
}

int
bind_listening_port_to_irqhandler(
                          unsigned int remote_domain,
                          const char *devname,
                          driver_intr_t handler,
                          void *arg,
                          unsigned long irqflags,
                          void **cookiep)
{
        unsigned int irq;
        int retval;

        irq = bind_listening_port_to_irq(remote_domain);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        retval = intr_add_handler(devname, irq, handler, arg, irqflags, cookiep);
        if (retval != 0) {
                unbind_from_irq(irq);
                return -retval;
        }

        return irq;
}

int
bind_interdomain_evtchn_to_irqhandler(
                        unsigned int remote_domain,
                        unsigned int remote_port,
                        const char *devname,
                        driver_intr_t handler,
                        unsigned long irqflags)
{
        unsigned int irq;
        int retval;

        irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        retval = intr_add_handler(devname, irq, handler, NULL, irqflags, NULL);
        if (retval != 0) {
                unbind_from_irq(irq);
                return -retval;
        }

        return irq;
}

int
bind_virq_to_irqhandler(unsigned int virq,
                        unsigned int cpu,
                        const char *devname,
                        driver_intr_t handler,
                        unsigned long irqflags)
{
        unsigned int irq;
        int retval;

        irq = bind_virq_to_irq(virq, cpu);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        retval = intr_add_handler(devname, irq, handler, NULL, irqflags, NULL);
        if (retval != 0) {
                unbind_from_irq(irq);
                return -retval;
        }

        return irq;
}
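
/*
 * Usage sketch (illustrative only, following this file's #if 0
 * convention; VIRQ_DEBUG is a standard Xen VIRQ, while debug_intr is
 * a placeholder handler, not part of this file):
 */
#if 0
        int irq;

        irq = bind_virq_to_irqhandler(VIRQ_DEBUG, 0, "debug",
            debug_intr, INTR_TYPE_MISC);
        if (irq < 0)
                panic("failed to bind VIRQ_DEBUG");
        /* ... later ... */
        unbind_from_irqhandler(irq, NULL);
#endif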

int
bind_ipi_to_irqhandler(unsigned int ipi,
                       unsigned int cpu,
                       const char *devname,
                       driver_intr_t handler,
                       unsigned long irqflags)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        retval = intr_add_handler(devname, irq, handler, NULL, irqflags, NULL);
        if (retval != 0) {
                unbind_from_irq(irq);
                return -retval;
        }

        return irq;
}

void
unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        if (dev_id)
                intr_remove_handler(dev_id); /* XXX */
        unbind_from_irq(irq);
}

#if 0
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void
rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn;

        mtx_lock_spin(&irq_mapping_update_lock);

        evtchn = evtchn_from_irq(irq);
        if (!VALID_EVTCHN(evtchn)) {
                mtx_unlock_spin(&irq_mapping_update_lock);
                return;
        }

        /* Send future instances of this interrupt to the other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void set_affinity_irq(unsigned irq, xen_cpumask_t dest)
{
        unsigned tcpu = first_cpu(dest);
        rebind_irq_to_cpu(irq, tcpu);
}
#endif

/*
 * Interface to generic handling in intr_machdep.c
 */

/*------------ interrupt handling --------------------------------------*/

static void     xenpic_dynirq_enable_source(struct intsrc *isrc);
static void     xenpic_dynirq_disable_source(struct intsrc *isrc, int);
static void     xenpic_dynirq_eoi_source(struct intsrc *isrc);
static void     xenpic_dynirq_enable_intr(struct intsrc *isrc);

static void     xenpic_pirq_enable_source(struct intsrc *isrc);
static void     xenpic_pirq_disable_source(struct intsrc *isrc, int);
static void     xenpic_pirq_eoi_source(struct intsrc *isrc);
static void     xenpic_pirq_enable_intr(struct intsrc *isrc);

static int      xenpic_vector(struct intsrc *isrc);
static int      xenpic_source_pending(struct intsrc *isrc);
static void     xenpic_suspend(struct pic* pic);
static void     xenpic_resume(struct pic* pic);
static void     xenpic_assign_cpu(struct intsrc *, u_int apic_id);

struct pic xenpic_dynirq_template  =  {
        .pic_enable_source      =       xenpic_dynirq_enable_source,
        .pic_disable_source     =       xenpic_dynirq_disable_source,
        .pic_eoi_source         =       xenpic_dynirq_eoi_source,
        .pic_enable_intr        =       xenpic_dynirq_enable_intr,
        .pic_vector             =       xenpic_vector,
        .pic_source_pending     =       xenpic_source_pending,
        .pic_suspend            =       xenpic_suspend,
        .pic_resume             =       xenpic_resume
};

struct pic xenpic_pirq_template  =  {
        .pic_enable_source      =       xenpic_pirq_enable_source,
        .pic_disable_source     =       xenpic_pirq_disable_source,
        .pic_eoi_source         =       xenpic_pirq_eoi_source,
        .pic_enable_intr        =       xenpic_pirq_enable_intr,
        .pic_vector             =       xenpic_vector,
        .pic_source_pending     =       xenpic_source_pending,
        .pic_suspend            =       xenpic_suspend,
        .pic_resume             =       xenpic_resume,
        .pic_assign_cpu         =       xenpic_assign_cpu
};

static void
xenpic_dynirq_enable_source(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;

        mtx_lock_spin(&irq_mapping_update_lock);
        if (xp->xp_masked) {
                irq = xenpic_vector(isrc);
                unmask_evtchn(evtchn_from_irq(irq));
                xp->xp_masked = FALSE;
        }
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_disable_source(struct intsrc *isrc, int foo)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;

        mtx_lock_spin(&irq_mapping_update_lock);
        if (!xp->xp_masked) {
                irq = xenpic_vector(isrc);
                mask_evtchn(evtchn_from_irq(irq));
                xp->xp_masked = TRUE;
        }
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_enable_intr(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        xp->xp_masked = 0;
        irq = xenpic_vector(isrc);
        unmask_evtchn(evtchn_from_irq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_eoi_source(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        xp->xp_masked = 0;
        irq = xenpic_vector(isrc);
        unmask_evtchn(evtchn_from_irq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static int
xenpic_vector(struct intsrc *isrc)
{
        struct xenpic_intsrc *pin;

        pin = (struct xenpic_intsrc *)isrc;
        /* printf("xenpic_vector(): isrc=%p,vector=%u\n", pin, pin->xp_vector); */

        return (pin->xp_vector);
}

static int
xenpic_source_pending(struct intsrc *isrc)
{
        struct xenpic_intsrc *pin = (struct xenpic_intsrc *)isrc;

        /* XXXEN: TODO */
        printf("xenpic_source_pending(): vector=%x,masked=%x\n",
            pin->xp_vector, pin->xp_masked);

        /* notify_remote_via_evtchn(pin->xp_vector); // XXX RS: Is this correct? */
        return 0;
}

static void
xenpic_suspend(struct pic* pic)
{
        TODO;
}

static void
xenpic_resume(struct pic* pic)
{
        TODO;
}

static void
xenpic_assign_cpu(struct intsrc *isrc, u_int apic_id)
{
        TODO;
}

void
notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
        else
                panic("invalid evtchn");
}

/* Required for support of physical devices. */
static inline void
pirq_unmask_notify(int pirq)
{
        struct physdev_eoi eoi = { .irq = pirq };

        if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
                (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
        }
}

static inline void
pirq_query_unmask(int pirq)
{
        struct physdev_irq_status_query irq_status_query;

        irq_status_query.irq = pirq;
        (void)HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY, &irq_status_query);
        clear_bit(pirq, &pirq_needs_unmask_notify[0]);
        if (irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
                set_bit(pirq, &pirq_needs_unmask_notify[0]);
}

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (intr_lookup_source(_irq) == NULL)

static void
xenpic_pirq_enable_intr(struct intsrc *isrc)
{
        struct evtchn_bind_pirq bind_pirq;
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq = irq;
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;

        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
#ifndef XEN_PRIVILEGED_GUEST
                panic("unexpected pirq call");
#endif
                if (!probing_irq(irq)) /* Some failures are expected when probing. */
                        printf("Failed to obtain physical IRQ %d\n", irq);
                mtx_unlock_spin(&irq_mapping_update_lock);
                return;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq_to_pirq(irq));

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_enable_source(struct intsrc *isrc)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_disable_source(struct intsrc *isrc, int eoi)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        mask_evtchn(evtchn);
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_eoi_source(struct intsrc *isrc)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

int
irq_to_evtchn_port(int irq)
{
        return evtchn_from_irq(irq);
}

void
mask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        synch_set_bit(port, &s->evtchn_mask[0]);
}

void
unmask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        unsigned int cpu = smp_processor_id();
        vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
                return;
        }

        synch_clear_bit(port, &s->evtchn_mask[0]);

        /*
         * The following is basically the equivalent of 'hw_resend_irq'. Just
         * like a real IO-APIC we 'lose the interrupt edge' if the channel is
         * masked.
         */
        if (synch_test_bit(port, &s->evtchn_pending[0]) &&
            !synch_test_and_set_bit(port / BITS_PER_LONG,
                                    &vcpu_info->evtchn_pending_sel)) {
                vcpu_info->evtchn_upcall_pending = 1;
                if (!vcpu_info->evtchn_upcall_mask)
                        force_evtchn_callback();
        }
}

void irq_resume(void)
{
        int cpu, pirq, virq, ipi, irq, evtchn;

        struct evtchn_bind_virq bind_virq;
        struct evtchn_bind_ipi bind_ipi;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* Check that no PIRQs are still bound. */
        for (pirq = 0; pirq < NR_PIRQS; pirq++)
                PANIC_IF(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);

        /* Secondary CPUs must have no VIRQ or IPI bindings. */
        for (cpu = 1; cpu < NR_CPUS; cpu++) {
                for (virq = 0; virq < NR_VIRQS; virq++)
                        PANIC_IF(per_cpu(virq_to_irq, cpu)[virq] != -1);
                for (ipi = 0; ipi < NR_IPIS; ipi++)
                        PANIC_IF(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
        }

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < NR_IRQS; irq++)
                irq_info[irq] &= ~((1U << _EVTCHN_BITS) - 1); /* zap event-channel binding */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        /* Primary CPU: rebind VIRQs automatically. */
        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
                        continue;

                PANIC_IF(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = 0;
                PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq) != 0);
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }

        /* Primary CPU: rebind IPIs automatically. */
        for (ipi = 0; ipi < NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
                        continue;

                PANIC_IF(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));

                /* Get a new binding from Xen. */
                memset(&bind_ipi, 0, sizeof(bind_ipi));
                bind_ipi.vcpu = 0;
                PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0);
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

static void
evtchn_init(void *dummy __unused)
{
        int i, cpu;
        struct xenpic_intsrc *pin, *tpin;

        init_evtchn_cpu_bindings();

        /* No VIRQ or IPI bindings. */
        for (cpu = 0; cpu < mp_ncpus; cpu++) {
                for (i = 0; i < NR_VIRQS; i++)
                        per_cpu(virq_to_irq, cpu)[i] = -1;
                for (i = 0; i < NR_IPIS; i++)
                        per_cpu(ipi_to_irq, cpu)[i] = -1;
        }

        /* No event-channel -> IRQ mappings. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                evtchn_to_irq[i] = -1;
                mask_evtchn(i); /* No event channels are 'live' right now. */
        }

        /* No IRQ -> event-channel mappings. */
        for (i = 0; i < NR_IRQS; i++)
                irq_info[i] = IRQ_UNBOUND;

        xp = malloc(sizeof(struct xenpic) + NR_IRQS*sizeof(struct xenpic_intsrc),
                    M_DEVBUF, M_WAITOK);

        xp->xp_dynirq_pic = &xenpic_dynirq_template;
        xp->xp_pirq_pic = &xenpic_pirq_template;
        xp->xp_numintr = NR_IRQS;
        bzero(xp->xp_pins, sizeof(struct xenpic_intsrc) * NR_IRQS);

        /* We need to register our PICs beforehand. */
        if (intr_register_pic(&xenpic_pirq_template))
                panic("XEN: intr_register_pic() failure");
        if (intr_register_pic(&xenpic_dynirq_template))
                panic("XEN: intr_register_pic() failure");

        /*
         * Initialize the dynamic IRQs - we initialize the structures, but
         * we do not bind them (the bind_*_to_irqhandler() functions do this).
         */
        pin = xp->xp_pins;
        for (i = 0; i < NR_DYNIRQS; i++) {
                /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
                irq_bindcount[dynirq_to_irq(i)] = 0;

                tpin = &pin[dynirq_to_irq(i)];
                tpin->xp_intsrc.is_pic = xp->xp_dynirq_pic;
                tpin->xp_vector = dynirq_to_irq(i);
        }

        /*
         * Now, we go ahead and claim every PIRQ there is.
         */
        pin = xp->xp_pins;
        for (i = 0; i < NR_PIRQS; i++) {
                /* Physical IRQ space is currently unbound. Zero the refcnts. */
                irq_bindcount[pirq_to_irq(i)] = 0;

#ifdef RTC_IRQ
                /* If not domain 0, force our RTC driver to fail its probe. */
                if ((i == RTC_IRQ) &&
                    !(xen_start_info->flags & SIF_INITDOMAIN))
                        continue;
#endif
                tpin = &pin[pirq_to_irq(i)];
                tpin->xp_intsrc.is_pic = xp->xp_pirq_pic;
                tpin->xp_vector = pirq_to_irq(i);
        }
}

SYSINIT(evtchn_init, SI_SUB_INTR, SI_ORDER_MIDDLE, evtchn_init, NULL);

/*
 * irq_mapping_update_lock: in order to allow an interrupt to occur in a
 * critical section, to set pcpu->ipending (etc...) properly, we must be
 * able to get the icu lock, so it can't be under witness.
 */
MTX_SYSINIT(irq_mapping_update_lock, &irq_mapping_update_lock, "xp", MTX_SPIN);