/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005-2006 Kip Macy
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

#include <machine/cpufunc.h>
#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>
#include <xen/xen_intr.h>
#include <machine/xen/synch_bitops.h>
#include <xen/evtchn.h>
#include <xen/hypervisor.h>

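/*
 * Find the index of the least-significant set bit in a nonzero word
 * (x86 "bsf" instruction).  The result is undefined for word == 0, so
 * callers must only pass nonzero selectors.
 */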
static inline unsigned long __ffs(unsigned long word)
{
        __asm__("bsfl %1,%0"
                :"=r" (word)
                :"rm" (word));
        return word;
}

static struct mtx irq_mapping_update_lock;
static struct xenpic *xp;
struct xenpic_intsrc {
        struct intsrc     xp_intsrc;
        void              *xp_cookie;
        uint8_t           xp_vector;
        boolean_t         xp_masked;
};

struct xenpic {
        struct pic           *xp_dynirq_pic;
        struct pic           *xp_pirq_pic;
        uint16_t             xp_numintr;
        struct xenpic_intsrc xp_pins[0];
};

#define TODO            printf("%s: not implemented!\n", __func__)

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static uint32_t irq_info[NR_IRQS];

/* Binding types. */
enum {
        IRQT_UNBOUND,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_LOCAL_PORT,
        IRQT_CALLER_PORT,
        _IRQT_COUNT
};

#define _IRQT_BITS 4
#define _EVTCHN_BITS 12
#define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)

/* Constructor for packed IRQ information. */
static inline uint32_t
mk_irq_info(uint32_t type, uint32_t index, uint32_t evtchn)
{

        return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
}

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND     mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int evtchn_from_irq(int irq)
{
        return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
}

static inline unsigned int index_from_irq(int irq)
{
        return (irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
}

static inline unsigned int type_from_irq(int irq)
{
        return irq_info[irq] >> (32 - _IRQT_BITS);
}
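
/*
 * Resulting 32-bit layout of irq_info[], given _IRQT_BITS == 4,
 * _INDEX_BITS == 16 and _EVTCHN_BITS == 12, matching mk_irq_info()
 * and the accessors above:
 *
 *      31        28 27            12 11            0
 *     +------------+----------------+---------------+
 *     |    type    |     index      |    evtchn     |
 *     +------------+----------------+---------------+
 */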

/* IRQ <-> VIRQ mapping. */

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#ifdef SMP
#error "NR_IPIS not defined"
#endif
#define NR_IPIS 1
#endif

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

#define VALID_EVTCHN(_chn) ((_chn) != 0)
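
/*
 * Note that VALID_EVTCHN() treats port 0 as "no binding", which is the
 * convention of the Linux event-channel code this file was ported from:
 * a zero event-channel field in irq_info[] means the IRQ is not
 * currently backed by a port.
 */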

#ifdef SMP

static uint8_t cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[MAX_VIRT_CPUS][NR_EVENT_CHANNELS/LONG_BIT];

#define active_evtchns(cpu,sh,idx)              \
        ((sh)->evtchn_pending[idx] &            \
         cpu_evtchn_mask[cpu][idx] &            \
         ~(sh)->evtchn_mask[idx])

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
        set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        /* By default all event channels notify CPU#0. */
        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

#define cpu_from_evtchn(evtchn)         (cpu_evtchn[evtchn])

#else

#define active_evtchns(cpu,sh,idx)              \
        ((sh)->evtchn_pending[idx] &            \
         ~(sh)->evtchn_mask[idx])
#define bind_evtchn_to_cpu(chn,cpu)     ((void)0)
#define init_evtchn_cpu_bindings()      ((void)0)
#define cpu_from_evtchn(evtchn)         (0)

#endif

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
        (void)HYPERVISOR_xen_version(0, NULL);
}

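/*
 * Main event-channel upcall: Xen reports pending ports through a
 * two-level hierarchy.  Each set bit l1i in evtchn_pending_sel selects
 * a word of evtchn_pending; each set bit l2i within that word names
 * port l1i * LONG_BIT + l2i.  For example, with LONG_BIT == 32,
 * l1i == 2 and l2i == 5 select port 2 * 32 + 5 == 69.
 */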
void
evtchn_do_upcall(struct trapframe *frame)
{
        unsigned long  l1, l2;
        unsigned int   l1i, l2i, port;
        int            irq, cpu;
        shared_info_t *s;
        vcpu_info_t   *vcpu_info;

        cpu = PCPU_GET(cpuid);
        s = HYPERVISOR_shared_info;
        vcpu_info = &s->vcpu_info[cpu];

        vcpu_info->evtchn_upcall_pending = 0;

        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
        l1 = xen_xchg(&vcpu_info->evtchn_pending_sel, 0);

        while (l1 != 0) {
                l1i = __ffs(l1);
                l1 &= ~(1UL << l1i);

                while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
                        l2i = __ffs(l2);

                        port = (l1i * LONG_BIT) + l2i;
                        if ((irq = evtchn_to_irq[port]) != -1) {
                                struct intsrc *isrc = intr_lookup_source(irq);

                                /* Ack: mask and clear before dispatch. */
                                mask_evtchn(port);
                                clear_evtchn(port);

                                intr_execute_handlers(isrc, frame);
                        } else {
                                evtchn_device_upcall(port);
                        }
                }
        }
}

/*
 * Send an IPI from the current CPU to the destination CPU.
 */
void
ipi_pcpu(unsigned int cpu, int vector)
{
        int irq;

        irq = pcpu_find(cpu)->pc_ipi_to_irq[vector];

        notify_remote_via_irq(irq);
}

static int
find_unbound_irq(void)
{
        int dynirq, irq;

        for (dynirq = 0; dynirq < NR_IRQS; dynirq++) {
                irq = dynirq_to_irq(dynirq);
                if (irq_bindcount[irq] == 0)
                        break;
        }

        if (irq == NR_IRQS)
                panic("No available IRQ to bind to: increase NR_IRQS!\n");

        return (irq);
}

static int
bind_caller_port_to_irq(unsigned int caller_port, int *port)
{
        int irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = evtchn_to_irq[caller_port]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                evtchn_to_irq[caller_port] = irq;
                irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
        }

        irq_bindcount[irq]++;
        *port = caller_port;

 out:
        mtx_unlock_spin(&irq_mapping_update_lock);
        return (irq);
}

static int
bind_local_port_to_irq(unsigned int local_port, int *port)
{
        int irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        KASSERT(evtchn_to_irq[local_port] == -1,
            ("evtchn_to_irq inconsistent"));

        if ((irq = find_unbound_irq()) < 0) {
                struct evtchn_close close = { .port = local_port };
                HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);

                goto out;
        }

        evtchn_to_irq[local_port] = irq;
        irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
        irq_bindcount[irq]++;
        *port = local_port;

 out:
        mtx_unlock_spin(&irq_mapping_update_lock);
        return (irq);
}

static int
bind_listening_port_to_irq(unsigned int remote_domain, int *port)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom        = DOMID_SELF;
        alloc_unbound.remote_dom = remote_domain;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);

        return (err != 0 ? err :
            bind_local_port_to_irq(alloc_unbound.port, port));
}

static int
bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
    unsigned int remote_port, int *port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom  = remote_domain;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);

        return (err != 0 ? err :
            bind_local_port_to_irq(bind_interdomain.local_port, port));
}

static int
bind_virq_to_irq(unsigned int virq, unsigned int cpu, int *port)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn = 0, irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = pcpu_find(cpu)->pc_virq_to_irq[virq]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);

                evtchn = bind_virq.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                pcpu_find(cpu)->pc_virq_to_irq[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;
        *port = evtchn;
out:
        mtx_unlock_spin(&irq_mapping_update_lock);

        return (irq);
}

static int
bind_ipi_to_irq(unsigned int ipi, unsigned int cpu, int *port)
{
        struct evtchn_bind_ipi bind_ipi;
        int irq;
        int evtchn = 0;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = pcpu_find(cpu)->pc_ipi_to_irq[ipi]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_ipi.vcpu = cpu;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                pcpu_find(cpu)->pc_ipi_to_irq[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }
        irq_bindcount[irq]++;
        *port = evtchn;
out:
        mtx_unlock_spin(&irq_mapping_update_lock);

        return (irq);
}

static void
unbind_from_irq(int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);
        int cpu;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        cpu = cpu_from_evtchn(evtchn);
                        pcpu_find(cpu)->pc_virq_to_irq[index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        cpu = cpu_from_evtchn(evtchn);
                        pcpu_find(cpu)->pc_ipi_to_irq[index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;
        }

        mtx_unlock_spin(&irq_mapping_update_lock);
}

int
bind_caller_port_to_irqhandler(unsigned int caller_port,
    const char *devname, driver_intr_t handler, void *arg,
    unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_caller_port_to_irq(caller_port, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, NULL, handler, arg, irqflags,
            &xp->xp_pins[irq].xp_cookie);

        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);

        if (irqp)
                *irqp = irq;

        return (0);
}

int
bind_listening_port_to_irqhandler(unsigned int remote_domain,
    const char *devname, driver_intr_t handler, void *arg,
    unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_listening_port_to_irq(remote_domain, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, NULL, handler, arg, irqflags,
            &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);
        if (irqp)
                *irqp = irq;

        return (0);
}
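
/*
 * Example (hypothetical backend code): allocate an unbound port that a
 * frontend in "remote_domain" may later connect to, and dispatch its
 * events to a driver routine.  "xbb_intr" and "sc" are illustrative
 * names, not symbols defined in this file.
 *
 *      unsigned int irq;
 *      int error;
 *
 *      error = bind_listening_port_to_irqhandler(remote_domain, "xbbd",
 *          xbb_intr, sc, INTR_TYPE_BIO | INTR_MPSAFE, &irq);
 *      if (error != 0)
 *              return (error);
 */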

int
bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
    unsigned int remote_port, const char *devname,
    driver_intr_t handler, void *arg, unsigned long irqflags,
    unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, NULL, handler, arg,
            irqflags, &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);

        if (irqp)
                *irqp = irq;
        return (0);
}

int
bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
    const char *devname, driver_filter_t filter, driver_intr_t handler,
    void *arg, unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_virq_to_irq(virq, cpu, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, filter, handler,
            arg, irqflags, &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);

        if (irqp)
                *irqp = irq;
        return (0);
}
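
/*
 * Example (hypothetical driver code): bind the Xen timer VIRQ on the
 * boot CPU to a filter routine that runs at interrupt time.
 * "clkintr_filter" is an illustrative name, not a symbol defined here.
 *
 *      unsigned int irq;
 *      int error;
 *
 *      error = bind_virq_to_irqhandler(VIRQ_TIMER, 0, "clk",
 *          clkintr_filter, NULL, NULL, INTR_TYPE_CLK, &irq);
 *      if (error != 0)
 *              panic("could not bind VIRQ_TIMER: %d", error);
 */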

int
bind_ipi_to_irqhandler(unsigned int ipi, unsigned int cpu,
    const char *devname, driver_filter_t filter,
    unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_ipi_to_irq(ipi, cpu, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, filter, NULL,
            NULL, irqflags, &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);

        if (irqp)
                *irqp = irq;
        return (0);
}

void
unbind_from_irqhandler(unsigned int irq)
{
        intr_remove_handler(xp->xp_pins[irq].xp_cookie);
        unbind_from_irq(irq);
}

#if 0
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void
rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn;

        mtx_lock_spin(&irq_mapping_update_lock);

        evtchn = evtchn_from_irq(irq);
        if (!VALID_EVTCHN(evtchn)) {
                mtx_unlock_spin(&irq_mapping_update_lock);
                return;
        }

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
        unsigned tcpu = ffs(dest) - 1;
        rebind_irq_to_cpu(irq, tcpu);
}
#endif

/*
 * Interface to generic handling in intr_machdep.c
 */

/*------------ interrupt handling --------------------------------------*/

static void     xenpic_dynirq_enable_source(struct intsrc *isrc);
static void     xenpic_dynirq_disable_source(struct intsrc *isrc, int);
static void     xenpic_dynirq_eoi_source(struct intsrc *isrc);
static void     xenpic_dynirq_enable_intr(struct intsrc *isrc);

static void     xenpic_pirq_enable_source(struct intsrc *isrc);
static void     xenpic_pirq_disable_source(struct intsrc *isrc, int);
static void     xenpic_pirq_eoi_source(struct intsrc *isrc);
static void     xenpic_pirq_enable_intr(struct intsrc *isrc);

static int      xenpic_vector(struct intsrc *isrc);
static int      xenpic_source_pending(struct intsrc *isrc);
static void     xenpic_suspend(struct pic *pic);
static void     xenpic_resume(struct pic *pic);
static int      xenpic_assign_cpu(struct intsrc *, u_int apic_id);

struct pic xenpic_dynirq_template = {
        .pic_enable_source      =       xenpic_dynirq_enable_source,
        .pic_disable_source     =       xenpic_dynirq_disable_source,
        .pic_eoi_source         =       xenpic_dynirq_eoi_source,
        .pic_enable_intr        =       xenpic_dynirq_enable_intr,
        .pic_vector             =       xenpic_vector,
        .pic_source_pending     =       xenpic_source_pending,
        .pic_suspend            =       xenpic_suspend,
        .pic_resume             =       xenpic_resume
};

struct pic xenpic_pirq_template = {
        .pic_enable_source      =       xenpic_pirq_enable_source,
        .pic_disable_source     =       xenpic_pirq_disable_source,
        .pic_eoi_source         =       xenpic_pirq_eoi_source,
        .pic_enable_intr        =       xenpic_pirq_enable_intr,
        .pic_vector             =       xenpic_vector,
        .pic_source_pending     =       xenpic_source_pending,
        .pic_suspend            =       xenpic_suspend,
        .pic_resume             =       xenpic_resume,
        .pic_assign_cpu         =       xenpic_assign_cpu
};

static void
xenpic_dynirq_enable_source(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;

        mtx_lock_spin(&irq_mapping_update_lock);
        if (xp->xp_masked) {
                irq = xenpic_vector(isrc);
                unmask_evtchn(evtchn_from_irq(irq));
                xp->xp_masked = FALSE;
        }
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_disable_source(struct intsrc *isrc, int foo)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;

        mtx_lock_spin(&irq_mapping_update_lock);
        if (!xp->xp_masked) {
                irq = xenpic_vector(isrc);
                mask_evtchn(evtchn_from_irq(irq));
                xp->xp_masked = TRUE;
        }
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_enable_intr(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        xp->xp_masked = 0;
        irq = xenpic_vector(isrc);
        unmask_evtchn(evtchn_from_irq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_eoi_source(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        xp->xp_masked = 0;
        irq = xenpic_vector(isrc);
        unmask_evtchn(evtchn_from_irq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static int
xenpic_vector(struct intsrc *isrc)
{
        struct xenpic_intsrc *pin;

        pin = (struct xenpic_intsrc *)isrc;
        /* printf("xenpic_vector(): isrc=%p,vector=%u\n", pin, pin->xp_vector); */

        return (pin->xp_vector);
}

static int
xenpic_source_pending(struct intsrc *isrc)
{
        struct xenpic_intsrc *pin = (struct xenpic_intsrc *)isrc;

        /* XXXEN: TODO */
        printf("xenpic_source_pending(): vector=%x,masked=%x\n",
            pin->xp_vector, pin->xp_masked);

/*      notify_remote_via_evtchn(pin->xp_vector); // XXX RS: Is this correct? */
        return (0);
}

static void
xenpic_suspend(struct pic *pic)
{
        TODO;
}

static void
xenpic_resume(struct pic *pic)
{
        TODO;
}

static int
xenpic_assign_cpu(struct intsrc *isrc, u_int apic_id)
{
        TODO;
        return (EOPNOTSUPP);
}

void
notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
        else
                panic("invalid event channel for irq %d", irq);
}

/* required for support of physical devices */
static inline void
pirq_unmask_notify(int pirq)
{
        struct physdev_eoi eoi = { .irq = pirq };

        if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
                (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
        }
}

static inline void
pirq_query_unmask(int pirq)
{
        struct physdev_irq_status_query irq_status_query;

        irq_status_query.irq = pirq;
        (void)HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY, &irq_status_query);
        clear_bit(pirq, &pirq_needs_unmask_notify[0]);
        if (irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
                set_bit(pirq, &pirq_needs_unmask_notify[0]);
}
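
/*
 * Sketch of the PIRQ unmask protocol implemented above:
 * pirq_query_unmask() asks Xen whether a physical IRQ requires an
 * explicit PHYSDEVOP_eoi after unmask and records the answer in
 * pirq_needs_unmask_notify[]; pirq_unmask_notify() then issues that
 * hypercall on each unmask/EOI for the IRQs that need it.
 */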

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (intr_lookup_source(_irq) == NULL)

static void
xenpic_pirq_enable_intr(struct intsrc *isrc)
{
        struct evtchn_bind_pirq bind_pirq;
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq  = irq;
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;

        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
#ifndef XEN_PRIVILEGED_GUEST
                panic("unexpected pirq call");
#endif
                if (!probing_irq(irq)) /* Some failures are expected when probing. */
                        printf("Failed to obtain physical IRQ %d\n", irq);
                mtx_unlock_spin(&irq_mapping_update_lock);
                return;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq_to_pirq(irq));

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_enable_source(struct intsrc *isrc)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_disable_source(struct intsrc *isrc, int eoi)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        mask_evtchn(evtchn);
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_eoi_source(struct intsrc *isrc)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

int
irq_to_evtchn_port(int irq)
{
        return (evtchn_from_irq(irq));
}

void
mask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;

        synch_set_bit(port, &s->evtchn_mask[0]);
}

void
unmask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        unsigned int cpu = PCPU_GET(cpuid);
        vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
                return;
        }

        synch_clear_bit(port, &s->evtchn_mask[0]);

        /*
         * The following is basically the equivalent of 'hw_resend_irq'. Just
         * like a real IO-APIC we 'lose the interrupt edge' if the channel is
         * masked.
         */
        if (synch_test_bit(port, &s->evtchn_pending[0]) &&
            !synch_test_and_set_bit(port / LONG_BIT,
                                    &vcpu_info->evtchn_pending_sel)) {
                vcpu_info->evtchn_upcall_pending = 1;
                if (!vcpu_info->evtchn_upcall_mask)
                        force_evtchn_callback();
        }
}

void irq_resume(void)
{
        int cpu, pirq, virq, ipi, irq, evtchn;

        struct evtchn_bind_virq bind_virq;
        struct evtchn_bind_ipi bind_ipi;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* Check that no PIRQs are still bound. */
        for (pirq = 0; pirq < NR_PIRQS; pirq++) {
                KASSERT(irq_info[pirq_to_irq(pirq)] == IRQ_UNBOUND,
                    ("pirq_to_irq inconsistent"));
        }

        /* Secondary CPUs must have no VIRQ or IPI bindings. */
        for (cpu = 1; cpu < MAX_VIRT_CPUS; cpu++) {
                for (virq = 0; virq < NR_VIRQS; virq++) {
                        KASSERT(pcpu_find(cpu)->pc_virq_to_irq[virq] == -1,
                            ("virq_to_irq inconsistent"));
                }
                for (ipi = 0; ipi < NR_IPIS; ipi++) {
                        KASSERT(pcpu_find(cpu)->pc_ipi_to_irq[ipi] == -1,
                            ("ipi_to_irq inconsistent"));
                }
        }

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < NR_IRQS; irq++) {
                /* Zap only the event-channel field of the packed info. */
                irq_info[irq] &= ~((1U << _EVTCHN_BITS) - 1);
        }
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        /* Primary CPU: rebind VIRQs automatically. */
        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = pcpu_find(0)->pc_virq_to_irq[virq]) == -1)
                        continue;

                KASSERT(irq_info[irq] == mk_irq_info(IRQT_VIRQ, virq, 0),
                    ("irq_info inconsistent"));

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = 0;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }

        /* Primary CPU: rebind IPIs automatically. */
        for (ipi = 0; ipi < NR_IPIS; ipi++) {
                if ((irq = pcpu_find(0)->pc_ipi_to_irq[ipi]) == -1)
                        continue;

                KASSERT(irq_info[irq] == mk_irq_info(IRQT_IPI, ipi, 0),
                    ("irq_info inconsistent"));

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = 0;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

static void
evtchn_init(void *dummy __unused)
{
        int i, cpu;
        struct xenpic_intsrc *pin, *tpin;

        init_evtchn_cpu_bindings();

        /* No VIRQ or IPI bindings. */
        for (cpu = 0; cpu < mp_ncpus; cpu++) {
                for (i = 0; i < NR_VIRQS; i++)
                        pcpu_find(cpu)->pc_virq_to_irq[i] = -1;
                for (i = 0; i < NR_IPIS; i++)
                        pcpu_find(cpu)->pc_ipi_to_irq[i] = -1;
        }

        /* No event-channel -> IRQ mappings. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                evtchn_to_irq[i] = -1;
                mask_evtchn(i); /* No event channels are 'live' right now. */
        }

        /* No IRQ -> event-channel mappings. */
        for (i = 0; i < NR_IRQS; i++)
                irq_info[i] = IRQ_UNBOUND;

        xp = malloc(sizeof(struct xenpic) + NR_IRQS*sizeof(struct xenpic_intsrc),
                    M_DEVBUF, M_WAITOK);

        xp->xp_dynirq_pic = &xenpic_dynirq_template;
        xp->xp_pirq_pic = &xenpic_pirq_template;
        xp->xp_numintr = NR_IRQS;
        bzero(xp->xp_pins, sizeof(struct xenpic_intsrc) * NR_IRQS);

        /* We need to register our PICs beforehand. */
        if (intr_register_pic(&xenpic_pirq_template))
                panic("XEN: intr_register_pic() failure");
        if (intr_register_pic(&xenpic_dynirq_template))
                panic("XEN: intr_register_pic() failure");

        /*
         * Initialize the dynamic IRQs - we initialize the structures, but
         * we do not bind them (the bind_*_to_irqhandler() routines do this).
         */
        pin = xp->xp_pins;
        for (i = 0; i < NR_DYNIRQS; i++) {
                /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
                irq_bindcount[dynirq_to_irq(i)] = 0;

                tpin = &pin[dynirq_to_irq(i)];
                tpin->xp_intsrc.is_pic = xp->xp_dynirq_pic;
                tpin->xp_vector = dynirq_to_irq(i);
        }

        /*
         * Now, we go ahead and claim every PIRQ there is.
         */
        pin = xp->xp_pins;
        for (i = 0; i < NR_PIRQS; i++) {
                /* PIRQ space is currently unbound. Zero the refcnts. */
                irq_bindcount[pirq_to_irq(i)] = 0;

#ifdef RTC_IRQ
                /* If not domain 0, force our RTC driver to fail its probe. */
                if ((i == RTC_IRQ) &&
                    !(xen_start_info->flags & SIF_INITDOMAIN))
                        continue;
#endif
                tpin = &pin[pirq_to_irq(i)];
                tpin->xp_intsrc.is_pic = xp->xp_pirq_pic;
                tpin->xp_vector = pirq_to_irq(i);
        }
}

SYSINIT(evtchn_init, SI_SUB_INTR, SI_ORDER_MIDDLE, evtchn_init, NULL);

/*
 * irq_mapping_update_lock: in order to allow an interrupt to occur in a
 * critical section, to set pcpu->ipending (etc...) properly, we must be
 * able to get the icu lock, so it can't be under witness.
 */
MTX_SYSINIT(irq_mapping_update_lock, &irq_mapping_update_lock, "xp", MTX_SPIN);