/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005-2006 Kip Macy
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

#include <machine/cpufunc.h>
#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>
#include <xen/xen_intr.h>
#include <machine/xen/synch_bitops.h>
#include <xen/evtchn.h>
#include <xen/hypervisor.h>

static inline unsigned long __ffs(unsigned long word)
{
        __asm__("bsfl %1,%0"
                :"=r" (word)
                :"rm" (word));
        return (word);
}
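
/*
 * Illustrative note (not in the original file): __ffs() returns the
 * bit index of the least-significant set bit, e.g. __ffs(0x18) == 3.
 * The result is undefined for a zero argument; callers below only
 * invoke it on values already known to be non-zero.
 */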

static struct mtx irq_mapping_update_lock;
static struct xenpic *xp;
struct xenpic_intsrc {
        struct intsrc     xp_intsrc;
        void              *xp_cookie;
        uint8_t           xp_vector;
        boolean_t         xp_masked;
};

struct xenpic {
        struct pic           *xp_dynirq_pic;
        struct pic           *xp_pirq_pic;
        uint16_t             xp_numintr;
        struct xenpic_intsrc xp_pins[0];
};

#define TODO            printf("%s: not implemented!\n", __func__)

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static uint32_t irq_info[NR_IRQS];

/* Binding types. */
enum {
        IRQT_UNBOUND,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_LOCAL_PORT,
        IRQT_CALLER_PORT,
        _IRQT_COUNT
};

#define _IRQT_BITS 4
#define _EVTCHN_BITS 12
#define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)

/* Constructor for packed IRQ information. */
static inline uint32_t
mk_irq_info(uint32_t type, uint32_t index, uint32_t evtchn)
{

        return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
}

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND     mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int evtchn_from_irq(int irq)
{
        return (irq_info[irq] & ((1U << _EVTCHN_BITS) - 1));
}

static inline unsigned int index_from_irq(int irq)
{
        return ((irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1));
}

static inline unsigned int type_from_irq(int irq)
{
        return (irq_info[irq] >> (32 - _IRQT_BITS));
}
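
/*
 * Illustrative example (not in the original file): with _IRQT_BITS = 4,
 * _INDEX_BITS = 16 and _EVTCHN_BITS = 12, mk_irq_info(IRQT_VIRQ, 3, 42)
 * packs its arguments as
 *
 *      31        28 27             12 11            0
 *      +-----------+-----------------+---------------+
 *      | IRQT_VIRQ |    index = 3    |  evtchn = 42  |
 *      +-----------+-----------------+---------------+
 *
 * and the accessors above recover each field by shifting and masking.
 */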

/* IRQ <-> VIRQ mapping. */

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#ifdef SMP
#error "NR_IPIS not defined"
#endif
#define NR_IPIS 1
#endif

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

#define VALID_EVTCHN(_chn) ((_chn) != 0)

#ifdef SMP

static uint8_t cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[MAX_VIRT_CPUS][NR_EVENT_CHANNELS/LONG_BIT];

#define active_evtchns(cpu,sh,idx)              \
        ((sh)->evtchn_pending[idx] &            \
         cpu_evtchn_mask[cpu][idx] &            \
         ~(sh)->evtchn_mask[idx])

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
        set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        /* By default all event channels notify CPU#0. */
        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

#define cpu_from_evtchn(evtchn)         (cpu_evtchn[evtchn])

#else

#define active_evtchns(cpu,sh,idx)              \
        ((sh)->evtchn_pending[idx] &            \
         ~(sh)->evtchn_mask[idx])
#define bind_evtchn_to_cpu(chn,cpu)     ((void)0)
#define init_evtchn_cpu_bindings()      ((void)0)
#define cpu_from_evtchn(evtchn)         (0)

#endif
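
/*
 * Illustrative example (not in the original file): on the SMP side,
 * active_evtchns(cpu, sh, idx) selects, within one machine word of the
 * port space, the ports that are pending, routed to 'cpu', and not
 * masked.  With evtchn_pending[idx] == 0x6, cpu_evtchn_mask[cpu][idx]
 * == 0x2 and evtchn_mask[idx] == 0, only port idx * LONG_BIT + 1 is
 * reported.
 */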

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
        (void)HYPERVISOR_xen_version(0, NULL);
}

void
evtchn_do_upcall(struct trapframe *frame)
{
        unsigned long  l1, l2;
        unsigned int   l1i, l2i, port;
        int            irq, cpu;
        shared_info_t *s;
        vcpu_info_t   *vcpu_info;

        cpu = PCPU_GET(cpuid);
        s = HYPERVISOR_shared_info;
        vcpu_info = &s->vcpu_info[cpu];

        vcpu_info->evtchn_upcall_pending = 0;

        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
        l1 = xen_xchg(&vcpu_info->evtchn_pending_sel, 0);

        while (l1 != 0) {
                l1i = __ffs(l1);
                l1 &= ~(1UL << l1i);

                while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
                        l2i = __ffs(l2);

                        port = (l1i * LONG_BIT) + l2i;
                        if ((irq = evtchn_to_irq[port]) != -1) {
                                struct intsrc *isrc = intr_lookup_source(irq);

                                /* Ack: mask and clear the port before dispatch. */
                                mask_evtchn(port);
                                clear_evtchn(port);

                                intr_execute_handlers(isrc, frame);
                        } else {
                                evtchn_device_upcall(port);
                        }
                }
        }
}
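
/*
 * Worked example (not in the original file): on a 32-bit build
 * (LONG_BIT == 32), xen_xchg() returning l1 == 0x2 gives l1i == 1, so
 * the inner loop scans ports 32..63.  A pending, unmasked port 37
 * bound to this CPU then yields l2i == 5 and port == 1 * 32 + 5 == 37,
 * which is either dispatched through its interrupt source or handed to
 * evtchn_device_upcall().
 */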

/*
 * Send an IPI from the current CPU to the destination CPU.
 */
void
ipi_pcpu(unsigned int cpu, int vector)
{
        int irq;

        irq = pcpu_find(cpu)->pc_ipi_to_irq[vector];

        notify_remote_via_irq(irq);
}

static int
find_unbound_irq(void)
{
        int dynirq, irq;

        irq = -1;
        for (dynirq = 0; dynirq < NR_DYNIRQS; dynirq++) {
                irq = dynirq_to_irq(dynirq);
                if (irq_bindcount[irq] == 0)
                        break;
        }

        if (dynirq == NR_DYNIRQS)
                panic("No available IRQ to bind to: increase NR_DYNIRQS!\n");

        return (irq);
}

static int
bind_caller_port_to_irq(unsigned int caller_port, int *port)
{
        int irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = evtchn_to_irq[caller_port]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                evtchn_to_irq[caller_port] = irq;
                irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
        }

        irq_bindcount[irq]++;
        *port = caller_port;

 out:
        mtx_unlock_spin(&irq_mapping_update_lock);
        return (irq);
}

static int
bind_local_port_to_irq(unsigned int local_port, int *port)
{
        int irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        KASSERT(evtchn_to_irq[local_port] == -1,
            ("evtchn_to_irq inconsistent"));

        if ((irq = find_unbound_irq()) < 0) {
                struct evtchn_close close = { .port = local_port };
                HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);

                goto out;
        }

        evtchn_to_irq[local_port] = irq;
        irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
        irq_bindcount[irq]++;
        *port = local_port;

 out:
        mtx_unlock_spin(&irq_mapping_update_lock);
        return (irq);
}

static int
bind_listening_port_to_irq(unsigned int remote_domain, int *port)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom        = DOMID_SELF;
        alloc_unbound.remote_dom = remote_domain;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);

        return (err != 0 ? err :
            bind_local_port_to_irq(alloc_unbound.port, port));
}

static int
bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
    unsigned int remote_port, int *port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom  = remote_domain;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);

        return (err != 0 ? err :
            bind_local_port_to_irq(bind_interdomain.local_port, port));
}

static int
bind_virq_to_irq(unsigned int virq, unsigned int cpu, int *port)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn = 0, irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = pcpu_find(cpu)->pc_virq_to_irq[virq]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);

                evtchn = bind_virq.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                pcpu_find(cpu)->pc_virq_to_irq[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;
        *port = evtchn;
out:
        mtx_unlock_spin(&irq_mapping_update_lock);

        return (irq);
}

static int
bind_ipi_to_irq(unsigned int ipi, unsigned int cpu, int *port)
{
        struct evtchn_bind_ipi bind_ipi;
        int irq;
        int evtchn = 0;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = pcpu_find(cpu)->pc_ipi_to_irq[ipi]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_ipi.vcpu = cpu;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                pcpu_find(cpu)->pc_ipi_to_irq[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }
        irq_bindcount[irq]++;
        *port = evtchn;
out:
        mtx_unlock_spin(&irq_mapping_update_lock);

        return (irq);
}

static void
unbind_from_irq(int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);
        int cpu;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        cpu = cpu_from_evtchn(evtchn);
                        pcpu_find(cpu)->pc_virq_to_irq[index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        cpu = cpu_from_evtchn(evtchn);
                        pcpu_find(cpu)->pc_ipi_to_irq[index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;
        }

        mtx_unlock_spin(&irq_mapping_update_lock);
}

int
bind_caller_port_to_irqhandler(unsigned int caller_port,
    const char *devname, driver_intr_t handler, void *arg,
    unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_caller_port_to_irq(caller_port, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, NULL, handler, arg, irqflags,
            &xp->xp_pins[irq].xp_cookie);

        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);

        if (irqp)
                *irqp = irq;

        return (0);
}

int
bind_listening_port_to_irqhandler(unsigned int remote_domain,
    const char *devname, driver_intr_t handler, void *arg,
    unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_listening_port_to_irq(remote_domain, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, NULL, handler, arg, irqflags,
            &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);
        if (irqp)
                *irqp = irq;

        return (0);
}
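
/*
 * Usage sketch (hypothetical caller, not part of this file): a backend
 * split driver typically allocates an unbound port for a frontend
 * domain and installs its handler in one call.  'frontend_domid', 'sc'
 * and 'myback_intr' are illustrative names.
 *
 *      static driver_intr_t myback_intr;
 *      unsigned int irq;
 *      int error;
 *
 *      error = bind_listening_port_to_irqhandler(frontend_domid,
 *          "myback", myback_intr, sc, INTR_TYPE_NET | INTR_MPSAFE, &irq);
 *
 * On success the frontend can connect to the returned port, and
 * unbind_from_irqhandler(irq) releases the binding when done.
 */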

int
bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
    unsigned int remote_port, const char *devname,
    driver_intr_t handler, void *arg, unsigned long irqflags,
    unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, NULL, handler, arg,
            irqflags, &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);

        if (irqp)
                *irqp = irq;
        return (0);
}

int
bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
    const char *devname, driver_filter_t filter, driver_intr_t handler,
    void *arg, unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_virq_to_irq(virq, cpu, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, filter, handler,
            arg, irqflags, &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);

        if (irqp)
                *irqp = irq;
        return (0);
}
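
/*
 * Usage sketch (hypothetical caller, not part of this file): binding a
 * filter to a virtual IRQ such as VIRQ_TIMER on CPU 0 looks like
 *
 *      unsigned int irq;
 *      int error;
 *
 *      error = bind_virq_to_irqhandler(VIRQ_TIMER, 0, "vtimer",
 *          my_timer_filter, NULL, NULL, INTR_TYPE_CLK, &irq);
 *
 * where 'my_timer_filter' is an illustrative driver_filter_t.
 */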

int
bind_ipi_to_irqhandler(unsigned int ipi, unsigned int cpu,
    const char *devname, driver_filter_t filter,
    unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_ipi_to_irq(ipi, cpu, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, filter, NULL,
            NULL, irqflags, &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);

        if (irqp)
                *irqp = irq;
        return (0);
}

void
unbind_from_irqhandler(unsigned int irq)
{
        intr_remove_handler(xp->xp_pins[irq].xp_cookie);
        unbind_from_irq(irq);
}

#if 0
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void
rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn;

        mtx_lock_spin(&irq_mapping_update_lock);

        evtchn = evtchn_from_irq(irq);
        if (!VALID_EVTCHN(evtchn)) {
                mtx_unlock_spin(&irq_mapping_update_lock);
                return;
        }

        /* Send future instances of this interrupt to the other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
        unsigned tcpu = ffs(dest) - 1;
        rebind_irq_to_cpu(irq, tcpu);
}
#endif

/*
 * Interface to generic handling in intr_machdep.c
 */

/*------------ interrupt handling --------------------------------------*/

static void     xenpic_dynirq_enable_source(struct intsrc *isrc);
static void     xenpic_dynirq_disable_source(struct intsrc *isrc, int);
static void     xenpic_dynirq_eoi_source(struct intsrc *isrc);
static void     xenpic_dynirq_enable_intr(struct intsrc *isrc);
static void     xenpic_dynirq_disable_intr(struct intsrc *isrc);

static void     xenpic_pirq_enable_source(struct intsrc *isrc);
static void     xenpic_pirq_disable_source(struct intsrc *isrc, int);
static void     xenpic_pirq_eoi_source(struct intsrc *isrc);
static void     xenpic_pirq_enable_intr(struct intsrc *isrc);

static int      xenpic_vector(struct intsrc *isrc);
static int      xenpic_source_pending(struct intsrc *isrc);
static void     xenpic_suspend(struct pic* pic);
static void     xenpic_resume(struct pic* pic);
static int      xenpic_assign_cpu(struct intsrc *, u_int apic_id);

struct pic xenpic_dynirq_template  =  {
        .pic_enable_source      =       xenpic_dynirq_enable_source,
        .pic_disable_source     =       xenpic_dynirq_disable_source,
        .pic_eoi_source         =       xenpic_dynirq_eoi_source,
        .pic_enable_intr        =       xenpic_dynirq_enable_intr,
        .pic_disable_intr       =       xenpic_dynirq_disable_intr,
        .pic_vector             =       xenpic_vector,
        .pic_source_pending     =       xenpic_source_pending,
        .pic_suspend            =       xenpic_suspend,
        .pic_resume             =       xenpic_resume
};

struct pic xenpic_pirq_template  =  {
        .pic_enable_source      =       xenpic_pirq_enable_source,
        .pic_disable_source     =       xenpic_pirq_disable_source,
        .pic_eoi_source         =       xenpic_pirq_eoi_source,
        .pic_enable_intr        =       xenpic_pirq_enable_intr,
        .pic_vector             =       xenpic_vector,
        .pic_source_pending     =       xenpic_source_pending,
        .pic_suspend            =       xenpic_suspend,
        .pic_resume             =       xenpic_resume,
        .pic_assign_cpu         =       xenpic_assign_cpu
};

static void
xenpic_dynirq_enable_source(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;

        mtx_lock_spin(&irq_mapping_update_lock);
        if (xp->xp_masked) {
                irq = xenpic_vector(isrc);
                unmask_evtchn(evtchn_from_irq(irq));
                xp->xp_masked = FALSE;
        }
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_disable_source(struct intsrc *isrc, int foo)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;

        mtx_lock_spin(&irq_mapping_update_lock);
        if (!xp->xp_masked) {
                irq = xenpic_vector(isrc);
                mask_evtchn(evtchn_from_irq(irq));
                xp->xp_masked = TRUE;
        }
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_enable_intr(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        xp->xp_masked = 0;
        irq = xenpic_vector(isrc);
        unmask_evtchn(evtchn_from_irq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_disable_intr(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        mask_evtchn(evtchn_from_irq(irq));
        xp->xp_masked = 1;
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_eoi_source(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        xp->xp_masked = 0;
        irq = xenpic_vector(isrc);
        unmask_evtchn(evtchn_from_irq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static int
xenpic_vector(struct intsrc *isrc)
{
        struct xenpic_intsrc *pin;

        pin = (struct xenpic_intsrc *)isrc;
        /* printf("xenpic_vector(): isrc=%p,vector=%u\n", pin, pin->xp_vector); */

        return (pin->xp_vector);
}

static int
xenpic_source_pending(struct intsrc *isrc)
{
        struct xenpic_intsrc *pin = (struct xenpic_intsrc *)isrc;

        /* XXXEN: TODO */
        printf("xenpic_source_pending(): vector=%x,masked=%x\n",
            pin->xp_vector, pin->xp_masked);

/*      notify_remote_via_evtchn(pin->xp_vector); // XXX RS: Is this correct? */
        return (0);
}

static void
xenpic_suspend(struct pic* pic)
{
        TODO;
}

static void
xenpic_resume(struct pic* pic)
{
        TODO;
}

static int
xenpic_assign_cpu(struct intsrc *isrc, u_int apic_id)
{
        TODO;
        return (EOPNOTSUPP);
}

void
notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
        else
                panic("invalid event channel for irq %d", irq);
}

/* required for support of physical devices */
static inline void
pirq_unmask_notify(int pirq)
{
        struct physdev_eoi eoi = { .irq = pirq };

        if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
                (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
        }
}

static inline void
pirq_query_unmask(int pirq)
{
        struct physdev_irq_status_query irq_status_query;

        irq_status_query.irq = pirq;
        (void)HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY, &irq_status_query);
        clear_bit(pirq, &pirq_needs_unmask_notify[0]);
        if (irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
                set_bit(pirq, &pirq_needs_unmask_notify[0]);
}
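
/*
 * Illustrative note (not in the original file): together these two
 * helpers implement the PIRQ EOI protocol.  pirq_query_unmask() asks
 * Xen once, at bind time, whether the line needs an explicit EOI, and
 * pirq_unmask_notify() then issues PHYSDEVOP_eoi after every unmask so
 * the hypervisor can re-enable a level-triggered source.
 */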

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (intr_lookup_source(_irq) == NULL)

static void
xenpic_pirq_enable_intr(struct intsrc *isrc)
{
        struct evtchn_bind_pirq bind_pirq;
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq  = irq;
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;

        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
#ifndef XEN_PRIVILEGED_GUEST
                panic("unexpected pirq call");
#endif
                if (!probing_irq(irq)) /* Some failures are expected when probing. */
                        printf("Failed to obtain physical IRQ %d\n", irq);
                mtx_unlock_spin(&irq_mapping_update_lock);
                return;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq_to_pirq(irq));

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_enable_source(struct intsrc *isrc)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_disable_source(struct intsrc *isrc, int eoi)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        mask_evtchn(evtchn);
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_eoi_source(struct intsrc *isrc)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

int
irq_to_evtchn_port(int irq)
{
        return (evtchn_from_irq(irq));
}

void
mask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;

        synch_set_bit(port, &s->evtchn_mask[0]);
}

void
unmask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        unsigned int cpu = PCPU_GET(cpuid);
        vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
                return;
        }

        synch_clear_bit(port, &s->evtchn_mask[0]);

        /*
         * The following is basically the equivalent of 'hw_resend_irq'. Just
         * like a real IO-APIC we 'lose the interrupt edge' if the channel is
         * masked.
         */
        if (synch_test_bit(port, &s->evtchn_pending[0]) &&
            !synch_test_and_set_bit(port / LONG_BIT,
                                    &vcpu_info->evtchn_pending_sel)) {
                vcpu_info->evtchn_upcall_pending = 1;
                if (!vcpu_info->evtchn_upcall_mask)
                        force_evtchn_callback();
        }
}
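
/*
 * Illustrative note (not in the original file): the fast path above is
 * only safe on the CPU the port is bound to, because the pending-bit
 * re-kick races with event delivery; for any other CPU the
 * EVTCHNOP_unmask hypercall has Xen perform the equivalent check on
 * the correct VCPU.
 */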

void irq_resume(void)
{
        int cpu, pirq, virq, ipi, irq, evtchn;

        struct evtchn_bind_virq bind_virq;
        struct evtchn_bind_ipi bind_ipi;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* Check that no PIRQs are still bound. */
        for (pirq = 0; pirq < NR_PIRQS; pirq++) {
                KASSERT(irq_info[pirq_to_irq(pirq)] == IRQ_UNBOUND,
                    ("pirq_to_irq inconsistent"));
        }

        /* Secondary CPUs must have no VIRQ or IPI bindings. */
        for (cpu = 1; cpu < MAX_VIRT_CPUS; cpu++) {
                for (virq = 0; virq < NR_VIRQS; virq++) {
                        KASSERT(pcpu_find(cpu)->pc_virq_to_irq[virq] == -1,
                            ("virq_to_irq inconsistent"));
                }
                for (ipi = 0; ipi < NR_IPIS; ipi++) {
                        KASSERT(pcpu_find(cpu)->pc_ipi_to_irq[ipi] == -1,
                            ("ipi_to_irq inconsistent"));
                }
        }

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < NR_IRQS; irq++) {
                /* Zap only the event-channel field of the packed info. */
                irq_info[irq] &= ~((1U << _EVTCHN_BITS) - 1);
        }
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        /* Primary CPU: rebind VIRQs automatically. */
        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = pcpu_find(0)->pc_virq_to_irq[virq]) == -1)
                        continue;

                KASSERT(irq_info[irq] == mk_irq_info(IRQT_VIRQ, virq, 0),
                    ("irq_info inconsistent"));

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = 0;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }

        /* Primary CPU: rebind IPIs automatically. */
        for (ipi = 0; ipi < NR_IPIS; ipi++) {
                if ((irq = pcpu_find(0)->pc_ipi_to_irq[ipi]) == -1)
                        continue;

                KASSERT(irq_info[irq] == mk_irq_info(IRQT_IPI, ipi, 0),
                    ("irq_info inconsistent"));

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = 0;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

static void
evtchn_init(void *dummy __unused)
{
        int i, cpu;
        struct xenpic_intsrc *pin, *tpin;

        init_evtchn_cpu_bindings();

        /* No VIRQ or IPI bindings. */
        for (cpu = 0; cpu < mp_ncpus; cpu++) {
                for (i = 0; i < NR_VIRQS; i++)
                        pcpu_find(cpu)->pc_virq_to_irq[i] = -1;
                for (i = 0; i < NR_IPIS; i++)
                        pcpu_find(cpu)->pc_ipi_to_irq[i] = -1;
        }

        /* No event-channel -> IRQ mappings. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                evtchn_to_irq[i] = -1;
                mask_evtchn(i); /* No event channels are 'live' right now. */
        }

        /* No IRQ -> event-channel mappings. */
        for (i = 0; i < NR_IRQS; i++)
                irq_info[i] = IRQ_UNBOUND;

        xp = malloc(sizeof(struct xenpic) + NR_IRQS*sizeof(struct xenpic_intsrc),
                    M_DEVBUF, M_WAITOK);

        xp->xp_dynirq_pic = &xenpic_dynirq_template;
        xp->xp_pirq_pic = &xenpic_pirq_template;
        xp->xp_numintr = NR_IRQS;
        bzero(xp->xp_pins, sizeof(struct xenpic_intsrc) * NR_IRQS);

        /* We need to register our PICs beforehand. */
        if (intr_register_pic(&xenpic_pirq_template))
                panic("XEN: intr_register_pic() failure");
        if (intr_register_pic(&xenpic_dynirq_template))
                panic("XEN: intr_register_pic() failure");

        /*
         * Initialize the dynamic IRQs - we initialize the structures, but
         * we do not bind them (the bind_*_to_irqhandler() functions do this).
         */
        pin = xp->xp_pins;
        for (i = 0; i < NR_DYNIRQS; i++) {
                /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
                irq_bindcount[dynirq_to_irq(i)] = 0;

                tpin = &pin[dynirq_to_irq(i)];
                tpin->xp_intsrc.is_pic = xp->xp_dynirq_pic;
                tpin->xp_vector = dynirq_to_irq(i);
        }

        /*
         * Now, we go ahead and claim every PIRQ there is.
         */
        pin = xp->xp_pins;
        for (i = 0; i < NR_PIRQS; i++) {
                /* PIRQ space is currently unbound. Zero the refcnts. */
                irq_bindcount[pirq_to_irq(i)] = 0;

#ifdef RTC_IRQ
                /* If not domain 0, force our RTC driver to fail its probe. */
                if ((i == RTC_IRQ) &&
                    !(xen_start_info->flags & SIF_INITDOMAIN))
                        continue;
#endif
                tpin = &pin[pirq_to_irq(i)];
                tpin->xp_intsrc.is_pic = xp->xp_pirq_pic;
                tpin->xp_vector = pirq_to_irq(i);
        }
}

SYSINIT(evtchn_init, SI_SUB_INTR, SI_ORDER_MIDDLE, evtchn_init, NULL);

/*
 * irq_mapping_update_lock: in order to allow an interrupt to occur in a
 * critical section, to set pcpu->ipending (etc...) properly, we must be
 * able to get the icu lock, so it can't be under witness.
 */
MTX_SYSINIT(irq_mapping_update_lock, &irq_mapping_update_lock, "xp", MTX_SPIN);