/******************************************************************************
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005-2006 Kip Macy
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>

#include <machine/cpufunc.h>
#include <machine/intr_machdep.h>
#include <machine/xen/xen-os.h>
#include <machine/xen/xen_intr.h>
#include <machine/xen/synch_bitops.h>
#include <machine/xen/evtchn.h>
#include <machine/xen/hypervisor.h>
/* Linux helper functions that got sucked in as dependencies. */

/*
 * find_first_bit - find the first set bit in a memory region.
 * Returns the bit number of the first set bit; if no bit is set, the
 * result is >= size (callers clamp it, see __first_cpu() below).
 */
static inline int find_first_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1, res;

	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"xorl %%eax,%%eax\n\t"
		"repe; scasl\n\t"
		"jz 1f\n\t"
		"leal -4(%%edi),%%edi\n\t"
		"bsfl (%%edi),%%eax\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%eax"
		:"=a" (res), "=&c" (d0), "=&D" (d1)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
	return res;
}
#define min_t(type,x,y) \
	({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
static inline int __first_cpu(const xen_cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
}

/* Find the first set bit in 'word'; undefined if 'word' is zero. */
static inline unsigned long __ffs(unsigned long word)
{
	__asm__("bsfl %1,%0" : "=r" (word) : "rm" (word));
	return word;
}
static struct mtx irq_mapping_update_lock;
static struct xenpic *xp;

struct xenpic_intsrc {
	struct intsrc	xp_intsrc;
	uint8_t		xp_vector;
	boolean_t	xp_masked;
};

struct xenpic {
	struct pic		*xp_dynirq_pic;
	struct pic		*xp_pirq_pic;
	uint16_t		xp_numintr;
	struct xenpic_intsrc	xp_pins[0];
};
#define TODO printf("%s: not implemented!\n", __func__)

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static uint32_t irq_info[NR_IRQS];
/* Binding types. */
enum {
	IRQT_UNBOUND,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_LOCAL_PORT,
	IRQT_CALLER_PORT
};

/* Constructor for packed IRQ information. */
#define mk_irq_info(type, index, evtchn) \
	(((uint32_t)(type) << 24) | ((uint32_t)(index) << 16) | (uint32_t)(evtchn))

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)

/* Accessor macros for packed IRQ information. */
#define evtchn_from_irq(irq) ((uint16_t)(irq_info[irq]))
#define index_from_irq(irq)  ((uint8_t)(irq_info[irq] >> 16))
#define type_from_irq(irq)   ((uint8_t)(irq_info[irq] >> 24))
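
/*
 * Worked example (illustrative, not part of the driver): binding VIRQ_TIMER
 * to event channel 42 would store
 *
 *	irq_info[irq] = mk_irq_info(IRQT_VIRQ, VIRQ_TIMER, 42);
 *
 * after which type_from_irq(irq) recovers IRQT_VIRQ from bits 31-24,
 * index_from_irq(irq) recovers VIRQ_TIMER from bits 23-16, and
 * evtchn_from_irq(irq) recovers 42 from bits 15-0.
 */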
/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[(NR_PIRQS + BITS_PER_LONG - 1)/BITS_PER_LONG];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

#define VALID_EVTCHN(_chn) ((_chn) != 0)
#ifdef SMP
static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];

#define active_evtchns(cpu,sh,idx)		\
	((sh)->evtchn_pending[idx] &		\
	 cpu_evtchn_mask[cpu][idx] &		\
	 ~(sh)->evtchn_mask[idx])

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
	cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	/* By default all event channels notify CPU#0. */
	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

#define cpu_from_evtchn(evtchn) (cpu_evtchn[evtchn])

#else
#define active_evtchns(cpu,sh,idx)		\
	((sh)->evtchn_pending[idx] &		\
	 ~(sh)->evtchn_mask[idx])
#define bind_evtchn_to_cpu(chn,cpu)	((void)0)
#define init_evtchn_cpu_bindings()	((void)0)
#define cpu_from_evtchn(evtchn)		(0)
#endif /* SMP */
/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}
void
evtchn_do_upcall(struct trapframe *frame)
{
	unsigned long  l1, l2;
	unsigned int   l1i, l2i, port;
	int            irq, cpu;
	shared_info_t *s;
	vcpu_info_t   *vcpu_info;

	cpu = smp_processor_id();
	s = HYPERVISOR_shared_info;
	vcpu_info = &s->vcpu_info[cpu];

	vcpu_info->evtchn_upcall_pending = 0;

	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
	l1 = xen_xchg(&vcpu_info->evtchn_pending_sel, 0);

	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);

		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);

			port = (l1i * BITS_PER_LONG) + l2i;
			/* Ack the event: Xen does not clear the pending bit for us. */
			synch_clear_bit(port, &s->evtchn_pending[0]);
			if ((irq = evtchn_to_irq[port]) != -1) {
				struct intsrc *isrc = intr_lookup_source(irq);

				intr_execute_handlers(isrc, frame);
			} else {
				evtchn_device_upcall(port);
			}
		}
	}
}
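
/*
 * Scan arithmetic sketch (illustrative): the upcall does a two-level scan.
 * On a 32-bit build, a pending event on port 37 sets bit 37 % 32 == 5 in
 * s->evtchn_pending[1] and bit 1 in the per-vcpu evtchn_pending_sel word.
 * The outer loop finds l1i == 1 via __ffs(), the inner loop finds l2i == 5
 * in the masked word returned by active_evtchns(), and the port is
 * reconstructed as l1i * BITS_PER_LONG + l2i == 37.
 */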
void
ipi_pcpu(unsigned int cpu, int vector)
{
	int irq;

	irq = per_cpu(ipi_to_irq, cpu)[vector];

	notify_remote_via_irq(irq);
}
static int
find_unbound_irq(void)
{
	int dynirq, irq;

	for (dynirq = 0; dynirq < NR_DYNIRQS; dynirq++) {
		irq = dynirq_to_irq(dynirq);
		if (irq_bindcount[irq] == 0)
			return (irq);
	}

	panic("No available IRQ to bind to: increase NR_IRQS!\n");
	return (-1);
}
static int
bind_caller_port_to_irq(unsigned int caller_port)
{
	int irq;

	mtx_lock_spin(&irq_mapping_update_lock);

	if ((irq = evtchn_to_irq[caller_port]) == -1) {
		if ((irq = find_unbound_irq()) < 0)
			goto out;

		evtchn_to_irq[caller_port] = irq;
		irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
	}

	irq_bindcount[irq]++;

out:
	mtx_unlock_spin(&irq_mapping_update_lock);

	return (irq);
}
static int
bind_local_port_to_irq(unsigned int local_port)
{
	int irq;

	mtx_lock_spin(&irq_mapping_update_lock);

	PANIC_IF(evtchn_to_irq[local_port] != -1);

	if ((irq = find_unbound_irq()) < 0) {
		struct evtchn_close close = { .port = local_port };

		PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_close, &close));
		goto out;
	}

	evtchn_to_irq[local_port] = irq;
	irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
	irq_bindcount[irq]++;

out:
	mtx_unlock_spin(&irq_mapping_update_lock);

	return (irq);
}
static int
bind_listening_port_to_irq(unsigned int remote_domain)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
	    &alloc_unbound);

	return err ? : bind_local_port_to_irq(alloc_unbound.port);
}
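
/*
 * Usage sketch (illustrative; 'otherend_id' is a hypothetical variable, not
 * from this file): a split-driver backend accepting a connection from a
 * frontend domain would normally go through the irqhandler wrapper further
 * below, which funnels into this function:
 *
 *	irq = bind_listening_port_to_irq(otherend_id);
 *
 * Xen allocates an unbound local port owned by DOMID_SELF; the remote
 * domain connects to it later via EVTCHNOP_bind_interdomain.
 */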
static int
bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
    unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
	    &bind_interdomain);

	return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
}
static int
bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	mtx_lock_spin(&irq_mapping_update_lock);

	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
		    &bind_virq) != 0);

		evtchn = bind_virq.port;

		irq = find_unbound_irq();
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	mtx_unlock_spin(&irq_mapping_update_lock);

	return (irq);
}
static int
bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int irq, evtchn;

	mtx_lock_spin(&irq_mapping_update_lock);

	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
		if ((irq = find_unbound_irq()) < 0)
			goto out;

		bind_ipi.vcpu = cpu;
		PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0);
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

out:
	mtx_unlock_spin(&irq_mapping_update_lock);

	return (irq);
}
static void
unbind_from_irq(int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	mtx_lock_spin(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;
	}

	mtx_unlock_spin(&irq_mapping_update_lock);
}
int
bind_caller_port_to_irqhandler(unsigned int caller_port,
    const char *devname,
    driver_intr_t handler,
    void *arg,
    unsigned long irqflags,
    void **cookiep)
{
	unsigned int irq;
	int retval;

	irq = bind_caller_port_to_irq(caller_port);
	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
	retval = intr_add_handler(devname, irq, NULL, handler, arg, irqflags,
	    cookiep);
	if (retval != 0) {
		unbind_from_irq(irq);
		return (-retval);
	}

	return (irq);
}
int
bind_listening_port_to_irqhandler(
    unsigned int remote_domain,
    const char *devname,
    driver_intr_t handler,
    void *arg,
    unsigned long irqflags,
    void **cookiep)
{
	unsigned int irq;
	int retval;

	irq = bind_listening_port_to_irq(remote_domain);
	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
	retval = intr_add_handler(devname, irq, NULL, handler, arg, irqflags,
	    cookiep);
	if (retval != 0) {
		unbind_from_irq(irq);
		return (-retval);
	}

	return (irq);
}
int
bind_interdomain_evtchn_to_irqhandler(
    unsigned int remote_domain,
    unsigned int remote_port,
    const char *devname,
    driver_filter_t filter,
    driver_intr_t handler,
    unsigned long irqflags)
{
	unsigned int irq;
	int retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
	retval = intr_add_handler(devname, irq, filter, handler, NULL,
	    irqflags, NULL);
	if (retval != 0) {
		unbind_from_irq(irq);
		return (-retval);
	}

	return (irq);
}
int
bind_virq_to_irqhandler(unsigned int virq,
    unsigned int cpu,
    const char *devname,
    driver_filter_t filter,
    driver_intr_t handler,
    unsigned long irqflags)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
	retval = intr_add_handler(devname, irq, filter, handler, NULL,
	    irqflags, NULL);
	if (retval != 0) {
		unbind_from_irq(irq);
		return (-retval);
	}

	return (irq);
}
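
/*
 * Usage sketch (illustrative; the handler name and flags are hypothetical,
 * not from this file): a clock driver binding the per-CPU timer VIRQ on
 * CPU 0 would look roughly like
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, 0, "clk",
 *	    clkintr_filter, NULL, INTR_TYPE_CLK);
 *
 * On success the return value is the IRQ; on failure it is the negated
 * error from intr_add_handler().
 */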
int
bind_ipi_to_irqhandler(unsigned int ipi,
    unsigned int cpu,
    const char *devname,
    driver_intr_t handler,
    unsigned long irqflags)
{
	unsigned int irq;
	int retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
	retval = intr_add_handler(devname, irq, NULL, handler, NULL,
	    irqflags, NULL);
	if (retval != 0) {
		unbind_from_irq(irq);
		return (-retval);
	}

	return (irq);
}
void
unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	intr_remove_handler(dev_id); /* XXX */
	unbind_from_irq(irq);
}
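
/*
 * Note on the XXX above: intr_remove_handler() expects the opaque cookie
 * returned through intr_add_handler()'s cookiep argument, so dev_id must be
 * that cookie.  Of the bind_*_to_irqhandler() wrappers above, only the
 * caller-port and listening-port variants expose cookiep to their callers.
 */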
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void
rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn;

	mtx_lock_spin(&irq_mapping_update_lock);

	evtchn = evtchn_from_irq(irq);
	if (!VALID_EVTCHN(evtchn)) {
		mtx_unlock_spin(&irq_mapping_update_lock);
		return;
	}

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void set_affinity_irq(unsigned irq, xen_cpumask_t dest)
{
	unsigned tcpu = first_cpu(dest);

	rebind_irq_to_cpu(irq, tcpu);
}
/*
 * Interface to generic handling in intr_machdep.c
 */

/*------------ interrupt handling --------------------------------------*/
static void	xenpic_dynirq_enable_source(struct intsrc *isrc);
static void	xenpic_dynirq_disable_source(struct intsrc *isrc, int);
static void	xenpic_dynirq_eoi_source(struct intsrc *isrc);
static void	xenpic_dynirq_enable_intr(struct intsrc *isrc);
static void	xenpic_dynirq_disable_intr(struct intsrc *isrc);

static void	xenpic_pirq_enable_source(struct intsrc *isrc);
static void	xenpic_pirq_disable_source(struct intsrc *isrc, int);
static void	xenpic_pirq_eoi_source(struct intsrc *isrc);
static void	xenpic_pirq_enable_intr(struct intsrc *isrc);
static void	xenpic_pirq_disable_intr(struct intsrc *isrc);

static int	xenpic_vector(struct intsrc *isrc);
static int	xenpic_source_pending(struct intsrc *isrc);
static void	xenpic_suspend(struct pic *pic);
static void	xenpic_resume(struct pic *pic);
static void	xenpic_assign_cpu(struct intsrc *, u_int apic_id);
struct pic xenpic_dynirq_template = {
	.pic_enable_source	= xenpic_dynirq_enable_source,
	.pic_disable_source	= xenpic_dynirq_disable_source,
	.pic_eoi_source		= xenpic_dynirq_eoi_source,
	.pic_enable_intr	= xenpic_dynirq_enable_intr,
	.pic_disable_intr	= xenpic_dynirq_disable_intr,
	.pic_vector		= xenpic_vector,
	.pic_source_pending	= xenpic_source_pending,
	.pic_suspend		= xenpic_suspend,
	.pic_resume		= xenpic_resume
};
struct pic xenpic_pirq_template = {
	.pic_enable_source	= xenpic_pirq_enable_source,
	.pic_disable_source	= xenpic_pirq_disable_source,
	.pic_eoi_source		= xenpic_pirq_eoi_source,
	.pic_enable_intr	= xenpic_pirq_enable_intr,
	.pic_disable_intr	= xenpic_pirq_disable_intr,
	.pic_vector		= xenpic_vector,
	.pic_source_pending	= xenpic_source_pending,
	.pic_suspend		= xenpic_suspend,
	.pic_resume		= xenpic_resume,
	.pic_assign_cpu		= xenpic_assign_cpu
};
static void
xenpic_dynirq_enable_source(struct intsrc *isrc)
{
	unsigned int irq;
	struct xenpic_intsrc *xp;

	xp = (struct xenpic_intsrc *)isrc;

	mtx_lock_spin(&irq_mapping_update_lock);

	irq = xenpic_vector(isrc);
	unmask_evtchn(evtchn_from_irq(irq));
	xp->xp_masked = FALSE;

	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_dynirq_disable_source(struct intsrc *isrc, int foo)
{
	unsigned int irq;
	struct xenpic_intsrc *xp;

	xp = (struct xenpic_intsrc *)isrc;

	mtx_lock_spin(&irq_mapping_update_lock);
	if (!xp->xp_masked) {
		irq = xenpic_vector(isrc);
		mask_evtchn(evtchn_from_irq(irq));
		xp->xp_masked = TRUE;
	}
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_dynirq_enable_intr(struct intsrc *isrc)
{
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	unmask_evtchn(evtchn_from_irq(irq));
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_dynirq_disable_intr(struct intsrc *isrc)
{
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	mask_evtchn(evtchn_from_irq(irq));
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_dynirq_eoi_source(struct intsrc *isrc)
{
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	unmask_evtchn(evtchn_from_irq(irq));
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static int
xenpic_vector(struct intsrc *isrc)
{
	struct xenpic_intsrc *pin;

	pin = (struct xenpic_intsrc *)isrc;
	//printf("xenpic_vector(): isrc=%p,vector=%u\n", pin, pin->xp_vector);

	return (pin->xp_vector);
}
static int
xenpic_source_pending(struct intsrc *isrc)
{
	struct xenpic_intsrc *pin = (struct xenpic_intsrc *)isrc;

	printf("xenpic_source_pending(): vector=%x,masked=%x\n",
	    pin->xp_vector, pin->xp_masked);

	/* notify_remote_via_evtchn(pin->xp_vector); // XXX RS: Is this correct? */
	return 0;
}
static void
xenpic_suspend(struct pic *pic)
{
	TODO;
}

static void
xenpic_resume(struct pic *pic)
{
	TODO;
}

static void
xenpic_assign_cpu(struct intsrc *isrc, u_int apic_id)
{
	TODO;
}
void
notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
/* required for support of physical devices */
static inline void
pirq_unmask_notify(int pirq)
{
	struct physdev_eoi eoi = { .irq = pirq };

	if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
		(void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
	}
}
static inline void
pirq_query_unmask(int pirq)
{
	struct physdev_irq_status_query irq_status_query;

	irq_status_query.irq = pirq;
	(void)HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY, &irq_status_query);

	clear_bit(pirq, &pirq_needs_unmask_notify[0]);
	if (irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
		set_bit(pirq, &pirq_needs_unmask_notify[0]);
}
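
/*
 * Protocol sketch: pirq_query_unmask() runs once when a PIRQ is bound (see
 * xenpic_pirq_enable_intr() below) and caches, in pirq_needs_unmask_notify,
 * whether Xen wants an explicit PHYSDEVOP_eoi after each unmask.
 * pirq_unmask_notify() then tests that cached bit on every unmask/EOI and
 * makes the hypercall only when required.
 */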
/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (intr_lookup_source(_irq) == NULL)
static void
xenpic_pirq_enable_intr(struct intsrc *isrc)
{
	struct evtchn_bind_pirq bind_pirq;
	int evtchn;
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq  = irq;
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;

	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
		if (!probing_irq(irq)) /* Some failures are expected when probing. */
			printf("Failed to obtain physical IRQ %d\n", irq);
		mtx_unlock_spin(&irq_mapping_update_lock);
		return;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq_to_pirq(irq));

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq_to_pirq(irq));
	mtx_unlock_spin(&irq_mapping_update_lock);
}
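
/*
 * Flow note: the first enable of a PIRQ is what actually binds it -- the
 * EVTCHNOP_bind_pirq hypercall above allocates the event channel on demand,
 * after which the port behaves like any other: it is routed to CPU 0,
 * entered into evtchn_to_irq[]/irq_info[], unmasked, and EOIed via
 * pirq_unmask_notify() when Xen has requested it.
 */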
static void
xenpic_pirq_disable_intr(struct intsrc *isrc)
{
	unsigned int irq;
	int evtchn;
	struct evtchn_close close;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		goto done;

	mask_evtchn(evtchn);

	close.port = evtchn;
	PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0);

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	irq_info[irq] = IRQ_UNBOUND;
done:
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_pirq_enable_source(struct intsrc *isrc)
{
	int evtchn;
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		goto done;

	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq_to_pirq(irq));
done:
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_pirq_disable_source(struct intsrc *isrc, int eoi)
{
	int evtchn;
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		goto done;

	mask_evtchn(evtchn);
done:
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_pirq_eoi_source(struct intsrc *isrc)
{
	int evtchn;
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		goto done;

	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq_to_pirq(irq));
done:
	mtx_unlock_spin(&irq_mapping_update_lock);
}
int
irq_to_evtchn_port(int irq)
{
	return evtchn_from_irq(irq);
}
void
mask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;

	synch_set_bit(port, &s->evtchn_mask[0]);
}
void
unmask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	unsigned int cpu = smp_processor_id();
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };

		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
		return;
	}

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * The following is basically the equivalent of 'hw_resend_irq'. Just
	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
	 * masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / BITS_PER_LONG,
		&vcpu_info->evtchn_pending_sel)) {
		vcpu_info->evtchn_upcall_pending = 1;
		if (!vcpu_info->evtchn_upcall_mask)
			force_evtchn_callback();
	}
}
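
/*
 * Worked example (illustrative): unmasking local port 37 on a 32-bit build
 * clears bit 5 of evtchn_mask[1]; if that port is still pending,
 * port / BITS_PER_LONG == 1 is set in evtchn_pending_sel, the upcall-pending
 * flag is raised, and force_evtchn_callback() re-triggers delivery so the
 * edge is not lost while the channel was masked.
 */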
void irq_resume(void)
{
	int cpu, pirq, virq, ipi, irq, evtchn;

	struct evtchn_bind_virq bind_virq;
	struct evtchn_bind_ipi bind_ipi;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* Check that no PIRQs are still bound. */
	for (pirq = 0; pirq < NR_PIRQS; pirq++)
		PANIC_IF(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);

	/* Secondary CPUs must have no VIRQ or IPI bindings. */
	for (cpu = 1; cpu < NR_CPUS; cpu++) {
		for (virq = 0; virq < NR_VIRQS; virq++)
			PANIC_IF(per_cpu(virq_to_irq, cpu)[virq] != -1);
		for (ipi = 0; ipi < NR_IPIS; ipi++)
			PANIC_IF(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
	}

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < NR_IRQS; irq++)
		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	/* Primary CPU: rebind VIRQs automatically. */
	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
			continue;

		PANIC_IF(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = 0;
		PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq) != 0);
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}

	/* Primary CPU: rebind IPIs automatically. */
	for (ipi = 0; ipi < NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
			continue;

		PANIC_IF(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = 0;
		PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0);
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
static void
evtchn_init(void *dummy __unused)
{
	int i, cpu;
	struct xenpic_intsrc *pin, *tpin;

	/* No VIRQ or IPI bindings. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		for (i = 0; i < NR_VIRQS; i++)
			per_cpu(virq_to_irq, cpu)[i] = -1;
		for (i = 0; i < NR_IPIS; i++)
			per_cpu(ipi_to_irq, cpu)[i] = -1;
	}

	/* No event-channel -> IRQ mappings. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		evtchn_to_irq[i] = -1;
		mask_evtchn(i); /* No event channels are 'live' right now. */
	}

	/* No IRQ -> event-channel mappings. */
	for (i = 0; i < NR_IRQS; i++)
		irq_info[i] = IRQ_UNBOUND;

	xp = malloc(sizeof(struct xenpic) + NR_IRQS*sizeof(struct xenpic_intsrc),
	    M_DEVBUF, M_WAITOK);

	xp->xp_dynirq_pic = &xenpic_dynirq_template;
	xp->xp_pirq_pic = &xenpic_pirq_template;
	xp->xp_numintr = NR_IRQS;
	bzero(xp->xp_pins, sizeof(struct xenpic_intsrc) * NR_IRQS);
	pin = xp->xp_pins;

	/* We need to register our PICs beforehand */
	if (intr_register_pic(&xenpic_pirq_template))
		panic("XEN: intr_register_pic() failure");
	if (intr_register_pic(&xenpic_dynirq_template))
		panic("XEN: intr_register_pic() failure");

	/*
	 * Initialize the dynamic IRQs - we initialize the structures, but
	 * we do not bind them (the bind_*_to_irqhandler() wrappers do this).
	 */
	for (i = 0; i < NR_DYNIRQS; i++) {
		/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
		irq_bindcount[dynirq_to_irq(i)] = 0;

		tpin = &pin[dynirq_to_irq(i)];
		tpin->xp_intsrc.is_pic = xp->xp_dynirq_pic;
		tpin->xp_vector = dynirq_to_irq(i);
	}

	/*
	 * Now, we go ahead and claim every PIRQ there is.
	 */
	for (i = 0; i < NR_PIRQS; i++) {
		/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
		irq_bindcount[pirq_to_irq(i)] = 0;

		/* If not domain 0, force our RTC driver to fail its probe. */
		if ((i == RTC_IRQ) &&
		    !(xen_start_info->flags & SIF_INITDOMAIN))
			continue;

		tpin = &pin[pirq_to_irq(i)];
		tpin->xp_intsrc.is_pic = xp->xp_pirq_pic;
		tpin->xp_vector = pirq_to_irq(i);
	}
}

SYSINIT(evtchn_init, SI_SUB_INTR, SI_ORDER_ANY, evtchn_init, NULL);
/*
 * irq_mapping_update_lock: in order to allow an interrupt to occur in a critical
 *			    section, to set pcpu->ipending (etc...) properly, we
 *			    must be able to get the icu lock, so it can't be
 *			    under witness.
 */
MTX_SYSINIT(irq_mapping_update_lock, &irq_mapping_update_lock, "xp", MTX_SPIN);