/******************************************************************************
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005-2006 Kip Macy
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/smp.h>

#include <machine/cpufunc.h>
#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xen_intr.h>
#include <machine/xen/synch_bitops.h>
#include <machine/xen/evtchn.h>
#include <machine/xen/hypervisor.h>
/* linux helper functions that got sucked in */
static inline int find_first_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1;
	int res;

	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"xorl %%eax,%%eax\n\t"
		"repe; scasl\n\t"
		"jz 1f\n\t"
		"leal -4(%%edi),%%edi\n\t"
		"bsfl (%%edi),%%eax\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%eax"
		:"=a" (res), "=&c" (d0), "=&D" (d1)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
	return res;
}
#define min_t(type,x,y) \
	({ type __x = (x); type __y = (y); __x < __y ? __x : __y; })

#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
static inline int __first_cpu(const xen_cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
}
static inline unsigned long __ffs(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
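/*
 * Behavior sketch for the two helpers above, assuming 32-bit longs
 * (illustrative comment only, not compiled):
 *
 *	__ffs(0x00000008);		// == 3, the lowest set bit
 *
 *	unsigned long map[2] = { 0x00000000, 0x00010000 };
 *	find_first_bit(map, 64);	// == 48 (word 1, bit 16)
 */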
static struct mtx irq_mapping_update_lock;
static struct xenpic *xp;
struct xenpic_intsrc {
	struct intsrc		xp_intsrc;
	uint8_t			xp_vector;
	boolean_t		xp_masked;
};

struct xenpic {
	struct pic		*xp_dynirq_pic;
	struct pic		*xp_pirq_pic;
	uint16_t		xp_numintr;
	struct xenpic_intsrc	xp_pins[0];
};
#define TODO		printf("%s: not implemented!\n", __func__)

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static uint32_t irq_info[NR_IRQS];
/* Binding types. */
enum {
	IRQT_UNBOUND,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_LOCAL_PORT,
	IRQT_CALLER_PORT
};

#define _IRQT_BITS	4
#define _EVTCHN_BITS	12
#define _INDEX_BITS	(32 - _IRQT_BITS - _EVTCHN_BITS)
/* Constructor for packed IRQ information. */
static inline uint32_t
mk_irq_info(uint32_t type, uint32_t index, uint32_t evtchn)
{
	return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
}
/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)
/*
 * Accessors for packed IRQ information.
 */
static inline unsigned int evtchn_from_irq(int irq)
{
	return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
}

static inline unsigned int index_from_irq(int irq)
{
	return (irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
}

static inline unsigned int type_from_irq(int irq)
{
	return irq_info[irq] >> (32 - _IRQT_BITS);
}
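/*
 * Illustrative round-trip through the packed encoding (values invented;
 * not compiled): with irq_info[irq] = mk_irq_info(IRQT_VIRQ, VIRQ_TIMER, 3),
 * the accessors recover each field:
 *
 *	type_from_irq(irq);	// == IRQT_VIRQ  (top _IRQT_BITS bits)
 *	index_from_irq(irq);	// == VIRQ_TIMER (middle _INDEX_BITS bits)
 *	evtchn_from_irq(irq);	// == 3          (low _EVTCHN_BITS bits)
 */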
/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#error "NR_IPIS not defined"
#endif
DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

#define VALID_EVTCHN(_chn)	((_chn) != 0)
#ifdef SMP
static uint8_t cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
#define active_evtchns(cpu,sh,idx)		\
	((sh)->evtchn_pending[idx] &		\
	 cpu_evtchn_mask[cpu][idx] &		\
	 ~(sh)->evtchn_mask[idx])
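/*
 * Worked example of the macro above (invented values): with
 * evtchn_pending[idx] == 0x0f, evtchn_mask[idx] == 0x01 and
 * cpu_evtchn_mask[cpu][idx] == ~0UL, active_evtchns() yields 0x0e --
 * the ports that are pending, routed to this CPU, and not masked.
 */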
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
	cpu_evtchn[chn] = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
	/* By default all event channels notify CPU#0. */
	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}
#define cpu_from_evtchn(evtchn)	(cpu_evtchn[evtchn])
#else

#define active_evtchns(cpu,sh,idx)		\
	((sh)->evtchn_pending[idx] &		\
	 ~(sh)->evtchn_mask[idx])
#define bind_evtchn_to_cpu(chn,cpu)	((void)0)
#define init_evtchn_cpu_bindings()	((void)0)
#define cpu_from_evtchn(evtchn)		(0)

#endif /* SMP */
/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}
void
evtchn_do_upcall(struct intrframe *frame)
{
	unsigned long l1, l2;
	unsigned int l1i, l2i, port;
	int irq, cpu;
	shared_info_t *s;
	vcpu_info_t *vcpu_info;

	cpu = smp_processor_id();
	s = HYPERVISOR_shared_info;
	vcpu_info = &s->vcpu_info[cpu];
	vcpu_info->evtchn_upcall_pending = 0;

	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
	l1 = xen_xchg(&vcpu_info->evtchn_pending_sel, 0);
	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);

		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);
			port = (l1i * BITS_PER_LONG) + l2i;
			if ((irq = evtchn_to_irq[port]) != -1) {
				struct intsrc *isrc = intr_lookup_source(irq);
				/* Ack: mask and clear before dispatch. */
				mask_evtchn(port);
				clear_evtchn(port);
				intr_execute_handlers(isrc, frame);
			} else
				evtchn_device_upcall(port);
		}
	}
}
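/*
 * Port-decoding sketch for the loop above: the selector word narrows the
 * search to one word of evtchn_pending[], and __ffs() finds the bit
 * within that word. With invented values l1i == 1 and l2i == 5 on a
 * 32-bit guest, port == 1 * 32 + 5 == 37.
 */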
void
ipi_pcpu(unsigned int cpu, int vector)
{
	int irq;

	irq = per_cpu(ipi_to_irq, cpu)[vector];

	notify_remote_via_irq(irq);
}
static int
find_unbound_irq(void)
{
	int dynirq, irq;

	for (dynirq = 0; dynirq < NR_IRQS; dynirq++) {
		irq = dynirq_to_irq(dynirq);
		if (irq_bindcount[irq] == 0)
			break;
	}

	if (dynirq == NR_IRQS)
		panic("No available IRQ to bind to: increase NR_IRQS!\n");

	return (irq);
}
static int
bind_caller_port_to_irq(unsigned int caller_port)
{
	int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	if ((irq = evtchn_to_irq[caller_port]) == -1) {
		if ((irq = find_unbound_irq()) < 0)
			goto out;
		evtchn_to_irq[caller_port] = irq;
		irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
	}
	irq_bindcount[irq]++;
out:
	mtx_unlock_spin(&irq_mapping_update_lock);
	return (irq);
}
static int
bind_local_port_to_irq(unsigned int local_port)
{
	int irq;

	mtx_lock_spin(&irq_mapping_update_lock);

	PANIC_IF(evtchn_to_irq[local_port] != -1);

	if ((irq = find_unbound_irq()) < 0) {
		struct evtchn_close close = { .port = local_port };
		PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_close, &close));
		goto out;
	}

	evtchn_to_irq[local_port] = irq;
	irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
	irq_bindcount[irq]++;
out:
	mtx_unlock_spin(&irq_mapping_update_lock);
	return (irq);
}
static int
bind_listening_port_to_irq(unsigned int remote_domain)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
	    &alloc_unbound);

	return err ? : bind_local_port_to_irq(alloc_unbound.port);
}
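/*
 * Usage sketch (hypothetical backend driver): allocate an unbound port
 * that a peer domain may bind, then advertise it, e.g. via xenstore
 * ("otherend_id" is an invented name):
 *
 *	int irq = bind_listening_port_to_irq(otherend_id);
 *	// ... export irq_to_evtchn_port(irq) to the peer ...
 */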
static int
bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
    unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
	    &bind_interdomain);

	return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
}
static int
bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
		if ((irq = find_unbound_irq()) < 0)
			goto out;

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
		    &bind_virq) != 0);
		evtchn = bind_virq.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}
	irq_bindcount[irq]++;
out:
	mtx_unlock_spin(&irq_mapping_update_lock);
	return (irq);
}
extern int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu);

int
bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
		if ((irq = find_unbound_irq()) < 0)
			goto out;

		bind_ipi.vcpu = cpu;
		PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0);
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}
	irq_bindcount[irq]++;
out:
	mtx_unlock_spin(&irq_mapping_update_lock);
	return (irq);
}
static void
unbind_from_irq(int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	mtx_lock_spin(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;
	}

	mtx_unlock_spin(&irq_mapping_update_lock);
}
int
bind_caller_port_to_irqhandler(unsigned int caller_port,
    const char *devname,
    driver_intr_t handler,
    void *arg,
    unsigned long irqflags,
    void **cookiep)
{
	unsigned int irq;
	int retval;

	irq = bind_caller_port_to_irq(caller_port);
	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
	retval = intr_add_handler(devname, irq, handler, arg, irqflags, cookiep);
	if (retval != 0) {
		unbind_from_irq(irq);
		return (-retval);
	}

	return (irq);
}
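/*
 * Usage sketch (hypothetical driver; "xn_intr", "sc" and "cookie" are
 * invented names):
 *
 *	void *cookie;
 *	int irq = bind_caller_port_to_irqhandler(port, "xn", xn_intr, sc,
 *	    INTR_TYPE_NET, &cookie);
 *	if (irq < 0)
 *		// handle the (negated) intr_add_handler() error
 */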
int
bind_listening_port_to_irqhandler(
	unsigned int remote_domain,
	const char *devname,
	driver_intr_t handler,
	void *arg,
	unsigned long irqflags,
	void **cookiep)
{
	unsigned int irq;
	int retval;

	irq = bind_listening_port_to_irq(remote_domain);
	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
	retval = intr_add_handler(devname, irq, handler, arg, irqflags, cookiep);
	if (retval != 0) {
		unbind_from_irq(irq);
		return (-retval);
	}

	return (irq);
}
int
bind_interdomain_evtchn_to_irqhandler(
	unsigned int remote_domain,
	unsigned int remote_port,
	const char *devname,
	driver_intr_t handler,
	unsigned long irqflags)
{
	unsigned int irq;
	int retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
	retval = intr_add_handler(devname, irq, handler, NULL, irqflags, NULL);
	if (retval != 0) {
		unbind_from_irq(irq);
		return (-retval);
	}

	return (irq);
}
int
bind_virq_to_irqhandler(unsigned int virq,
    unsigned int cpu,
    const char *devname,
    driver_intr_t handler,
    unsigned long irqflags)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
	retval = intr_add_handler(devname, irq, handler, NULL, irqflags, NULL);
	if (retval != 0) {
		unbind_from_irq(irq);
		return (-retval);
	}

	return (irq);
}
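/*
 * Usage sketch: clock code might bind the timer VIRQ on the boot CPU
 * roughly like this (handler name invented):
 *
 *	bind_virq_to_irqhandler(VIRQ_TIMER, 0, "clk", clkintr,
 *	    INTR_TYPE_CLK | INTR_FAST);
 */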
int
bind_ipi_to_irqhandler(unsigned int ipi,
    unsigned int cpu,
    const char *devname,
    driver_intr_t handler,
    unsigned long irqflags)
{
	unsigned int irq;
	int retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
	retval = intr_add_handler(devname, irq, handler, NULL, irqflags, NULL);
	if (retval != 0) {
		unbind_from_irq(irq);
		return (-retval);
	}

	return (irq);
}
void
unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	intr_remove_handler(dev_id); /* XXX */
	unbind_from_irq(irq);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void
rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn;

	mtx_lock_spin(&irq_mapping_update_lock);

	evtchn = evtchn_from_irq(irq);
	if (!VALID_EVTCHN(evtchn)) {
		mtx_unlock_spin(&irq_mapping_update_lock);
		return;
	}

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void set_affinity_irq(unsigned irq, xen_cpumask_t dest)
{
	unsigned tcpu = first_cpu(dest);
	rebind_irq_to_cpu(irq, tcpu);
}
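/*
 * Example (illustrative; assumes a populated xen_cpumask_t): steer an
 * IRQ to CPU 2 by passing a mask whose first set bit is bit 2:
 *
 *	xen_cpumask_t mask = { .bits = { 1UL << 2 } };
 *	set_affinity_irq(irq, mask);	// first_cpu(mask) == 2
 */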
/*
 * Interface to generic handling in intr_machdep.c
 */

/*------------ interrupt handling --------------------------------------*/
static void	xenpic_dynirq_enable_source(struct intsrc *isrc);
static void	xenpic_dynirq_disable_source(struct intsrc *isrc, int);
static void	xenpic_dynirq_eoi_source(struct intsrc *isrc);
static void	xenpic_dynirq_enable_intr(struct intsrc *isrc);

static void	xenpic_pirq_enable_source(struct intsrc *isrc);
static void	xenpic_pirq_disable_source(struct intsrc *isrc, int);
static void	xenpic_pirq_eoi_source(struct intsrc *isrc);
static void	xenpic_pirq_enable_intr(struct intsrc *isrc);

static int	xenpic_vector(struct intsrc *isrc);
static int	xenpic_source_pending(struct intsrc *isrc);
static void	xenpic_suspend(struct pic* pic);
static void	xenpic_resume(struct pic* pic);
static void	xenpic_assign_cpu(struct intsrc *, u_int apic_id);
struct pic xenpic_dynirq_template = {
	.pic_enable_source	= xenpic_dynirq_enable_source,
	.pic_disable_source	= xenpic_dynirq_disable_source,
	.pic_eoi_source		= xenpic_dynirq_eoi_source,
	.pic_enable_intr	= xenpic_dynirq_enable_intr,
	.pic_vector		= xenpic_vector,
	.pic_source_pending	= xenpic_source_pending,
	.pic_suspend		= xenpic_suspend,
	.pic_resume		= xenpic_resume
};
struct pic xenpic_pirq_template = {
	.pic_enable_source	= xenpic_pirq_enable_source,
	.pic_disable_source	= xenpic_pirq_disable_source,
	.pic_eoi_source		= xenpic_pirq_eoi_source,
	.pic_enable_intr	= xenpic_pirq_enable_intr,
	.pic_vector		= xenpic_vector,
	.pic_source_pending	= xenpic_source_pending,
	.pic_suspend		= xenpic_suspend,
	.pic_resume		= xenpic_resume,
	.pic_assign_cpu		= xenpic_assign_cpu
};
static void
xenpic_dynirq_enable_source(struct intsrc *isrc)
{
	unsigned int irq;
	struct xenpic_intsrc *xp;

	xp = (struct xenpic_intsrc *)isrc;

	mtx_lock_spin(&irq_mapping_update_lock);
	if (xp->xp_masked) {
		irq = xenpic_vector(isrc);
		unmask_evtchn(evtchn_from_irq(irq));
		xp->xp_masked = FALSE;
	}
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_dynirq_disable_source(struct intsrc *isrc, int foo)
{
	unsigned int irq;
	struct xenpic_intsrc *xp;

	xp = (struct xenpic_intsrc *)isrc;

	mtx_lock_spin(&irq_mapping_update_lock);
	if (!xp->xp_masked) {
		irq = xenpic_vector(isrc);
		mask_evtchn(evtchn_from_irq(irq));
		xp->xp_masked = TRUE;
	}
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_dynirq_enable_intr(struct intsrc *isrc)
{
	unsigned int irq;
	struct xenpic_intsrc *xp;

	xp = (struct xenpic_intsrc *)isrc;
	mtx_lock_spin(&irq_mapping_update_lock);
	xp->xp_masked = FALSE;
	irq = xenpic_vector(isrc);
	unmask_evtchn(evtchn_from_irq(irq));
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_dynirq_eoi_source(struct intsrc *isrc)
{
	unsigned int irq;
	struct xenpic_intsrc *xp;

	xp = (struct xenpic_intsrc *)isrc;
	mtx_lock_spin(&irq_mapping_update_lock);
	xp->xp_masked = FALSE;
	irq = xenpic_vector(isrc);
	unmask_evtchn(evtchn_from_irq(irq));
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static int
xenpic_vector(struct intsrc *isrc)
{
	struct xenpic_intsrc *pin;

	pin = (struct xenpic_intsrc *)isrc;
	//printf("xenpic_vector(): isrc=%p,vector=%u\n", pin, pin->xp_vector);

	return (pin->xp_vector);
}
static int
xenpic_source_pending(struct intsrc *isrc)
{
	struct xenpic_intsrc *pin = (struct xenpic_intsrc *)isrc;

	printf("xenpic_source_pending(): vector=%x,masked=%x\n",
	    pin->xp_vector, pin->xp_masked);

	/* notify_remote_via_evtchn(pin->xp_vector); // XXX RS: Is this correct? */
	return 0;
}
static void
xenpic_suspend(struct pic* pic)
{
	TODO;
}

static void
xenpic_resume(struct pic* pic)
{
	TODO;
}

static void
xenpic_assign_cpu(struct intsrc *isrc, u_int apic_id)
{
	TODO;
}
void
notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
	else
		panic("invalid evtchn");
}
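/*
 * Sketch: a frontend typically kicks its backend after publishing work,
 * e.g. (ring handling elided; "sc" is an invented softc):
 *
 *	RING_PUSH_REQUESTS(&sc->ring);
 *	notify_remote_via_irq(sc->irq);
 */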
/* required for support of physical devices */
static inline void
pirq_unmask_notify(int pirq)
{
	struct physdev_eoi eoi = { .irq = pirq };

	if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
		(void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
	}
}
static inline void
pirq_query_unmask(int pirq)
{
	struct physdev_irq_status_query irq_status_query;

	irq_status_query.irq = pirq;
	(void)HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY, &irq_status_query);
	clear_bit(pirq, &pirq_needs_unmask_notify[0]);
	if (irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
		set_bit(pirq, &pirq_needs_unmask_notify[0]);
}
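/*
 * Protocol sketch for the two helpers above: pirq_query_unmask() caches
 * whether Xen wants a PHYSDEVOP_eoi when the PIRQ is unmasked, and
 * pirq_unmask_notify() issues that EOI only when required:
 *
 *	pirq_query_unmask(irq_to_pirq(irq));	// once, at bind time
 *	...
 *	unmask_evtchn(evtchn);
 *	pirq_unmask_notify(irq_to_pirq(irq));	// per interrupt, if needed
 */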
/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq)	(intr_lookup_source(_irq) == NULL)
static void
xenpic_pirq_enable_intr(struct intsrc *isrc)
{
	struct evtchn_bind_pirq bind_pirq;
	int evtchn;
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq  = irq;
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;

	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
#ifndef XEN_PRIVILEGED_GUEST
		panic("unexpected pirq call");
#endif
		if (!probing_irq(irq)) /* Some failures are expected when probing. */
			printf("Failed to obtain physical IRQ %d\n", irq);
		mtx_unlock_spin(&irq_mapping_update_lock);
		return;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq_to_pirq(irq));

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq_to_pirq(irq));
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_pirq_enable_source(struct intsrc *isrc)
{
	int evtchn;
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		goto done;

	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq_to_pirq(irq));
 done:
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_pirq_disable_source(struct intsrc *isrc, int eoi)
{
	int evtchn;
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		goto done;

	mask_evtchn(evtchn);
 done:
	mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_pirq_eoi_source(struct intsrc *isrc)
{
	int evtchn;
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		goto done;

	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq_to_pirq(irq));
 done:
	mtx_unlock_spin(&irq_mapping_update_lock);
}
int
irq_to_evtchn_port(int irq)
{
	return evtchn_from_irq(irq);
}
void
mask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	synch_set_bit(port, &s->evtchn_mask[0]);
}
void
unmask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	unsigned int cpu = smp_processor_id();
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
		return;
	}

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * The following is basically the equivalent of 'hw_resend_irq'. Just
	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
	 * masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / BITS_PER_LONG,
		&vcpu_info->evtchn_pending_sel)) {
		vcpu_info->evtchn_upcall_pending = 1;
		if (!vcpu_info->evtchn_upcall_mask)
			force_evtchn_callback();
	}
}
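/*
 * Worked example of the resend logic above (invented values): if port 37
 * became pending while masked on a 32-bit guest, Xen has already set bit
 * 37 % 32 == 5 in evtchn_pending[1]. Unmasking locally then sets bit
 * 37 / 32 == 1 in this vcpu's evtchn_pending_sel and forces an upcall,
 * so the "edge" is not lost.
 */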
void irq_resume(void)
{
	int cpu, pirq, virq, ipi, irq, evtchn;

	struct evtchn_bind_virq bind_virq;
	struct evtchn_bind_ipi bind_ipi;

	init_evtchn_cpu_bindings();
	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);
	/* Check that no PIRQs are still bound. */
	for (pirq = 0; pirq < NR_PIRQS; pirq++)
		PANIC_IF(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
	/* Secondary CPUs must have no VIRQ or IPI bindings. */
	for (cpu = 1; cpu < NR_CPUS; cpu++) {
		for (virq = 0; virq < NR_VIRQS; virq++)
			PANIC_IF(per_cpu(virq_to_irq, cpu)[virq] != -1);
		for (ipi = 0; ipi < NR_IPIS; ipi++)
			PANIC_IF(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
	}
	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < NR_IRQS; irq++)
		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;
	/* Primary CPU: rebind VIRQs automatically. */
	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
			continue;

		PANIC_IF(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = 0;
		PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq) != 0);
		evtchn = bind_virq.port;
		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
	/* Primary CPU: rebind IPIs automatically. */
	for (ipi = 0; ipi < NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
			continue;

		PANIC_IF(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
		/* Get a new binding from Xen. */
		memset(&bind_ipi, 0, sizeof(bind_ipi));
		bind_ipi.vcpu = 0;
		PANIC_IF(HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0);
		evtchn = bind_ipi.port;
		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
static void
evtchn_init(void *dummy __unused)
{
	int i, cpu;
	struct xenpic_intsrc *pin, *tpin;

	init_evtchn_cpu_bindings();
	/* No VIRQ or IPI bindings. */
	for (cpu = 0; cpu < mp_ncpus; cpu++) {
		for (i = 0; i < NR_VIRQS; i++)
			per_cpu(virq_to_irq, cpu)[i] = -1;
		for (i = 0; i < NR_IPIS; i++)
			per_cpu(ipi_to_irq, cpu)[i] = -1;
	}
	/* No event-channel -> IRQ mappings. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		evtchn_to_irq[i] = -1;
		mask_evtchn(i); /* No event channels are 'live' right now. */
	}
	/* No IRQ -> event-channel mappings. */
	for (i = 0; i < NR_IRQS; i++)
		irq_info[i] = IRQ_UNBOUND;
	xp = malloc(sizeof(struct xenpic) + NR_IRQS*sizeof(struct xenpic_intsrc),
	    M_DEVBUF, M_WAITOK);

	xp->xp_dynirq_pic = &xenpic_dynirq_template;
	xp->xp_pirq_pic = &xenpic_pirq_template;
	xp->xp_numintr = NR_IRQS;
	bzero(xp->xp_pins, sizeof(struct xenpic_intsrc) * NR_IRQS);
	pin = xp->xp_pins;
	/* We need to register our PICs beforehand */
	if (intr_register_pic(&xenpic_pirq_template))
		panic("XEN: intr_register_pic() failure");
	if (intr_register_pic(&xenpic_dynirq_template))
		panic("XEN: intr_register_pic() failure");
	/*
	 * Initialize the dynamic IRQs - we initialize the structures, but
	 * we do not bind them (bind_evtchn_to_irqhandler() does this).
	 */
	for (i = 0; i < NR_DYNIRQS; i++) {
		/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
		irq_bindcount[dynirq_to_irq(i)] = 0;

		tpin = &pin[dynirq_to_irq(i)];
		tpin->xp_intsrc.is_pic = xp->xp_dynirq_pic;
		tpin->xp_vector = dynirq_to_irq(i);
	}
	/*
	 * Now, we go ahead and claim every PIRQ there is.
	 */
	for (i = 0; i < NR_PIRQS; i++) {
		/* PIRQ space is currently unbound. Zero the refcnts. */
		irq_bindcount[pirq_to_irq(i)] = 0;

#ifdef RTC_IRQ
		/* If not domain 0, force our RTC driver to fail its probe. */
		if ((i == RTC_IRQ) &&
		    !(xen_start_info->flags & SIF_INITDOMAIN))
			continue;
#endif
		tpin = &pin[pirq_to_irq(i)];
		tpin->xp_intsrc.is_pic = xp->xp_pirq_pic;
		tpin->xp_vector = pirq_to_irq(i);
	}
}
SYSINIT(evtchn_init, SI_SUB_INTR, SI_ORDER_MIDDLE, evtchn_init, NULL);
/*
 * irq_mapping_update_lock: in order to allow an interrupt to occur in a critical
 *			    section, to set pcpu->ipending (etc...) properly, we
 *			    must be able to get the icu lock, so it can't be
 *			    under witness.
 */
MTX_SYSINIT(irq_mapping_update_lock, &irq_mapping_update_lock, "xp", MTX_SPIN);