1 /******************************************************************************
4 * Communication via Xen event channels.
6 * Copyright (c) 2002-2005, K A Fraser
7 * Copyright (c) 2005-2006 Kip Macy
10 #include <sys/cdefs.h>
11 __FBSDID("$FreeBSD$");
13 #include <sys/param.h>
14 #include <sys/systm.h>
16 #include <sys/limits.h>
17 #include <sys/malloc.h>
18 #include <sys/kernel.h>
20 #include <sys/mutex.h>
21 #include <sys/interrupt.h>
25 #include <machine/cpufunc.h>
26 #include <machine/intr_machdep.h>
28 #include <machine/xen/xen-os.h>
29 #include <machine/xen/xenvar.h>
30 #include <xen/xen_intr.h>
31 #include <machine/xen/synch_bitops.h>
32 #include <xen/evtchn.h>
33 #include <xen/hypervisor.h>
36 #include <xen/xen_intr.h>
37 #include <xen/evtchn.h>
/*
 * Find the index of the least-significant set bit (Linux-style helper).
 * NOTE(review): the body is not visible in this excerpt.
 */
39 static inline unsigned long __ffs(unsigned long word)
/* Spin lock protecting evtchn_to_irq[], irq_info[] and the binding tables. */
47 static struct mtx irq_mapping_update_lock;
/* The single xenpic instance, allocated in evtchn_init(). */
48 static struct xenpic *xp;
/* Per-IRQ interrupt-source state handed to intr_machdep. */
49 struct xenpic_intsrc {
50 struct intsrc xp_intsrc;
/* Two PICs: one for dynamic (event-channel) IRQs, one for physical IRQs. */
57 struct pic *xp_dynirq_pic;
58 struct pic *xp_pirq_pic;
/* Trailing variable-length pin array, one entry per IRQ (pre-C99 [0] idiom). */
60 struct xenpic_intsrc xp_pins[0];
63 #define TODO printf("%s: not implemented!\n", __func__)
65 /* IRQ <-> event-channel mappings. */
66 static int evtchn_to_irq[NR_EVENT_CHANNELS];
68 /* Packed IRQ information: binding type, sub-type index, and event channel. */
69 static uint32_t irq_info[NR_IRQS];
/*
 * irq_info[] packs three fields into 32 bits, laid out as
 * [ type : _IRQT_BITS | index : _INDEX_BITS | evtchn : _EVTCHN_BITS ].
 */
84 #define _EVTCHN_BITS 12
85 #define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)
87 /* Constructor for packed IRQ information. */
88 static inline uint32_t
89 mk_irq_info(uint32_t type, uint32_t index, uint32_t evtchn)
92 	return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
95 /* Constructor for packed IRQ information. */
97 /* Convenient shorthand for packed representation of an unbound IRQ. */
98 #define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
101  * Accessors for packed IRQ information.
/* Extract the event-channel field (low _EVTCHN_BITS bits). */
104 static inline unsigned int evtchn_from_irq(int irq)
106 	return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
/* Extract the sub-type index field (middle _INDEX_BITS bits). */
109 static inline unsigned int index_from_irq(int irq)
111 	return (irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
/* Extract the binding-type field (top _IRQT_BITS bits). */
114 static inline unsigned int type_from_irq(int irq)
116 	return irq_info[irq] >> (32 - _IRQT_BITS);
120 /* IRQ <-> VIRQ mapping. */
122 /* IRQ <-> IPI mapping. */
125 #error "NR_IPIS not defined"
130 /* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
/*
 * NOTE(review): the array is sized NR_PIRQS / sizeof(unsigned long), i.e.
 * divided by the BYTE size of a long rather than its bit width — this
 * over-allocates (harmless) but looks unintentional; verify against NR_PIRQS.
 */
131 static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
133 /* Reference counts for bindings to IRQs. */
134 static int irq_bindcount[NR_IRQS];
/* Event channel 0 is never used, so 0 doubles as the "unbound" sentinel. */
136 #define VALID_EVTCHN(_chn) ((_chn) != 0)
/* Per-channel owning CPU, and per-CPU bitmask of channels bound to it. */
140 static uint8_t cpu_evtchn[NR_EVENT_CHANNELS];
141 static unsigned long cpu_evtchn_mask[MAX_VIRT_CPUS][NR_EVENT_CHANNELS/LONG_BIT];
/* Pending, bound-to-this-cpu, and not masked => deliverable on this CPU. */
143 #define active_evtchns(cpu,sh,idx) \
144 ((sh)->evtchn_pending[idx] & \
145 cpu_evtchn_mask[cpu][idx] & \
146 ~(sh)->evtchn_mask[idx])
/* Move channel `chn` from its current CPU's mask to `cpu`'s mask. */
148 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
150 	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
151 	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
152 	cpu_evtchn[chn] = cpu;
/* Reset all channels to notify CPU#0 (all bits set in CPU0's mask). */
155 static void init_evtchn_cpu_bindings(void)
157 	/* By default all event channels notify CPU#0. */
158 	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
159 	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]))
162 #define cpu_from_evtchn(evtchn) (cpu_evtchn[evtchn])
/*
 * NOTE(review): the definitions below are presumably the uniprocessor
 * (#else/!SMP) variants — the surrounding #ifdef lines are elided here.
 */
166 #define active_evtchns(cpu,sh,idx) \
167 ((sh)->evtchn_pending[idx] & \
168 ~(sh)->evtchn_mask[idx])
169 #define bind_evtchn_to_cpu(chn,cpu) ((void)0)
170 #define init_evtchn_cpu_bindings() ((void)0)
171 #define cpu_from_evtchn(evtchn) (0)
177  * Force a proper event-channel callback from Xen after clearing the
178  * callback mask. We do this in a very simple manner, by making a call
179  * down into Xen. The pending flag will be checked by Xen on return.
181 void force_evtchn_callback(void)
/* Any cheap hypercall will do; xen_version is a no-op query. */
183 	(void)HYPERVISOR_xen_version(0, NULL);
/*
 * Main event-channel upcall dispatcher, invoked from the hypervisor
 * callback.  Scans the two-level pending bitmap (selector word l1, then
 * per-word l2) and runs the registered handler for each active channel;
 * channels with no IRQ binding are forwarded to the /dev/evtchn device.
 */
187 evtchn_do_upcall(struct trapframe *frame)
189 	unsigned long  l1, l2;
190 	unsigned int   l1i, l2i, port;
193 	vcpu_info_t   *vcpu_info;
195 	cpu = PCPU_GET(cpuid);
196 	s = HYPERVISOR_shared_info;
197 	vcpu_info = &s->vcpu_info[cpu];
/* Clear the master pending flag before sampling the selector. */
199 	vcpu_info->evtchn_upcall_pending = 0;
201 	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
202 	l1 = xen_xchg(&vcpu_info->evtchn_pending_sel, 0);
208 		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
/* Channel number = word index * bits-per-word + bit index. */
211 			port = (l1i * LONG_BIT) + l2i;
212 			if ((irq = evtchn_to_irq[port]) != -1) {
213 				struct intsrc *isrc = intr_lookup_source(irq);
220 				intr_execute_handlers(isrc, frame);
/* No IRQ bound: hand the event to the userland evtchn device. */
222 				evtchn_device_upcall(port);
229  * Send an IPI from the current CPU to the destination CPU.
232 ipi_pcpu(unsigned int cpu, int vector)
/* Look up the destination CPU's IRQ for this IPI vector, then notify it. */
236 	irq = pcpu_find(cpu)->pc_ipi_to_irq[vector];
238 	notify_remote_via_irq(irq);
/*
 * Return the first IRQ in the dynamic range with no bindings, or panic
 * if the dynamic IRQ space is exhausted.
 */
242 find_unbound_irq(void)
246 	for (dynirq = 0; dynirq < NR_IRQS; dynirq++) {
247 		irq = dynirq_to_irq(dynirq);
248 		if (irq_bindcount[irq] == 0)
253 	panic("No available IRQ to bind to: increase NR_IRQS!\n");
/*
 * Bind an already-allocated event channel (caller_port) to an IRQ,
 * allocating a fresh IRQ on first use and bumping the refcount otherwise.
 * The bound port is returned through *port.
 */
259 bind_caller_port_to_irq(unsigned int caller_port, int * port)
263 	mtx_lock_spin(&irq_mapping_update_lock);
265 	if ((irq = evtchn_to_irq[caller_port]) == -1) {
266 		if ((irq = find_unbound_irq()) < 0)
269 		evtchn_to_irq[caller_port] = irq;
270 		irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
273 	irq_bindcount[irq]++;
277 	mtx_unlock_spin(&irq_mapping_update_lock);
/*
 * Bind a locally-allocated event channel to a fresh IRQ.  Unlike the
 * caller-port variant the channel must not already have a binding; on
 * IRQ exhaustion the channel is closed again before returning.
 */
282 bind_local_port_to_irq(unsigned int local_port, int * port)
286 	mtx_lock_spin(&irq_mapping_update_lock);
288 	KASSERT(evtchn_to_irq[local_port] == -1,
289 	    ("evtchn_to_irq inconsistent"));
291 	if ((irq = find_unbound_irq()) < 0) {
292 		struct evtchn_close close = { .port = local_port };
/* Give the port back to Xen; we cannot service it without an IRQ. */
293 		HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
298 	evtchn_to_irq[local_port] = irq;
299 	irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
300 	irq_bindcount[irq]++;
304 	mtx_unlock_spin(&irq_mapping_update_lock);
/*
 * Allocate an unbound channel for remote_domain to connect to, then bind
 * it to an IRQ.  Returns 0 on success or the hypercall error.
 */
309 bind_listening_port_to_irq(unsigned int remote_domain, int * port)
311 	struct evtchn_alloc_unbound alloc_unbound;
314 	alloc_unbound.dom        = DOMID_SELF;
315 	alloc_unbound.remote_dom = remote_domain;
317 	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
/* GNU "elvis" operator: returns err when non-zero, else the bind result. */
320 	return err ? : bind_local_port_to_irq(alloc_unbound.port, port);
/*
 * Connect to a specific (remote_domain, remote_port) channel and bind the
 * resulting local port to an IRQ.
 */
324 bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
325     unsigned int remote_port, int * port)
327 	struct evtchn_bind_interdomain bind_interdomain;
330 	bind_interdomain.remote_dom  = remote_domain;
331 	bind_interdomain.remote_port = remote_port;
333 	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
336 	return err ? : bind_local_port_to_irq(bind_interdomain.local_port, port);
/*
 * Bind a virtual IRQ (VIRQ) on the given vcpu to an IRQ, reusing an
 * existing per-cpu binding when present; refcounted like the others.
 */
340 bind_virq_to_irq(unsigned int virq, unsigned int cpu, int * port)
342 	struct evtchn_bind_virq bind_virq;
345 	mtx_lock_spin(&irq_mapping_update_lock);
347 	if ((irq = pcpu_find(cpu)->pc_virq_to_irq[virq]) == -1) {
348 		if ((irq = find_unbound_irq()) < 0)
351 		bind_virq.virq = virq;
352 		bind_virq.vcpu = cpu;
/* NOTE(review): hypercall return value is not checked here. */
353 		HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
355 		evtchn = bind_virq.port;
357 		evtchn_to_irq[evtchn] = irq;
358 		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
360 		pcpu_find(cpu)->pc_virq_to_irq[virq] = irq;
362 		bind_evtchn_to_cpu(evtchn, cpu);
365 	irq_bindcount[irq]++;
368 	mtx_unlock_spin(&irq_mapping_update_lock);
/*
 * Bind an IPI vector on the given vcpu to an IRQ.  Mirrors
 * bind_virq_to_irq(); the bind_ipi.vcpu assignment presumably sits on an
 * elided line before the hypercall — verify in the full source.
 */
375 bind_ipi_to_irq(unsigned int ipi, unsigned int cpu, int * port)
377 	struct evtchn_bind_ipi bind_ipi;
381 	mtx_lock_spin(&irq_mapping_update_lock);
383 	if ((irq = pcpu_find(cpu)->pc_ipi_to_irq[ipi]) == -1) {
384 		if ((irq = find_unbound_irq()) < 0)
388 		HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
389 		evtchn = bind_ipi.port;
391 		evtchn_to_irq[evtchn] = irq;
392 		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
394 		pcpu_find(cpu)->pc_ipi_to_irq[ipi] = irq;
396 		bind_evtchn_to_cpu(evtchn, cpu);
398 		irq_bindcount[irq]++;
402 	mtx_unlock_spin(&irq_mapping_update_lock);
/*
 * Drop one reference on `irq`; when the last reference goes away, close
 * the underlying event channel and tear down all mapping state.
 */
409 unbind_from_irq(int irq)
411 	struct evtchn_close close;
412 	int evtchn = evtchn_from_irq(irq);
415 	mtx_lock_spin(&irq_mapping_update_lock);
417 	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
/* NOTE(review): close.port is presumably set on an elided line. */
419 		HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
/* VIRQ/IPI bindings also keep a per-cpu back-pointer; clear it. */
421 		switch (type_from_irq(irq)) {
423 			cpu = cpu_from_evtchn(evtchn);
424 			pcpu_find(cpu)->pc_virq_to_irq[index_from_irq(irq)] = -1;
427 			cpu = cpu_from_evtchn(evtchn);
428 			pcpu_find(cpu)->pc_ipi_to_irq[index_from_irq(irq)] = -1;
434 		/* Closed ports are implicitly re-bound to VCPU0. */
435 		bind_evtchn_to_cpu(evtchn, 0);
437 		evtchn_to_irq[evtchn] = -1;
438 		irq_info[irq] = IRQ_UNBOUND;
441 	mtx_unlock_spin(&irq_mapping_update_lock);
/*
 * The bind_*_to_irqhandler() family: each binds a channel to an IRQ via
 * the matching bind_*_to_irq() helper, registers the interrupt source,
 * and attaches the caller's handler; on intr_add_handler() failure the
 * IRQ binding is rolled back with unbind_from_irq().
 */
445 bind_caller_port_to_irqhandler(unsigned int caller_port,
446     const char *devname, driver_intr_t handler, void *arg,
447     unsigned long irqflags, unsigned int *irqp)
453 	irq = bind_caller_port_to_irq(caller_port, &port);
454 	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
/* NULL filter: handler runs in ithread context only. */
455 	error = intr_add_handler(devname, irq, NULL, handler, arg, irqflags,
456 	    &xp->xp_pins[irq].xp_cookie);
459 		unbind_from_irq(irq);
472 bind_listening_port_to_irqhandler(unsigned int remote_domain,
473     const char *devname, driver_intr_t handler, void *arg,
474     unsigned long irqflags, unsigned int *irqp)
480 	irq = bind_listening_port_to_irq(remote_domain, &port);
481 	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
482 	error = intr_add_handler(devname, irq, NULL, handler, arg, irqflags,
483 	    &xp->xp_pins[irq].xp_cookie);
485 		unbind_from_irq(irq);
497 bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
498     unsigned int remote_port, const char *devname,
499     driver_intr_t handler, void *arg, unsigned long irqflags,
506 	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port, &port);
507 	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
508 	error = intr_add_handler(devname, irq, NULL, handler, arg,
509 	    irqflags, &xp->xp_pins[irq].xp_cookie);
511 		unbind_from_irq(irq);
/* VIRQ variant accepts both a filter (interrupt context) and a handler. */
523 bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
524     const char *devname, driver_filter_t filter, driver_intr_t handler,
525     void *arg, unsigned long irqflags, unsigned int *irqp)
531 	irq = bind_virq_to_irq(virq, cpu, &port);
532 	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
533 	error = intr_add_handler(devname, irq, filter, handler,
534 	    arg, irqflags, &xp->xp_pins[irq].xp_cookie);
536 		unbind_from_irq(irq);
/* IPI variant: filter only, no ithread handler. */
548 bind_ipi_to_irqhandler(unsigned int ipi, unsigned int cpu,
549     const char *devname, driver_filter_t filter,
550     unsigned long irqflags, unsigned int *irqp)
556 	irq = bind_ipi_to_irq(ipi, cpu, &port);
557 	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
558 	error = intr_add_handler(devname, irq, filter, NULL,
559 	    NULL, irqflags, &xp->xp_pins[irq].xp_cookie);
561 		unbind_from_irq(irq);
/* Detach the handler, then drop the IRQ binding reference. */
573 unbind_from_irqhandler(unsigned int irq)
575 	intr_remove_handler(xp->xp_pins[irq].xp_cookie);
576 	unbind_from_irq(irq);
580 /* Rebind an evtchn so that it gets delivered to a specific cpu */
582 rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
/*
 * NOTE(review): this uses the old single-argument hypercall interface
 * (evtchn_op_t with embedded cmd), unlike the two-argument
 * HYPERVISOR_event_channel_op(EVTCHNOP_*, ...) form used elsewhere in
 * this file — verify which interface the build actually provides.
 */
584 	evtchn_op_t op = { .cmd = EVTCHNOP_bind_vcpu };
587 	mtx_lock_spin(&irq_mapping_update_lock);
589 	evtchn = evtchn_from_irq(irq);
590 	if (!VALID_EVTCHN(evtchn)) {
591 		mtx_unlock_spin(&irq_mapping_update_lock);
595 	/* Send future instances of this interrupt to other vcpu. */
596 	bind_vcpu.port = evtchn;
597 	bind_vcpu.vcpu = tcpu;
600 	 * If this fails, it usually just indicates that we're dealing with a
601 	 * virq or IPI channel, which don't actually need to be rebound. Ignore
602 	 * it, but don't do the xenlinux-level rebind in that case.
604 	if (HYPERVISOR_event_channel_op(&op) >= 0)
605 		bind_evtchn_to_cpu(evtchn, tcpu);
607 	mtx_unlock_spin(&irq_mapping_update_lock);
/* Route `irq` to the lowest CPU set in `dest` (ffs is 1-based, hence -1). */
611 static void set_affinity_irq(unsigned irq, cpumask_t dest)
613 	unsigned tcpu = ffs(dest) - 1;
614 	rebind_irq_to_cpu(irq, tcpu);
619  * Interface to generic handling in intr_machdep.c
623 /*------------ interrupt handling --------------------------------------*/
/* NOTE(review): duplicate of the TODO macro defined earlier (line 63);
 * the second definition will draw a redefinition warning. */
624 #define TODO printf("%s: not implemented!\n", __func__)
/* PIC method forward declarations for the dynamic-IRQ pic. */
627 static void     xenpic_dynirq_enable_source(struct intsrc *isrc);
628 static void     xenpic_dynirq_disable_source(struct intsrc *isrc, int);
629 static void     xenpic_dynirq_eoi_source(struct intsrc *isrc);
630 static void     xenpic_dynirq_enable_intr(struct intsrc *isrc);
631 static void     xenpic_dynirq_disable_intr(struct intsrc *isrc);
/* PIC method forward declarations for the physical-IRQ pic. */
633 static void     xenpic_pirq_enable_source(struct intsrc *isrc);
634 static void     xenpic_pirq_disable_source(struct intsrc *isrc, int);
635 static void     xenpic_pirq_eoi_source(struct intsrc *isrc);
636 static void     xenpic_pirq_enable_intr(struct intsrc *isrc);
/* Methods shared by both pics. */
639 static int      xenpic_vector(struct intsrc *isrc);
640 static int      xenpic_source_pending(struct intsrc *isrc);
641 static void     xenpic_suspend(struct pic* pic);
642 static void     xenpic_resume(struct pic* pic);
643 static int      xenpic_assign_cpu(struct intsrc *, u_int apic_id);
/* Method table for dynamic (event-channel) IRQs; no CPU assignment hook. */
646 struct pic xenpic_dynirq_template  =  {
647 	.pic_enable_source	=	xenpic_dynirq_enable_source,
648 	.pic_disable_source	=	xenpic_dynirq_disable_source,
649 	.pic_eoi_source		=	xenpic_dynirq_eoi_source,
650 	.pic_enable_intr	=	xenpic_dynirq_enable_intr,
651 	.pic_disable_intr	=	xenpic_dynirq_disable_intr,
652 	.pic_vector		=	xenpic_vector,
653 	.pic_source_pending	=	xenpic_source_pending,
654 	.pic_suspend		=	xenpic_suspend,
655 	.pic_resume		=	xenpic_resume
/* Method table for physical IRQs routed through Xen (PIRQs). */
658 struct pic xenpic_pirq_template  =  {
659 	.pic_enable_source	=	xenpic_pirq_enable_source,
660 	.pic_disable_source	=	xenpic_pirq_disable_source,
661 	.pic_eoi_source		=	xenpic_pirq_eoi_source,
662 	.pic_enable_intr	=	xenpic_pirq_enable_intr,
663 	.pic_vector		=	xenpic_vector,
664 	.pic_source_pending	=	xenpic_source_pending,
665 	.pic_suspend		=	xenpic_suspend,
666 	.pic_resume		=	xenpic_resume,
667 	.pic_assign_cpu		=	xenpic_assign_cpu
/*
 * Dynamic-IRQ pic methods.  enable/disable_source mask and unmask the
 * backing event channel and track it in xp_masked; eoi re-unmasks.
 * NOTE(review): the local `xp` in these methods shadows the file-scope
 * `struct xenpic *xp` — legal, but worth renaming in a real cleanup.
 */
673 xenpic_dynirq_enable_source(struct intsrc *isrc)
676 	struct xenpic_intsrc *xp;
678 	xp = (struct xenpic_intsrc *)isrc;
680 	mtx_lock_spin(&irq_mapping_update_lock);
682 	irq = xenpic_vector(isrc);
683 	unmask_evtchn(evtchn_from_irq(irq));
684 	xp->xp_masked = FALSE;
686 	mtx_unlock_spin(&irq_mapping_update_lock);
690 xenpic_dynirq_disable_source(struct intsrc *isrc, int foo)
693 	struct xenpic_intsrc *xp;
695 	xp = (struct xenpic_intsrc *)isrc;
697 	mtx_lock_spin(&irq_mapping_update_lock);
/* Skip the mask hypercall path if we are already masked. */
698 	if (!xp->xp_masked) {
699 		irq = xenpic_vector(isrc);
700 		mask_evtchn(evtchn_from_irq(irq));
701 		xp->xp_masked = TRUE;
703 	mtx_unlock_spin(&irq_mapping_update_lock);
707 xenpic_dynirq_enable_intr(struct intsrc *isrc)
710 	struct xenpic_intsrc *xp;
/* NOTE(review): `xp` is assigned but unused in the visible lines. */
712 	xp = (struct xenpic_intsrc *)isrc;
713 	mtx_lock_spin(&irq_mapping_update_lock);
715 	irq = xenpic_vector(isrc);
716 	unmask_evtchn(evtchn_from_irq(irq));
717 	mtx_unlock_spin(&irq_mapping_update_lock);
721 xenpic_dynirq_disable_intr(struct intsrc *isrc)
724 	struct xenpic_intsrc *xp;
726 	xp = (struct xenpic_intsrc *)isrc;
727 	mtx_lock_spin(&irq_mapping_update_lock);
728 	irq = xenpic_vector(isrc);
729 	mask_evtchn(evtchn_from_irq(irq));
731 	mtx_unlock_spin(&irq_mapping_update_lock);
/* EOI for edge-style event channels is simply re-unmasking. */
735 xenpic_dynirq_eoi_source(struct intsrc *isrc)
738 	struct xenpic_intsrc *xp;
740 	xp = (struct xenpic_intsrc *)isrc;
741 	mtx_lock_spin(&irq_mapping_update_lock);
743 	irq = xenpic_vector(isrc);
744 	unmask_evtchn(evtchn_from_irq(irq));
745 	mtx_unlock_spin(&irq_mapping_update_lock);
/* Return the pin's IRQ number (stored in xp_vector at init time). */
749 xenpic_vector(struct intsrc *isrc)
751 	struct xenpic_intsrc *pin;
753 	pin = (struct xenpic_intsrc *)isrc;
754 	//printf("xenpic_vector(): isrc=%p,vector=%u\n", pin, pin->xp_vector);
756 	return (pin->xp_vector);
/* Diagnostic stub: only logs; no real pending query is implemented. */
760 xenpic_source_pending(struct intsrc *isrc)
762 	struct xenpic_intsrc *pin = (struct xenpic_intsrc *)isrc;
765 	printf("xenpic_source_pending(): vector=%x,masked=%x\n",
766 	    pin->xp_vector, pin->xp_masked);
768 	/* notify_remote_via_evtchn(pin->xp_vector); // XXX RS: Is this correct? */
/* Suspend/resume/assign_cpu are stubs in the visible lines. */
773 xenpic_suspend(struct pic* pic)
779 xenpic_resume(struct pic* pic)
785 xenpic_assign_cpu(struct intsrc *isrc, u_int apic_id)
/*
 * Notify the remote end of the channel bound to `irq`.
 * NOTE(review): the panic message says "invalid evtchn" but the value
 * printed is the IRQ number — misleading on a crash dump; consider
 * rewording to "invalid irq %d" in the full source.
 */
792 notify_remote_via_irq(int irq)
794 	int evtchn = evtchn_from_irq(irq);
796 	if (VALID_EVTCHN(evtchn))
797 		notify_remote_via_evtchn(evtchn);
799 		panic("invalid evtchn %d", irq);
802 /* required for support of physical devices */
/* Issue a PHYSDEVOP_eoi only for PIRQs flagged as needing unmask notify. */
804 pirq_unmask_notify(int pirq)
806 	struct physdev_eoi eoi = { .irq = pirq };
808 	if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
809 		(void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
/*
 * Ask Xen whether this PIRQ needs an explicit unmask notification and
 * cache the answer in the pirq_needs_unmask_notify bitmap.
 */
814 pirq_query_unmask(int pirq)
816 	struct physdev_irq_status_query irq_status_query;
818 	irq_status_query.irq = pirq;
819 	(void)HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY, &irq_status_query);
/* Clear first so a stale bit never survives a changed answer. */
820 	clear_bit(pirq, &pirq_needs_unmask_notify[0]);
821 	if ( irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
822 		set_bit(pirq, &pirq_needs_unmask_notify[0]);
826  * On startup, if there is no action associated with the IRQ then we are
827  * probing. In this case we should not share with others as it will confuse us.
/*
 * BUG FIX: the macro parameter is `_irq` but the expansion referenced a
 * bare `irq`, silently capturing whatever variable named `irq` exists at
 * the call site.  All visible callers pass a local named `irq`, so this
 * is behavior-compatible; the macro now uses (and parenthesizes) its own
 * parameter so any caller works.
 */
829 #define probing_irq(_irq)        (intr_lookup_source((_irq)) == NULL)
/*
 * Physical-IRQ (PIRQ) pic methods.  enable_intr lazily binds the PIRQ to
 * an event channel on first enable; enable_source/eoi unmask and issue
 * the Xen unmask-notify when required; disable_source is a no-op beyond
 * validation in the visible lines.
 */
832 xenpic_pirq_enable_intr(struct intsrc *isrc)
834 	struct evtchn_bind_pirq bind_pirq;
838 	mtx_lock_spin(&irq_mapping_update_lock);
839 	irq = xenpic_vector(isrc);
840 	evtchn = evtchn_from_irq(irq);
/* Already bound on a previous enable: nothing to do. */
842 	if (VALID_EVTCHN(evtchn))
845 	bind_pirq.pirq  = irq;
846 	/* NB. We are happy to share unless we are probing. */
847 	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
849 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
850 #ifndef XEN_PRIVILEGED_GUEST
/* Only dom0 may bind physical IRQs; anything else is a logic error. */
851 		panic("unexpected pirq call");
853 		if (!probing_irq(irq)) /* Some failures are expected when probing. */
854 			printf("Failed to obtain physical IRQ %d\n", irq);
855 		mtx_unlock_spin(&irq_mapping_update_lock);
858 	evtchn = bind_pirq.port;
860 	pirq_query_unmask(irq_to_pirq(irq));
862 	bind_evtchn_to_cpu(evtchn, 0);
863 	evtchn_to_irq[evtchn] = irq;
864 	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
867 	unmask_evtchn(evtchn);
868 	pirq_unmask_notify(irq_to_pirq(irq));
869 	mtx_unlock_spin(&irq_mapping_update_lock);
873 xenpic_pirq_enable_source(struct intsrc *isrc)
878 	mtx_lock_spin(&irq_mapping_update_lock);
879 	irq = xenpic_vector(isrc);
880 	evtchn = evtchn_from_irq(irq);
882 	if (!VALID_EVTCHN(evtchn))
885 	unmask_evtchn(evtchn);
886 	pirq_unmask_notify(irq_to_pirq(irq));
888 	mtx_unlock_spin(&irq_mapping_update_lock);
892 xenpic_pirq_disable_source(struct intsrc *isrc, int eoi)
897 	mtx_lock_spin(&irq_mapping_update_lock);
898 	irq = xenpic_vector(isrc);
899 	evtchn = evtchn_from_irq(irq);
901 	if (!VALID_EVTCHN(evtchn))
906 	mtx_unlock_spin(&irq_mapping_update_lock);
911 xenpic_pirq_eoi_source(struct intsrc *isrc)
916 	mtx_lock_spin(&irq_mapping_update_lock);
917 	irq = xenpic_vector(isrc);
918 	evtchn = evtchn_from_irq(irq);
920 	if (!VALID_EVTCHN(evtchn))
923 	unmask_evtchn(evtchn);
924 	pirq_unmask_notify(irq_to_pirq(irq));
926 	mtx_unlock_spin(&irq_mapping_update_lock);
/* Public accessor: event-channel port currently bound to `irq` (0 if none). */
930 irq_to_evtchn_port(int irq)
932 	return evtchn_from_irq(irq);
/* Atomically set the channel's mask bit in the shared-info page. */
936 mask_evtchn(int port)
938 	shared_info_t *s = HYPERVISOR_shared_info;
939 	synch_set_bit(port, &s->evtchn_mask[0]);
/*
 * Unmask an event channel.  Channels bound to another vcpu must be
 * unmasked by Xen (hypercall); local channels are unmasked directly, and
 * a lost "edge" is re-raised by hand if the channel was already pending.
 */
943 unmask_evtchn(int port)
945 	shared_info_t *s = HYPERVISOR_shared_info;
946 	unsigned int cpu = PCPU_GET(cpuid);
947 	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
949 	/* Slow path (hypercall) if this is a non-local port. */
950 	if (unlikely(cpu != cpu_from_evtchn(port))) {
951 		struct evtchn_unmask unmask = { .port = port };
952 		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
/* NOTE(review): mask_evtchn() writes &s->evtchn_mask[0]; same address,
 * but the pointer type differs — worth normalizing in a cleanup. */
956 	synch_clear_bit(port, &s->evtchn_mask);
959 	 * The following is basically the equivalent of 'hw_resend_irq'. Just
960 	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
/* Set the selector bit for this port's word, then kick the upcall. */
963 	if (synch_test_bit(port, &s->evtchn_pending) &&
964 	    !synch_test_and_set_bit(port / LONG_BIT,
965 		&vcpu_info->evtchn_pending_sel)) {
966 		vcpu_info->evtchn_upcall_pending = 1;
967 		if (!vcpu_info->evtchn_upcall_mask)
968 			force_evtchn_callback();
/*
 * Re-establish event-channel bindings after a save/restore cycle.  All
 * pre-suspend channel numbers are invalid; PIRQs must already be unbound,
 * secondary CPUs must hold no VIRQ/IPI bindings, and CPU0's VIRQs and
 * IPIs are re-bound from scratch.
 */
972 void irq_resume(void)
975 	int cpu, pirq, virq, ipi, irq, evtchn;
977 	struct evtchn_bind_virq bind_virq;
978 	struct evtchn_bind_ipi bind_ipi;
980 	init_evtchn_cpu_bindings();
982 	/* New event-channel space is not 'live' yet. */
983 	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
986 	/* Check that no PIRQs are still bound. */
987 	for (pirq = 0; pirq < NR_PIRQS; pirq++) {
988 		KASSERT(irq_info[pirq_to_irq(pirq)] == IRQ_UNBOUND,
989 		    ("pirq_to_irq inconsistent"));
992 	/* Secondary CPUs must have no VIRQ or IPI bindings. */
993 	for (cpu = 1; cpu < MAX_VIRT_CPUS; cpu++) {
994 		for (virq = 0; virq < NR_VIRQS; virq++) {
995 			KASSERT(pcpu_find(cpu)->pc_virq_to_irq[virq] == -1,
996 			    ("virq_to_irq inconsistent"));
998 		for (ipi = 0; ipi < NR_IPIS; ipi++) {
999 			KASSERT(pcpu_find(cpu)->pc_ipi_to_irq[ipi] == -1,
1000 			    ("ipi_to_irq inconsistent"));
1004 	/* No IRQ <-> event-channel mappings. */
1005 	for (irq = 0; irq < NR_IRQS; irq++)
/* NOTE(review): ~0xFFFF clears 16 low bits, but the evtchn field is only
 * _EVTCHN_BITS (12) wide — this also wipes the low 4 index bits; the
 * KASSERTs below compare against index with evtchn 0, so verify intent. */
1006 		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
1007 	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
1008 		evtchn_to_irq[evtchn] = -1;
1010 	/* Primary CPU: rebind VIRQs automatically. */
1011 	for (virq = 0; virq < NR_VIRQS; virq++) {
1012 		if ((irq = pcpu_find(0)->pc_virq_to_irq[virq]) == -1)
1015 		KASSERT(irq_info[irq] == mk_irq_info(IRQT_VIRQ, virq, 0),
1016 		    ("irq_info inconsistent"));
1018 		/* Get a new binding from Xen. */
1019 		bind_virq.virq = virq;
1021 		HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
1022 		evtchn = bind_virq.port;
1024 		/* Record the new mapping. */
1025 		evtchn_to_irq[evtchn] = irq;
1026 		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
1028 		/* Ready for use. */
1029 		unmask_evtchn(evtchn);
1032 	/* Primary CPU: rebind IPIs automatically. */
1033 	for (ipi = 0; ipi < NR_IPIS; ipi++) {
1034 		if ((irq = pcpu_find(0)->pc_ipi_to_irq[ipi]) == -1)
1037 		KASSERT(irq_info[irq] == mk_irq_info(IRQT_IPI, ipi, 0),
1038 		    ("irq_info inconsistent"));
1040 		/* Get a new binding from Xen. */
/* NOTE(review): `op` is not declared in the visible lines; presumably
 * bind_ipi is the structure actually passed — confirm in full source. */
1041 		memset(&op, 0, sizeof(op));
1043 		HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
1044 		evtchn = bind_ipi.port;
1046 		/* Record the new mapping. */
1047 		evtchn_to_irq[evtchn] = irq;
1048 		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
1050 		/* Ready for use. */
1051 		unmask_evtchn(evtchn);
/*
 * One-time initialization (SYSINIT, SI_SUB_INTR): clear all binding
 * tables, allocate the xenpic with one pin per IRQ, register both pics,
 * then initialize the dynamic-IRQ pins and claim every PIRQ pin.
 */
1056 evtchn_init(void *dummy __unused)
1059 	struct xenpic_intsrc *pin, *tpin;
1062 	init_evtchn_cpu_bindings();
1064 	/* No VIRQ or IPI bindings. */
1065 	for (cpu = 0; cpu < mp_ncpus; cpu++) {
1066 		for (i = 0; i < NR_VIRQS; i++)
1067 			pcpu_find(cpu)->pc_virq_to_irq[i] = -1;
1068 		for (i = 0; i < NR_IPIS; i++)
1069 			pcpu_find(cpu)->pc_ipi_to_irq[i] = -1;
1072 	/* No event-channel -> IRQ mappings. */
1073 	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
1074 		evtchn_to_irq[i] = -1;
1075 		mask_evtchn(i); /* No event channels are 'live' right now. */
1078 	/* No IRQ -> event-channel mappings. */
1079 	for (i = 0; i < NR_IRQS; i++)
1080 		irq_info[i] = IRQ_UNBOUND;
/* M_WAITOK: cannot fail; runs at boot before interrupts are needed. */
1082 	xp = malloc(sizeof(struct xenpic) + NR_IRQS*sizeof(struct xenpic_intsrc),
1083 		    M_DEVBUF, M_WAITOK);
1085 	xp->xp_dynirq_pic = &xenpic_dynirq_template;
1086 	xp->xp_pirq_pic = &xenpic_pirq_template;
1087 	xp->xp_numintr = NR_IRQS;
1088 	bzero(xp->xp_pins, sizeof(struct xenpic_intsrc) * NR_IRQS);
1091 	/* We need to register our PIC's beforehand */
1092 	if (intr_register_pic(&xenpic_pirq_template))
1093 		panic("XEN: intr_register_pic() failure");
1094 	if (intr_register_pic(&xenpic_dynirq_template))
1095 		panic("XEN: intr_register_pic() failure");
1098 	 * Initialize the dynamic IRQ's - we initialize the structures, but
1099 	 * we do not bind them (bind_evtchn_to_irqhandle() does this)
1102 	for (i = 0; i < NR_DYNIRQS; i++) {
1103 		/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
1104 		irq_bindcount[dynirq_to_irq(i)] = 0;
/* NOTE(review): `pin` is presumably set to xp->xp_pins on an elided line. */
1106 		tpin = &pin[dynirq_to_irq(i)];
1107 		tpin->xp_intsrc.is_pic = xp->xp_dynirq_pic;
1108 		tpin->xp_vector = dynirq_to_irq(i);
1112 	 * Now, we go ahead and claim every PIRQ there is.
1115 	for (i = 0; i < NR_PIRQS; i++) {
1116 		/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
1117 		irq_bindcount[pirq_to_irq(i)] = 0;
1120 		/* If not domain 0, force our RTC driver to fail its probe. */
1121 		if ((i == RTC_IRQ) &&
1122 		    !(xen_start_info->flags & SIF_INITDOMAIN))
1125 		tpin = &pin[pirq_to_irq(i)];
1126 		tpin->xp_intsrc.is_pic = xp->xp_pirq_pic;
1127 		tpin->xp_vector = pirq_to_irq(i);
/* Run evtchn_init() during interrupt-subsystem bring-up. */
1132 SYSINIT(evtchn_init, SI_SUB_INTR, SI_ORDER_MIDDLE, evtchn_init, NULL);
1134  * irq_mapping_update_lock: in order to allow an interrupt to occur in a critical
1135  *			section, to set pcpu->ipending (etc...) properly, we
1136  *			must be able to get the icu lock, so it can't be
/* MTX_SPIN: taken from interrupt/upcall context throughout this file. */
1140 MTX_SYSINIT(irq_mapping_update_lock, &irq_mapping_update_lock, "xp", MTX_SPIN);