/******************************************************************************
 * Xen event and interrupt services for x86 HVM guests.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 * Copyright (c) 2012, Spectra Logic Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <x86/apicreg.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#include <machine/xen/synch_bitops.h>
#include <machine/xen/xen-os.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn/evtchnvar.h>

#include <dev/xen/xenpci/xenpcivar.h>
#include <dev/pci/pcivar.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");

static u_int first_evtchn_irq;
/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
	/**
	 * The last event channel bitmap section (level one bit) processed.
	 * This is used to ensure we scan all ports before
	 * servicing an already serviced port again.
	 */
	u_int	last_processed_l1i;

	/**
	 * The last event channel processed within the event channel
	 * bitmap being scanned.
	 */
	u_int	last_processed_l2i;

	/** Pointer to this CPU's interrupt statistic counter. */
	u_long	*evtchn_intrcnt;

	/**
	 * A bitmap of ports that can be serviced from this CPU.
	 * A set bit means interrupt handling is enabled.
	 */
	u_long	evtchn_enabled[sizeof(u_long) * 8];
};
/*
 * Start the scan at port 0 by initializing the last scanned
 * location as the highest numbered event channel port.
 */
static DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
	.last_processed_l1i = LONG_BIT - 1,
	.last_processed_l2i = LONG_BIT - 1
};
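/*
 * Worked example of the wrap-around above: the scan loop in
 * xen_intr_handle_upcall() advances with l1i = (l1i + 1) % LONG_BIT, so
 * priming last_processed_l1i/l2i with LONG_BIT - 1 (63 on LP64) makes
 * the first iteration compute (63 + 1) % 64 == 0, i.e. the very first
 * scan starts at bit 0 / port 0.
 */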
DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

#define	XEN_EEXIST		17 /* Xen "already exists" error */
#define	XEN_ALLOCATE_VECTOR	0  /* Allocate a vector for this event channel */
#define	XEN_INVALID_EVTCHN	0  /* Invalid event channel */

#define	is_valid_evtchn(x)	((x) != XEN_INVALID_EVTCHN)
struct xenisrc {
	struct intsrc	xi_intsrc;
	enum evtchn_type xi_type;
	int		xi_cpu;		/* VCPU for delivery. */
	int		xi_vector;	/* Global isrc vector number. */
	evtchn_port_t	xi_port;
	int		xi_pirq;
	int		xi_virq;
	void		*xi_cookie;
	u_int		xi_close:1;	/* close on unbind? */
	u_int		xi_activehi:1;
	u_int		xi_edgetrigger:1;
	u_int		xi_masked:1;
};
static void	xen_intr_suspend(struct pic *);
static void	xen_intr_resume(struct pic *, bool suspend_cancelled);
static void	xen_intr_enable_source(struct intsrc *isrc);
static void	xen_intr_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_eoi_source(struct intsrc *isrc);
static void	xen_intr_enable_intr(struct intsrc *isrc);
static void	xen_intr_disable_intr(struct intsrc *isrc);
static int	xen_intr_vector(struct intsrc *isrc);
static int	xen_intr_source_pending(struct intsrc *isrc);
static int	xen_intr_config_intr(struct intsrc *isrc,
		    enum intr_trigger trig, enum intr_polarity pol);
static int	xen_intr_assign_cpu(struct intsrc *isrc, u_int apic_id);

static void	xen_intr_pirq_enable_source(struct intsrc *isrc);
static void	xen_intr_pirq_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_pirq_eoi_source(struct intsrc *isrc);
static void	xen_intr_pirq_enable_intr(struct intsrc *isrc);
static void	xen_intr_pirq_disable_intr(struct intsrc *isrc);
static int	xen_intr_pirq_config_intr(struct intsrc *isrc,
		    enum intr_trigger trig, enum intr_polarity pol);
#ifdef __amd64__
static void	xenpv_register_pirqs(struct pic *pic);
#endif
/**
 * PIC interface for all event channel port types except physical IRQs.
 */
struct pic xen_intr_pic = {
	.pic_enable_source = xen_intr_enable_source,
	.pic_disable_source = xen_intr_disable_source,
	.pic_eoi_source = xen_intr_eoi_source,
	.pic_enable_intr = xen_intr_enable_intr,
	.pic_disable_intr = xen_intr_disable_intr,
	.pic_vector = xen_intr_vector,
	.pic_source_pending = xen_intr_source_pending,
	.pic_suspend = xen_intr_suspend,
	.pic_resume = xen_intr_resume,
	.pic_config_intr = xen_intr_config_intr,
	.pic_assign_cpu = xen_intr_assign_cpu
};
/**
 * PIC interface for all event channels representing
 * physical interrupt sources.
 */
struct pic xen_intr_pirq_pic = {
#ifdef __amd64__
	.pic_register_sources = xenpv_register_pirqs,
#endif
	.pic_enable_source = xen_intr_pirq_enable_source,
	.pic_disable_source = xen_intr_pirq_disable_source,
	.pic_eoi_source = xen_intr_pirq_eoi_source,
	.pic_enable_intr = xen_intr_pirq_enable_intr,
	.pic_disable_intr = xen_intr_pirq_disable_intr,
	.pic_vector = xen_intr_vector,
	.pic_source_pending = xen_intr_source_pending,
	.pic_config_intr = xen_intr_pirq_config_intr,
	.pic_assign_cpu = xen_intr_assign_cpu
};
static struct mtx	xen_intr_isrc_lock;
static u_int		xen_intr_auto_vector_count;
static struct xenisrc	*xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
static u_long		*xen_intr_pirq_eoi_map;
static boolean_t	xen_intr_pirq_eoi_map_enabled;

/*------------------------- Private Functions --------------------------------*/
/**
 * Disable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param cpu   The CPU on which to mask delivery of the event.
 * \param port  The event channel port to mask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not preclude reception of an event
 *        for this event channel on another CPU.  To mask the
 *        event channel globally, use evtchn_mask().
 */
static void
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	xen_clear_bit(port, pcpu->evtchn_enabled);
}
/**
 * Enable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param cpu   The CPU on which to unmask delivery of the event.
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not guarantee that event delivery
 *        is enabled for this event channel port.  The port must
 *        also be globally enabled.  See evtchn_unmask().
 */
static void
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	xen_set_bit(port, pcpu->evtchn_enabled);
}
/**
 * Allocate and register a per-cpu Xen upcall interrupt counter.
 *
 * \param cpu  The cpu for which to register this interrupt count.
 */
static void
xen_intr_intrcnt_add(u_int cpu)
{
	char buf[MAXCOMLEN + 1];
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	if (pcpu->evtchn_intrcnt != NULL)
		return;

	snprintf(buf, sizeof(buf), "cpu%d:xen", cpu);
	intrcnt_add(buf, &pcpu->evtchn_intrcnt);
}
/**
 * Search for an already allocated but currently unused Xen interrupt
 * source object.
 *
 * \param type  Restrict the search to interrupt sources of the given
 *              type.
 *
 * \return  A pointer to a free Xen interrupt source object or NULL.
 */
static struct xenisrc *
xen_intr_find_unused_isrc(enum evtchn_type type)
{
	int isrc_idx;

	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));

	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
		struct xenisrc *isrc;
		u_int vector;

		vector = first_evtchn_irq + isrc_idx;
		isrc = (struct xenisrc *)intr_lookup_source(vector);
		if (isrc != NULL
		 && isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
			KASSERT(isrc->xi_intsrc.is_handlers == 0,
			    ("Free evtchn still has handlers"));
			isrc->xi_type = type;
			return (isrc);
		}
	}
	return (NULL);
}
/**
 * Allocate a Xen interrupt source object.
 *
 * \param type  The type of interrupt source to create.
 *
 * \return  A pointer to a newly allocated Xen interrupt source
 *          object or NULL.
 */
static struct xenisrc *
xen_intr_alloc_isrc(enum evtchn_type type, int vector)
{
	static int warned;
	struct xenisrc *isrc;

	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));

	if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) {
		if (!warned) {
			warned = 1;
			printf("xen_intr_alloc: Event channels exhausted.\n");
		}
		return (NULL);
	}

	if (type != EVTCHN_TYPE_PIRQ) {
		vector = first_evtchn_irq + xen_intr_auto_vector_count;
		xen_intr_auto_vector_count++;
	}

	KASSERT((intr_lookup_source(vector) == NULL),
	    ("Trying to use an already allocated vector"));

	mtx_unlock(&xen_intr_isrc_lock);
	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
	isrc->xi_intsrc.is_pic =
	    (type == EVTCHN_TYPE_PIRQ) ? &xen_intr_pirq_pic : &xen_intr_pic;
	isrc->xi_vector = vector;
	isrc->xi_type = type;
	intr_register_source(&isrc->xi_intsrc);
	mtx_lock(&xen_intr_isrc_lock);

	return (isrc);
}
/**
 * Attempt to free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  EBUSY if the source is still in use, otherwise 0.
 */
static int
xen_intr_release_isrc(struct xenisrc *isrc)
{

	mtx_lock(&xen_intr_isrc_lock);
	if (isrc->xi_intsrc.is_handlers != 0) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EBUSY);
	}
	evtchn_mask_port(isrc->xi_port);
	evtchn_clear_port(isrc->xi_port);

	/* Rebind port to CPU 0. */
	evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
	evtchn_cpu_unmask_port(0, isrc->xi_port);

	if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
		struct evtchn_close close = { .port = isrc->xi_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
	}

	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	isrc->xi_cpu = 0;
	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
	isrc->xi_port = 0;
	isrc->xi_cookie = NULL;
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
}
/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp       The returned Xen interrupt object associated with
 *                    the specified local port.
 * \param local_port  The event channel to bind.
 * \param type        The event channel type of local_port.
 * \param intr_owner  The device making this bind request.
 * \param filter      An interrupt filter handler.  Specify NULL
 *                    to always dispatch to the ithread handler.
 * \param handler     An interrupt ithread handler.  Optional (can
 *                    specify NULL) if all necessary event actions
 *                    are performed by filter.
 * \param arg         Argument to present to both filter and handler.
 * \param irqflags    Interrupt handler flags.  See sys/bus.h.
 * \param port_handlep  Pointer to an opaque handle used to manage this
 *                      interrupt.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, const char *intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	*isrcp = NULL;
	if (port_handlep == NULL) {
		printf("%s: xen_intr_bind_isrc: Bad event handle\n",
		    intr_owner);
		return (EINVAL);
	}

	mtx_lock(&xen_intr_isrc_lock);
	isrc = xen_intr_find_unused_isrc(type);
	if (isrc == NULL) {
		isrc = xen_intr_alloc_isrc(type, XEN_ALLOCATE_VECTOR);
		if (isrc == NULL) {
			mtx_unlock(&xen_intr_isrc_lock);
			return (ENOSPC);
		}
	}
	isrc->xi_port = local_port;
	xen_intr_port_to_isrc[local_port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

	/* Assign the opaque handler (the event channel port) */
	*port_handlep = &isrc->xi_vector;

#ifdef SMP
	if (type == EVTCHN_TYPE_PORT) {
		/*
		 * By default all interrupts are assigned to vCPU#0
		 * unless specified otherwise, so shuffle them to balance
		 * the interrupt load.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, intr_next_cpu());
	}
#endif

	if (filter == NULL && handler == NULL) {
		/*
		 * No filter/handler provided, leave the event channel
		 * masked and without a valid handler, the caller is
		 * in charge of setting that up.
		 */
		*isrcp = isrc;
		return (0);
	}

	error = xen_intr_add_handler(intr_owner, filter, handler, arg, flags,
	    *port_handlep);
	if (error != 0) {
		xen_intr_release_isrc(isrc);
		return (error);
	}
	*isrcp = isrc;
	return (0);
}
/**
 * Lookup a Xen interrupt source object given an interrupt binding handle.
 *
 * \param handle  A handle initialized by a previous call to
 *                xen_intr_bind_isrc().
 *
 * \returns  A pointer to the Xen interrupt source object associated
 *           with the given interrupt handle.  NULL if no association
 *           exists.
 */
static struct xenisrc *
xen_intr_isrc(xen_intr_handle_t handle)
{
	int vector;

	if (handle == NULL)
		return (NULL);

	vector = *(int *)handle;
	KASSERT(vector >= first_evtchn_irq &&
	    vector < (first_evtchn_irq + xen_intr_auto_vector_count),
	    ("Xen interrupt vector is out of range"));

	return ((struct xenisrc *)intr_lookup_source(vector));
}
/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param sh    The Xen shared info area.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
static inline u_long
xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh,
    u_int idx)
{

	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(sh->evtchn_pending[0]));
	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(pcpu->evtchn_enabled[0]));
	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(sh->evtchn_pending));
	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(pcpu->evtchn_enabled));
	return (sh->evtchn_pending[idx]
	       & ~sh->evtchn_mask[idx]
	       & pcpu->evtchn_enabled[idx]);
}
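/*
 * Informative sketch of the check above: a port is reported on a given
 * CPU only when all three bits line up.  For example, port 68 on LP64
 * (LONG_BIT == 64) lives at idx = 68 / 64 = 1, bit = 68 % 64 = 4, so it
 * is returned when bit 4 is set in evtchn_pending[1], clear in
 * evtchn_mask[1], and set in this CPU's evtchn_enabled[1].  The upcall
 * handler below recovers the port as port = (l1i * LONG_BIT) + l2i.
 */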
/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * \param trap_frame  The trap frame context for the current interrupt.
 */
void
xen_intr_handle_upcall(struct trapframe *trap_frame)
{
	u_int l1i, l2i, port, cpu;
	u_long masked_l1, masked_l2;
	struct xenisrc *isrc;
	shared_info_t *s;
	vcpu_info_t *v;
	struct xen_intr_pcpu_data *pc;
	u_long l1, l2;

	/*
	 * Disable preemption in order to always check and fire events
	 * on the right vCPU.
	 */
	critical_enter();

	cpu = PCPU_GET(cpuid);
	pc = DPCPU_PTR(xen_intr_pcpu);
	s = HYPERVISOR_shared_info;
	v = DPCPU_GET(vcpu_info);

	if (xen_hvm_domain() && !xen_vector_callback_enabled) {
		KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
	}

	v->evtchn_upcall_pending = 0;

#if 0
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
	/* Clear master flag /before/ clearing selector flag. */
	wmb();
#endif
#endif

	l1 = atomic_readandclear_long(&v->evtchn_pending_sel);

	l1i = pc->last_processed_l1i;
	l2i = pc->last_processed_l2i;
	(*pc->evtchn_intrcnt)++;

	while (l1 != 0) {
		l1i = (l1i + 1) % LONG_BIT;
		masked_l1 = l1 & ((~0UL) << l1i);

		if (masked_l1 == 0) {
			/*
			 * if we masked out all events, wrap around
			 * to the beginning.
			 */
			l1i = LONG_BIT - 1;
			l2i = LONG_BIT - 1;
			continue;
		}
		l1i = ffsl(masked_l1) - 1;

		do {
			l2 = xen_intr_active_ports(pc, s, l1i);

			l2i = (l2i + 1) % LONG_BIT;
			masked_l2 = l2 & ((~0UL) << l2i);

			if (masked_l2 == 0) {
				/* if we masked out all events, move on */
				break;
			}
			l2i = ffsl(masked_l2) - 1;

			/* process port */
			port = (l1i * LONG_BIT) + l2i;
			synch_clear_bit(port, &s->evtchn_pending[0]);

			isrc = xen_intr_port_to_isrc[port];
			if (__predict_false(isrc == NULL))
				continue;

			/* Make sure we are firing on the right vCPU */
			KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
			    ("Received unexpected event on vCPU#%d, event bound to vCPU#%d",
			    PCPU_GET(cpuid), isrc->xi_cpu));

			intr_execute_handlers(&isrc->xi_intsrc, trap_frame);

			/*
			 * If this is the final port processed,
			 * we'll pick up here+1 next time.
			 */
			pc->last_processed_l1i = l1i;
			pc->last_processed_l2i = l2i;
		} while (l2i != LONG_BIT - 1);

		l2 = xen_intr_active_ports(pc, s, l1i);
		if (l2 == 0) {
			/*
			 * We handled all ports, so we can clear the
			 * selector bit.
			 */
			l1 &= ~(1UL << l1i);
		}
	}
	critical_exit();
}
static void
xen_intr_init(void *dummy __unused)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	struct xen_intr_pcpu_data *pcpu;
	struct physdev_pirq_eoi_gmfn eoi_gmfn;
	int i, rc;

	if (!xen_domain())
		return;

	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);

	/*
	 * Set the per-cpu mask of CPU#0 to enable all, since by default all
	 * event channels are bound to CPU#0.
	 */
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
	}

	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	/* Try to register PIRQ EOI map */
	xen_intr_pirq_eoi_map = malloc(PAGE_SIZE, M_XENINTR, M_WAITOK | M_ZERO);
	eoi_gmfn.gmfn = atop(vtophys(xen_intr_pirq_eoi_map));
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
	if (rc != 0 && bootverbose)
		printf("Xen interrupts: unable to register PIRQ EOI map\n");
	else
		xen_intr_pirq_eoi_map_enabled = true;

	intr_register_pic(&xen_intr_pic);
	intr_register_pic(&xen_intr_pirq_pic);

	if (bootverbose)
		printf("Xen interrupt system initialized\n");
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);
static void
xen_intrcnt_init(void *dummy __unused)
{
	unsigned int i;

	if (!xen_domain())
		return;

	/*
	 * Register interrupt count manually as we aren't guaranteed to see a
	 * call to xen_intr_assign_cpu() before our first interrupt.
	 */
	CPU_FOREACH(i)
		xen_intr_intrcnt_add(i);
}
SYSINIT(xen_intrcnt_init, SI_SUB_INTR, SI_ORDER_MIDDLE, xen_intrcnt_init, NULL);
void
xen_intr_alloc_irqs(void)
{

	first_evtchn_irq = num_io_irqs;
	num_io_irqs += NR_EVENT_CHANNELS;
}
/*--------------------------- Common PIC Functions ---------------------------*/
/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_suspend(struct pic *unused)
{
}
static void
xen_rebind_ipi(struct xenisrc *isrc)
{
#ifdef SMP
	int cpu = isrc->xi_cpu;
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	int error;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
	    &bind_ipi);
	if (error != 0)
		panic("unable to rebind xen IPI: %d", error);

	isrc->xi_port = bind_ipi.port;
	isrc->xi_cpu = 0;
	xen_intr_port_to_isrc[bind_ipi.port] = isrc;

	error = xen_intr_assign_cpu(&isrc->xi_intsrc,
	    cpu_apic_ids[cpu]);
	if (error)
		panic("unable to bind xen IPI to CPU#%d: %d",
		    cpu, error);

	evtchn_unmask_port(bind_ipi.port);
#else
	panic("Resume IPI event channel on UP");
#endif
}
static void
xen_rebind_virq(struct xenisrc *isrc)
{
	int cpu = isrc->xi_cpu;
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	int error;
	struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
	    .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
	    &bind_virq);
	if (error != 0)
		panic("unable to rebind xen VIRQ#%d: %d", isrc->xi_virq, error);

	isrc->xi_port = bind_virq.port;
	isrc->xi_cpu = 0;
	xen_intr_port_to_isrc[bind_virq.port] = isrc;

#ifdef SMP
	error = xen_intr_assign_cpu(&isrc->xi_intsrc,
	    cpu_apic_ids[cpu]);
	if (error)
		panic("unable to bind xen VIRQ#%d to CPU#%d: %d",
		    isrc->xi_virq, cpu, error);
#endif

	evtchn_unmask_port(bind_virq.port);
}
/**
 * Return this PIC to service after being suspended.
 */
static void
xen_intr_resume(struct pic *unused, bool suspend_cancelled)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	struct xenisrc *isrc;
	u_int isrc_idx;
	int i;

	if (suspend_cancelled)
		return;

	/* Reset the per-CPU masks */
	CPU_FOREACH(i) {
		struct xen_intr_pcpu_data *pcpu;

		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
	}

	/* Mask all event channels. */
	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	/* Remove port -> isrc mappings */
	memset(xen_intr_port_to_isrc, 0, sizeof(xen_intr_port_to_isrc));

	/* Free unused isrcs and rebind VIRQs and IPIs */
	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
		u_int vector;

		vector = first_evtchn_irq + isrc_idx;
		isrc = (struct xenisrc *)intr_lookup_source(vector);
		if (isrc == NULL)
			continue;
		switch (isrc->xi_type) {
		case EVTCHN_TYPE_IPI:
			xen_rebind_ipi(isrc);
			break;
		case EVTCHN_TYPE_VIRQ:
			xen_rebind_virq(isrc);
			break;
		default:
			break;
		}
	}
}
/**
 * Disable a Xen interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_disable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_mask_port(isrc->xi_port);
}
/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_vector(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	return (isrc->xi_vector);
}
/**
 * Determine whether or not interrupt events are pending on the
 * given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_source_pending(struct intsrc *isrc)
{
	/*
	 * EventChannels are edge triggered and never masked.
	 * There can be no pending events.
	 */
	return (0);
}
/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 if successful, otherwise an errno.
 */
static int
xen_intr_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	/* Configuration is only possible via the evtchn apis. */
	return (ENODEV);
}
/**
 * Configure CPU affinity for interrupt source event delivery.
 *
 * \param isrc     The interrupt source to configure.
 * \param apic_id  The apic id of the CPU for handling future events.
 *
 * \returns  0 if successful, otherwise an errno.
 */
static int
xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
{
#ifdef SMP
	struct evtchn_bind_vcpu bind_vcpu;
	struct xenisrc *isrc;
	u_int to_cpu, vcpu_id;
	int error, masked;

	if (xen_vector_callback_enabled == 0)
		return (EOPNOTSUPP);

	to_cpu = apic_cpuid(apic_id);
	vcpu_id = pcpu_find(to_cpu)->pc_vcpu_id;

	mtx_lock(&xen_intr_isrc_lock);
	isrc = (struct xenisrc *)base_isrc;
	if (!is_valid_evtchn(isrc->xi_port)) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EINVAL);
	}

	/*
	 * Mask the event channel while binding it to prevent interrupt
	 * delivery with an inconsistent state in isrc->xi_cpu.
	 */
	masked = evtchn_test_and_set_mask(isrc->xi_port);
	if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
	    (isrc->xi_type == EVTCHN_TYPE_IPI)) {
		/*
		 * Virtual IRQs are associated with a cpu by
		 * the Hypervisor at evtchn_bind_virq time, so
		 * all we need to do is update the per-CPU masks.
		 */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		isrc->xi_cpu = to_cpu;
		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		goto out;
	}

	bind_vcpu.port = isrc->xi_port;
	bind_vcpu.vcpu = vcpu_id;

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
	if (isrc->xi_cpu != to_cpu) {
		if (error == 0) {
			/* Commit to new binding by removing the old one. */
			evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
			isrc->xi_cpu = to_cpu;
			evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		}
	}

out:
	if (masked == 0)
		evtchn_unmask_port(isrc->xi_port);
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}
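/*
 * Sketch of the rebinding protocol above, for a hypothetical port 10
 * moving from vCPU#0 to vCPU#2:
 *
 *	masked = evtchn_test_and_set_mask(10);    // block delivery
 *	EVTCHNOP_bind_vcpu { .port = 10, .vcpu = 2 };
 *	evtchn_cpu_mask_port(0, 10);              // retire old per-CPU bit
 *	evtchn_cpu_unmask_port(2, 10);            // enable new per-CPU bit
 *	if (masked == 0)
 *		evtchn_unmask_port(10);           // restore delivery
 *
 * The channel stays masked for the whole window so no upcall can observe
 * a half-updated xi_cpu.
 */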
/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_disable_source(struct intsrc *base_isrc, int eoi)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	/*
	 * NB: checking if the event channel is already masked is
	 * needed because the event channel user-space device
	 * masks event channels on its filter as part of its
	 * normal operation, and those shouldn't be automatically
	 * unmasked by the generic interrupt code.  The event channel
	 * device will unmask them when needed.
	 */
	isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);
}
/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_enable_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_masked == 0)
		evtchn_unmask_port(isrc->xi_port);
}
/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_eoi_source(struct intsrc *base_isrc)
{
}
/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_enable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_unmask_port(isrc->xi_port);
}
/*------------------ Physical Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_pirq_disable_source(struct intsrc *base_isrc, int eoi)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_edgetrigger == 0)
		evtchn_mask_port(isrc->xi_port);
	if (eoi == PIC_EOI)
		xen_intr_pirq_eoi_source(base_isrc);
}
/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_pirq_enable_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_edgetrigger == 0)
		evtchn_unmask_port(isrc->xi_port);
}
/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_pirq_eoi_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	if (xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) {
		struct physdev_eoi eoi = { .irq = isrc->xi_pirq };

		error = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		if (error != 0)
			panic("Unable to EOI PIRQ#%d: %d\n",
			    isrc->xi_pirq, error);
	}
}
/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_pirq_enable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	struct evtchn_bind_pirq bind_pirq;
	struct physdev_irq_status_query irq_status;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	if (!xen_intr_pirq_eoi_map_enabled) {
		irq_status.irq = isrc->xi_pirq;
		error = HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query,
		    &irq_status);
		if (error)
			panic("unable to get status of IRQ#%d", isrc->xi_pirq);

		if (irq_status.flags & XENIRQSTAT_needs_eoi) {
			/*
			 * Since the dynamic PIRQ EOI map is not available
			 * mark the PIRQ as needing EOI unconditionally.
			 */
			xen_set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map);
		}
	}

	bind_pirq.pirq = isrc->xi_pirq;
	bind_pirq.flags = isrc->xi_edgetrigger ? 0 : BIND_PIRQ__WILL_SHARE;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (error)
		panic("unable to bind IRQ#%d", isrc->xi_pirq);

	isrc->xi_port = bind_pirq.port;

	mtx_lock(&xen_intr_isrc_lock);
	KASSERT((xen_intr_port_to_isrc[bind_pirq.port] == NULL),
	    ("trying to override an already setup event channel port"));
	xen_intr_port_to_isrc[bind_pirq.port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

	evtchn_unmask_port(isrc->xi_port);
}
/*
 * Disable an interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_pirq_disable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	struct evtchn_close close;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	evtchn_mask_port(isrc->xi_port);

	close.port = isrc->xi_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (error)
		panic("unable to close event channel %d IRQ#%d",
		    isrc->xi_port, isrc->xi_pirq);

	mtx_lock(&xen_intr_isrc_lock);
	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	mtx_unlock(&xen_intr_isrc_lock);

	isrc->xi_port = 0;
}
/*
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 if successful, otherwise an errno.
 */
static int
xen_intr_pirq_config_intr(struct intsrc *base_isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;
	struct physdev_setup_gsi setup_gsi;
	int error;

	KASSERT(!(trig == INTR_TRIGGER_CONFORM || pol == INTR_POLARITY_CONFORM),
	    ("%s: Conforming trigger or polarity\n", __func__));

	setup_gsi.gsi = isrc->xi_pirq;
	setup_gsi.triggering = trig == INTR_TRIGGER_EDGE ? 0 : 1;
	setup_gsi.polarity = pol == INTR_POLARITY_HIGH ? 0 : 1;

	error = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
	if (error == -XEN_EEXIST) {
		if ((isrc->xi_edgetrigger && (trig != INTR_TRIGGER_EDGE)) ||
		    (isrc->xi_activehi && (pol != INTR_POLARITY_HIGH)))
			panic("unable to reconfigure interrupt IRQ#%d",
			    isrc->xi_pirq);
		error = 0;
	}
	if (error)
		panic("unable to configure IRQ#%d\n", isrc->xi_pirq);

	isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
	isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

	return (0);
}
/*--------------------------- Public Functions -------------------------------*/
/*------- API comments for these methods can be found in xen/xenintr.h -------*/
int
xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT,
	    device_get_nameunit(dev), filter, handler, arg, flags,
	    port_handlep);
	if (error != 0)
		return (error);

	/*
	 * The Event Channel API didn't open this port, so it is not
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 0;
	return (0);
}
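/*
 * Example (sketch): a front-end driver binding an event channel port it
 * obtained out of band (e.g. from xenstore ring negotiation).  The softc
 * layout and handler names here are hypothetical:
 *
 *	error = xen_intr_bind_local_port(sc->dev, sc->evtchn,
 *	    NULL, xn_intr_handler, sc, INTR_TYPE_NET | INTR_MPSAFE,
 *	    &sc->xen_intr_handle);
 *	if (error != 0)
 *		device_printf(sc->dev, "bind failed: %d\n", error);
 *
 * Because xi_close is left at 0, xen_intr_unbind() will release the isrc
 * but will not issue EVTCHNOP_close for the port.
 */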
int
xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_alloc_unbound alloc_unbound;
	int error;

	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
	    &alloc_unbound);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
	    device_get_nameunit(dev), filter, handler, arg, flags,
	    port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = alloc_unbound.port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
}
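/*
 * Example (sketch): a back-end allocating an unbound port for a guest
 * front-end to connect to; `otherend_id` and the softc fields are
 * hypothetical.  The allocated local port can be read back with
 * xen_intr_port() for advertisement via xenstore:
 *
 *	error = xen_intr_alloc_and_bind_local_port(sc->dev, otherend_id,
 *	    xb_filter, NULL, sc, INTR_TYPE_BIO | INTR_MPSAFE,
 *	    &sc->xen_intr_handle);
 *	if (error == 0)
 *		port = xen_intr_port(sc->xen_intr_handle);
 */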
int
xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
    u_int remote_port, driver_filter_t filter, driver_intr_t handler,
    void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_bind_interdomain bind_interdomain;
	int error;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
	    &bind_interdomain);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
	    EVTCHN_TYPE_PORT, device_get_nameunit(dev), filter, handler, arg,
	    flags, port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = bind_interdomain.local_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
}
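/*
 * Example (sketch): connecting to a port advertised by a remote domain,
 * with hypothetical values read from xenstore:
 *
 *	error = xen_intr_bind_remote_port(sc->dev, otherend_id,
 *	    remote_port, NULL, xb_back_intr, sc,
 *	    INTR_TYPE_BIO | INTR_MPSAFE, &sc->xen_intr_handle);
 *
 * On success the hypervisor has created a new local port bound to
 * (otherend_id, remote_port), and xi_close is set so the port is closed
 * automatically on xen_intr_unbind().
 */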
int
xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct xenisrc *isrc;
	struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
	int error;

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ,
	    device_get_nameunit(dev), filter, handler, arg, flags,
	    port_handlep);
#ifdef SMP
	if (error == 0)
		error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);
#endif
	if (error != 0) {
		evtchn_close_t close = { .port = bind_virq.port };

		xen_intr_unbind(*port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (-error);
	}

#ifdef SMP
	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	}
#endif

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	isrc->xi_virq = virq;

	return (0);
}
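/*
 * Example (sketch): binding the per-CPU timer VIRQ, as a clock driver
 * might do; the handler and state names are illustrative only:
 *
 *	error = xen_intr_bind_virq(dev, VIRQ_TIMER, cpu,
 *	    xentimer_filter, NULL, pcpu_state, INTR_TYPE_CLK,
 *	    &pcpu_state->irq_handle);
 *
 * VIRQs are associated with a specific vCPU by the hypervisor at
 * EVTCHNOP_bind_virq time, which is why the early-boot fallback above
 * only needs to fix up the per-CPU masks.
 */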
int
xen_intr_alloc_and_bind_ipi(u_int cpu, driver_filter_t filter,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
#ifdef SMP
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct xenisrc *isrc;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
	/* Same size as the one used by intr_handler->ih_name. */
	char name[MAXCOMLEN + 1];
	int error;

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	snprintf(name, sizeof(name), "cpu%u", cpu);

	error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
	    name, filter, NULL, NULL, flags, port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = bind_ipi.port };

		xen_intr_unbind(*port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (-error);
	}

	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}
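/*
 * Example (sketch): wiring a Xen IPI for a hypothetical SMP service; the
 * filter runs on `cpu` each time xen_intr_signal() is invoked on the
 * returned handle:
 *
 *	error = xen_intr_alloc_and_bind_ipi(cpu, xen_ipi_filter,
 *	    INTR_TYPE_TTY, &ipi_handle[cpu]);
 *	...
 *	xen_intr_signal(ipi_handle[cpu]);	// raise the IPI
 */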
int
xen_register_pirq(int vector, enum intr_trigger trig, enum intr_polarity pol)
{
	struct physdev_map_pirq map_pirq;
	struct xenisrc *isrc;
	int error;

	if (vector == 0)
		return (EINVAL);

	if (bootverbose)
		printf("xen: register IRQ#%d\n", vector);

	map_pirq.domid = DOMID_SELF;
	map_pirq.type = MAP_PIRQ_TYPE_GSI;
	map_pirq.index = vector;
	map_pirq.pirq = vector;

	error = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_pirq);
	if (error) {
		printf("xen: unable to map IRQ#%d\n", vector);
		return (error);
	}

	mtx_lock(&xen_intr_isrc_lock);
	isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector);
	mtx_unlock(&xen_intr_isrc_lock);
	KASSERT((isrc != NULL), ("xen: unable to allocate isrc for interrupt"));
	isrc->xi_pirq = vector;
	isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
	isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

	return (0);
}
int
xen_register_msi(device_t dev, int vector, int count)
{
	struct physdev_map_pirq msi_irq;
	struct xenisrc *isrc;
	int ret;

	memset(&msi_irq, 0, sizeof(msi_irq));
	msi_irq.domid = DOMID_SELF;
	msi_irq.type = count == 1 ?
	    MAP_PIRQ_TYPE_MSI_SEG : MAP_PIRQ_TYPE_MULTI_MSI;
	msi_irq.index = -1;
	msi_irq.pirq = -1;
	msi_irq.bus = pci_get_bus(dev) | (pci_get_domain(dev) << 16);
	msi_irq.devfn = (pci_get_slot(dev) << 3) | pci_get_function(dev);
	msi_irq.entry_nr = count;

	ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &msi_irq);
	if (ret != 0)
		return (ret);
	if (count != msi_irq.entry_nr) {
		panic("unable to setup all requested MSI vectors "
		    "(expected %d got %d)", count, msi_irq.entry_nr);
	}

	mtx_lock(&xen_intr_isrc_lock);
	for (int i = 0; i < count; i++) {
		isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector + i);
		KASSERT(isrc != NULL,
		    ("xen: unable to allocate isrc for interrupt"));
		isrc->xi_pirq = msi_irq.pirq + i;
		/* MSI interrupts are always edge triggered */
		isrc->xi_edgetrigger = 1;
	}
	mtx_unlock(&xen_intr_isrc_lock);

	return (0);
}
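/*
 * Example (sketch): mapping two MSI vectors previously reserved from the
 * local interrupt layer; `dev` and `vector` are hypothetical values from
 * the MSI allocation path:
 *
 *	error = xen_register_msi(dev, vector, 2);
 *
 * This maps PIRQs for vector and vector + 1; both isrcs are marked edge
 * triggered, matching MSI semantics.
 */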
int
xen_release_msi(int vector)
{
	struct physdev_unmap_pirq unmap;
	struct xenisrc *isrc;
	int ret;

	isrc = (struct xenisrc *)intr_lookup_source(vector);
	if (isrc == NULL)
		return (ENXIO);

	unmap.pirq = isrc->xi_pirq;
	ret = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap);
	if (ret != 0)
		return (ret);

	xen_intr_release_isrc(isrc);

	return (0);
}
int
xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
{
	char descr[MAXCOMLEN + 1];
	struct xenisrc *isrc;
	va_list ap;

	isrc = xen_intr_isrc(port_handle);
	if (isrc == NULL)
		return (EINVAL);

	va_start(ap, fmt);
	vsnprintf(descr, sizeof(descr), fmt, ap);
	va_end(ap);
	return (intr_describe(isrc->xi_vector, isrc->xi_cookie, descr));
}
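/*
 * Example (sketch): tagging a bound handler so it shows up with a
 * meaningful name in vmstat -i; the handle is assumed to come from one
 * of the bind functions above:
 *
 *	xen_intr_describe(sc->xen_intr_handle, "%s",
 *	    device_get_nameunit(dev));
 */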
void
xen_intr_unbind(xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;

	KASSERT(port_handlep != NULL,
	    ("NULL xen_intr_handle_t passed to xen_intr_unbind"));

	isrc = xen_intr_isrc(*port_handlep);
	*port_handlep = NULL;
	if (isrc == NULL)
		return;

	if (isrc->xi_cookie != NULL)
		intr_remove_handler(isrc->xi_cookie);
	xen_intr_release_isrc(isrc);
}
void
xen_intr_signal(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	if (isrc != NULL) {
		KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
		    isrc->xi_type == EVTCHN_TYPE_IPI,
		    ("evtchn_signal on something other than a local port"));
		struct evtchn_send send = { .port = isrc->xi_port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
	}
}
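/*
 * Example (sketch): kicking the remote end of a shared ring after
 * queuing requests; producer bookkeeping is omitted:
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->ring, notify);
 *	if (notify)
 *		xen_intr_signal(sc->xen_intr_handle);
 */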
evtchn_port_t
xen_intr_port(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	if (isrc == NULL)
		return (0);

	return (isrc->xi_port);
}
int
xen_intr_add_handler(const char *name, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t handle)
{
	struct xenisrc *isrc;
	int error;

	isrc = xen_intr_isrc(handle);
	if (isrc == NULL || isrc->xi_cookie != NULL)
		return (EINVAL);

	error = intr_add_handler(name, isrc->xi_vector, filter, handler, arg,
	    flags | INTR_EXCL, &isrc->xi_cookie);
	if (error != 0) {
		printf(
		    "%s: xen_intr_add_handler: intr_add_handler failed: %d\n",
		    name, error);
	}

	return (error);
}
#ifdef DDB
static const char *
xen_intr_print_type(enum evtchn_type type)
{
	static const char *evtchn_type_to_string[EVTCHN_TYPE_COUNT] = {
		[EVTCHN_TYPE_UNBOUND]	= "UNBOUND",
		[EVTCHN_TYPE_PIRQ]	= "PIRQ",
		[EVTCHN_TYPE_VIRQ]	= "VIRQ",
		[EVTCHN_TYPE_IPI]	= "IPI",
		[EVTCHN_TYPE_PORT]	= "PORT",
	};

	if (type >= EVTCHN_TYPE_COUNT)
		return ("UNKNOWN");

	return (evtchn_type_to_string[type]);
}
static void
xen_intr_dump_port(struct xenisrc *isrc)
{
	struct xen_intr_pcpu_data *pcpu;
	shared_info_t *s = HYPERVISOR_shared_info;
	int i;

	db_printf("Port %d Type: %s\n",
	    isrc->xi_port, xen_intr_print_type(isrc->xi_type));
	if (isrc->xi_type == EVTCHN_TYPE_PIRQ) {
		db_printf("\tPirq: %d ActiveHi: %d EdgeTrigger: %d "
		    "NeedsEOI: %d\n",
		    isrc->xi_pirq, isrc->xi_activehi, isrc->xi_edgetrigger,
		    !!xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map));
	}
	if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
		db_printf("\tVirq: %d\n", isrc->xi_virq);

	db_printf("\tMasked: %d Pending: %d\n",
	    !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
	    !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));

	db_printf("\tPer-CPU Masks: ");
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		db_printf("cpu#%d: %d ", i,
		    !!xen_test_bit(isrc->xi_port, pcpu->evtchn_enabled));
	}
	db_printf("\n");
}
DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
{
	int i;

	if (!xen_domain()) {
		db_printf("Only available on Xen guests\n");
		return;
	}

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		struct xenisrc *isrc;

		isrc = xen_intr_port_to_isrc[i];
		if (isrc == NULL)
			continue;

		xen_intr_dump_port(isrc);
	}
}
#endif /* DDB */