/******************************************************************************
 * Xen event and interrupt services for x86 HVM guests.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 * Copyright (c) 2012, Spectra Logic Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/refcount.h>

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <x86/apicreg.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#include <machine/xen/synch_bitops.h>
#include <machine/xen/xen-os.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn/evtchnvar.h>

#include <dev/xen/xenpci/xenpcivar.h>
#include <dev/pci/pcivar.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");

static u_int first_evtchn_irq;

/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
	/**
	 * The last event channel bitmap section (level one bit) processed.
	 * This is used to ensure we scan all ports before
	 * servicing an already serviced port again.
	 */
	u_int last_processed_l1i;

	/**
	 * The last event channel processed within the event channel
	 * bitmap being scanned.
	 */
	u_int last_processed_l2i;

	/** Pointer to this CPU's interrupt statistic counter. */
	u_long *evtchn_intrcnt;

	/**
	 * A bitmap of ports that can be serviced from this CPU.
	 * A set bit means interrupt handling is enabled.
	 */
	u_long evtchn_enabled[sizeof(u_long) * 8];
};

/*
 * Start the scan at port 0 by initializing the last scanned
 * location as the highest numbered event channel port.
 */
DPCPU_DEFINE_STATIC(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
	.last_processed_l1i = LONG_BIT - 1,
	.last_processed_l2i = LONG_BIT - 1
};

DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

#define	XEN_EEXIST		17 /* Xen "already exists" error */
#define	XEN_ALLOCATE_VECTOR	0  /* Allocate a vector for this event channel */
#define	XEN_INVALID_EVTCHN	0  /* Invalid event channel */

#define	is_valid_evtchn(x)	((x) != XEN_INVALID_EVTCHN)
struct xenisrc {
	struct intsrc	xi_intsrc;
	enum evtchn_type xi_type;
	int		xi_cpu;		/* VCPU for delivery. */
	int		xi_vector;	/* Global isrc vector number. */
	evtchn_port_t	xi_port;
	int		xi_pirq;
	int		xi_virq;
	void		*xi_cookie;
	u_int		xi_close:1;	/* close on unbind? */
	u_int		xi_activehi:1;
	u_int		xi_edgetrigger:1;
	u_int		xi_masked:1;
	volatile u_int	xi_refcount;
};
static void	xen_intr_suspend(struct pic *);
static void	xen_intr_resume(struct pic *, bool suspend_cancelled);
static void	xen_intr_enable_source(struct intsrc *isrc);
static void	xen_intr_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_eoi_source(struct intsrc *isrc);
static void	xen_intr_enable_intr(struct intsrc *isrc);
static void	xen_intr_disable_intr(struct intsrc *isrc);
static int	xen_intr_vector(struct intsrc *isrc);
static int	xen_intr_source_pending(struct intsrc *isrc);
static int	xen_intr_config_intr(struct intsrc *isrc,
		    enum intr_trigger trig, enum intr_polarity pol);
static int	xen_intr_assign_cpu(struct intsrc *isrc, u_int apic_id);

static void	xen_intr_pirq_enable_source(struct intsrc *isrc);
static void	xen_intr_pirq_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_pirq_eoi_source(struct intsrc *isrc);
static void	xen_intr_pirq_enable_intr(struct intsrc *isrc);
static void	xen_intr_pirq_disable_intr(struct intsrc *isrc);
static int	xen_intr_pirq_config_intr(struct intsrc *isrc,
		    enum intr_trigger trig, enum intr_polarity pol);
/**
 * PIC interface for all event channel port types except physical IRQs.
 */
struct pic xen_intr_pic = {
	.pic_enable_source = xen_intr_enable_source,
	.pic_disable_source = xen_intr_disable_source,
	.pic_eoi_source = xen_intr_eoi_source,
	.pic_enable_intr = xen_intr_enable_intr,
	.pic_disable_intr = xen_intr_disable_intr,
	.pic_vector = xen_intr_vector,
	.pic_source_pending = xen_intr_source_pending,
	.pic_suspend = xen_intr_suspend,
	.pic_resume = xen_intr_resume,
	.pic_config_intr = xen_intr_config_intr,
	.pic_assign_cpu = xen_intr_assign_cpu
};
/**
 * PIC interface for all event channels representing
 * physical interrupt sources.
 */
struct pic xen_intr_pirq_pic = {
	.pic_register_sources = xenpv_register_pirqs,
	.pic_enable_source = xen_intr_pirq_enable_source,
	.pic_disable_source = xen_intr_pirq_disable_source,
	.pic_eoi_source = xen_intr_pirq_eoi_source,
	.pic_enable_intr = xen_intr_pirq_enable_intr,
	.pic_disable_intr = xen_intr_pirq_disable_intr,
	.pic_vector = xen_intr_vector,
	.pic_source_pending = xen_intr_source_pending,
	.pic_config_intr = xen_intr_pirq_config_intr,
	.pic_assign_cpu = xen_intr_assign_cpu
};
static struct mtx	 xen_intr_isrc_lock;
static u_int		 xen_intr_auto_vector_count;
static struct xenisrc	*xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
static u_long		*xen_intr_pirq_eoi_map;
static boolean_t	 xen_intr_pirq_eoi_map_enabled;
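/*
 * Locking note (descriptive): xen_intr_isrc_lock serializes updates to
 * xen_intr_port_to_isrc[], the allocation and recycling of isrcs, and
 * each source's xi_refcount.  The upcall handler reads
 * xen_intr_port_to_isrc[] locklessly; this relies on ports being masked
 * while their binding is in flux (see xen_intr_assign_cpu()).
 */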
/*------------------------- Private Functions --------------------------------*/
/**
 * Disable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param port  The event channel port to mask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not preclude reception of an event
 *        for this event channel on another CPU.  To mask the
 *        event channel globally, use evtchn_mask().
 */
static void
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	xen_clear_bit(port, pcpu->evtchn_enabled);
}

/**
 * Enable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not guarantee that event delivery
 *        is enabled for this event channel port.  The port must
 *        also be globally enabled.  See evtchn_unmask().
 */
static void
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	xen_set_bit(port, pcpu->evtchn_enabled);
}
/**
 * Allocate and register a per-cpu Xen upcall interrupt counter.
 *
 * \param cpu  The cpu for which to register this interrupt count.
 */
static void
xen_intr_intrcnt_add(u_int cpu)
{
	char buf[MAXCOMLEN + 1];
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	if (pcpu->evtchn_intrcnt != NULL)
		return;

	snprintf(buf, sizeof(buf), "cpu%d:xen", cpu);
	intrcnt_add(buf, &pcpu->evtchn_intrcnt);
}
/**
 * Search for an already allocated but currently unused Xen interrupt
 * source object.
 *
 * \param type  Restrict the search to interrupt sources of the given
 *              type.
 *
 * \return  A pointer to a free Xen interrupt source object or NULL.
 */
static struct xenisrc *
xen_intr_find_unused_isrc(enum evtchn_type type)
{
	int isrc_idx;

	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));

	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
		struct xenisrc *isrc;
		u_int vector;

		vector = first_evtchn_irq + isrc_idx;
		isrc = (struct xenisrc *)intr_lookup_source(vector);
		if (isrc != NULL
		 && isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
			KASSERT(isrc->xi_intsrc.is_handlers == 0,
			    ("Free evtchn still has handlers"));
			isrc->xi_type = type;
			return (isrc);
		}
	}
	return (NULL);
}
/**
 * Allocate a Xen interrupt source object.
 *
 * \param type  The type of interrupt source to create.
 *
 * \return  A pointer to a newly allocated Xen interrupt source
 *          object or NULL.
 */
static struct xenisrc *
xen_intr_alloc_isrc(enum evtchn_type type, int vector)
{
	static int warned;
	struct xenisrc *isrc;

	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));

	if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) {
		if (!warned) {
			warned = 1;
			printf("xen_intr_alloc: Event channels exhausted.\n");
		}
		return (NULL);
	}

	if (type != EVTCHN_TYPE_PIRQ) {
		vector = first_evtchn_irq + xen_intr_auto_vector_count;
		xen_intr_auto_vector_count++;
	}

	KASSERT((intr_lookup_source(vector) == NULL),
	    ("Trying to use an already allocated vector"));

	mtx_unlock(&xen_intr_isrc_lock);
	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
	isrc->xi_intsrc.is_pic =
	    (type == EVTCHN_TYPE_PIRQ) ? &xen_intr_pirq_pic : &xen_intr_pic;
	isrc->xi_vector = vector;
	isrc->xi_type = type;
	intr_register_source(&isrc->xi_intsrc);
	mtx_lock(&xen_intr_isrc_lock);

	return (isrc);
}
/**
 * Attempt to free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  0 on success.  The source must no longer have any
 *           handlers attached when this function is called.
 */
static int
xen_intr_release_isrc(struct xenisrc *isrc)
{

	mtx_lock(&xen_intr_isrc_lock);
	KASSERT(isrc->xi_intsrc.is_handlers == 0,
	    ("Release called, but xenisrc still in use"));
	evtchn_mask_port(isrc->xi_port);
	evtchn_clear_port(isrc->xi_port);

	/* Rebind port to CPU 0. */
	evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
	evtchn_cpu_unmask_port(0, isrc->xi_port);

	if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
		struct evtchn_close close = { .port = isrc->xi_port };

		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
	}

	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	isrc->xi_cpu = 0;
	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
	isrc->xi_port = XEN_INVALID_EVTCHN;
	isrc->xi_cookie = NULL;
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
}
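/*
 * Note: xen_intr_release_isrc() recycles rather than frees.  The object
 * is returned to the EVTCHN_TYPE_UNBOUND pool for reuse by
 * xen_intr_find_unused_isrc(), so the consumed vector space only grows
 * up to the high-water mark tracked in xen_intr_auto_vector_count.
 */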
/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp        The returned Xen interrupt object associated with
 *                     the specified local port.
 * \param local_port   The event channel to bind.
 * \param type         The event channel type of local_port.
 * \param intr_owner   The device making this bind request.
 * \param filter       An interrupt filter handler.  Specify NULL
 *                     to always dispatch to the ithread handler.
 * \param handler      An interrupt ithread handler.  Optional (can
 *                     specify NULL) if all necessary event actions
 *                     are performed by filter.
 * \param arg          Argument to present to both filter and handler.
 * \param flags        Interrupt handler flags.  See sys/bus.h.
 * \param port_handlep Pointer to an opaque handle used to manage this
 *                     registration.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, const char *intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	*isrcp = NULL;
	if (port_handlep == NULL) {
		printf("%s: xen_intr_bind_isrc: Bad event handle\n",
		    intr_owner);
		return (EINVAL);
	}

	mtx_lock(&xen_intr_isrc_lock);
	isrc = xen_intr_find_unused_isrc(type);
	if (isrc == NULL) {
		isrc = xen_intr_alloc_isrc(type, XEN_ALLOCATE_VECTOR);
		if (isrc == NULL) {
			mtx_unlock(&xen_intr_isrc_lock);
			return (ENOSPC);
		}
	}
	isrc->xi_port = local_port;
	xen_intr_port_to_isrc[local_port] = isrc;
	refcount_init(&isrc->xi_refcount, 1);
	mtx_unlock(&xen_intr_isrc_lock);

	/* Assign the opaque handle: a pointer to the isrc's vector. */
	*port_handlep = &isrc->xi_vector;

#ifdef SMP
	if (type == EVTCHN_TYPE_PORT) {
		/*
		 * By default all interrupts are assigned to vCPU#0
		 * unless specified otherwise, so shuffle them to balance
		 * the interrupt load.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, intr_next_cpu(0));
	}
#endif

	if (filter == NULL && handler == NULL) {
		/*
		 * No filter/handler provided, leave the event channel
		 * masked and without a valid handler; the caller is
		 * in charge of setting that up.
		 */
		*isrcp = isrc;
		return (0);
	}

	error = xen_intr_add_handler(intr_owner, filter, handler, arg, flags,
	    *port_handlep);
	if (error != 0) {
		xen_intr_release_isrc(isrc);
		return (error);
	}
	*isrcp = isrc;
	return (0);
}
/**
 * Lookup a Xen interrupt source object given an interrupt binding handle.
 *
 * \param handle  A handle initialized by a previous call to
 *                xen_intr_bind_isrc().
 *
 * \returns  A pointer to the Xen interrupt source object associated
 *           with the given interrupt handle.  NULL if no association
 *           exists.
 */
static struct xenisrc *
xen_intr_isrc(xen_intr_handle_t handle)
{
	int vector;

	if (handle == NULL)
		return (NULL);

	vector = *(int *)handle;
	KASSERT(vector >= first_evtchn_irq &&
	    vector < (first_evtchn_irq + xen_intr_auto_vector_count),
	    ("Xen interrupt vector is out of range"));

	return ((struct xenisrc *)intr_lookup_source(vector));
}
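/*
 * Handle round-trip, sketched here for illustration only ("handle" and
 * "isrc" are local names, not new API):
 *
 *	// bind stores a pointer to the source's vector:
 *	//	*port_handlep = &isrc->xi_vector;
 *	// lookup recovers the source from that pointer:
 *	//	vector = *(int *)handle;
 *	//	isrc = (struct xenisrc *)intr_lookup_source(vector);
 *
 * A handle is therefore only valid while the backing isrc exists.
 */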
/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param sh    The Xen shared info area.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
static inline u_long
xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh,
    u_int idx)
{

	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(sh->evtchn_pending[0]));
	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(pcpu->evtchn_enabled[0]));
	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(sh->evtchn_pending));
	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(pcpu->evtchn_enabled));
	return (sh->evtchn_pending[idx]
	    & ~sh->evtchn_mask[idx]
	    & pcpu->evtchn_enabled[idx]);
}
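/*
 * Worked example: with LONG_BIT == 64, event channel port 67 lives in
 * bitmap section idx == 1 (67 / 64) at bit 3 (67 % 64).
 * xen_intr_active_ports(pcpu, sh, 1) therefore has bit 3 set only if
 * port 67 is pending, not globally masked, and enabled on this CPU.
 */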
/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * \param trap_frame  The trap frame context for the current interrupt.
 */
void
xen_intr_handle_upcall(struct trapframe *trap_frame)
{
	u_int l1i, l2i, port, cpu;
	u_long masked_l1, masked_l2;
	struct xenisrc *isrc;
	shared_info_t *s;
	vcpu_info_t *v;
	struct xen_intr_pcpu_data *pc;
	u_long l1, l2;

	/*
	 * Disable preemption in order to always check and fire events
	 * on the right vCPU.
	 */
	critical_enter();

	cpu = PCPU_GET(cpuid);
	pc = DPCPU_PTR(xen_intr_pcpu);
	s = HYPERVISOR_shared_info;
	v = DPCPU_GET(vcpu_info);

	if (xen_hvm_domain() && !xen_vector_callback_enabled) {
		KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
	}

	v->evtchn_upcall_pending = 0;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
	/* Clear master flag /before/ clearing selector flag. */
	wmb();
#endif
	l1 = atomic_readandclear_long(&v->evtchn_pending_sel);

	l1i = pc->last_processed_l1i;
	l2i = pc->last_processed_l2i;
	(*pc->evtchn_intrcnt)++;

	while (l1 != 0) {
		l1i = (l1i + 1) % LONG_BIT;
		masked_l1 = l1 & ((~0UL) << l1i);

		if (masked_l1 == 0) {
			/*
			 * if we masked out all events, wrap around
			 * to the beginning.
			 */
			l1i = LONG_BIT - 1;
			l2i = LONG_BIT - 1;
			continue;
		}
		l1i = ffsl(masked_l1) - 1;

		do {
			l2 = xen_intr_active_ports(pc, s, l1i);

			l2i = (l2i + 1) % LONG_BIT;
			masked_l2 = l2 & ((~0UL) << l2i);

			if (masked_l2 == 0) {
				/* if we masked out all events, move on */
				break;
			}
			l2i = ffsl(masked_l2) - 1;

			/* process port */
			port = (l1i * LONG_BIT) + l2i;
			synch_clear_bit(port, &s->evtchn_pending[0]);

			isrc = xen_intr_port_to_isrc[port];
			if (__predict_false(isrc == NULL))
				continue;

			/* Make sure we are firing on the right vCPU */
			KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
			    ("Received unexpected event on vCPU#%d, event bound to vCPU#%d",
			    PCPU_GET(cpuid), isrc->xi_cpu));

			intr_execute_handlers(&isrc->xi_intsrc, trap_frame);

			/*
			 * If this is the final port processed,
			 * we'll pick up here+1 next time.
			 */
			pc->last_processed_l1i = l1i;
			pc->last_processed_l2i = l2i;

		} while (l2i != LONG_BIT - 1);

		l2 = xen_intr_active_ports(pc, s, l1i);
		if (l2 == 0) {
			/*
			 * We handled all ports, so we can clear the
			 * selector bit.
			 */
			l1 &= ~(1UL << l1i);
		}
	}
	critical_exit();
}
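/*
 * Scan-resume note: last_processed_l1i/l2i give the scan a round-robin
 * restart point so that a constantly reasserted port cannot starve
 * higher-numbered ports.  E.g. if the previous upcall stopped after
 * servicing port 67 (l1i == 1, l2i == 3), the next scan resumes at bit 4
 * of section 1 and wraps around before port 67 is considered again.
 */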
static int
xen_intr_init(void *dummy __unused)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	struct xen_intr_pcpu_data *pcpu;
	struct physdev_pirq_eoi_gmfn eoi_gmfn;
	int i, rc;

	if (!xen_domain())
		return (0);

	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);

	/*
	 * Set the per-cpu mask of CPU#0 to enable all, since by default all
	 * event channels are bound to CPU#0.
	 */
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
	}

	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	/* Try to register PIRQ EOI map */
	xen_intr_pirq_eoi_map = malloc(PAGE_SIZE, M_XENINTR, M_WAITOK | M_ZERO);
	eoi_gmfn.gmfn = atop(vtophys(xen_intr_pirq_eoi_map));
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
	if (rc != 0) {
		if (bootverbose)
			printf("Xen interrupts: unable to register PIRQ EOI map\n");
	} else
		xen_intr_pirq_eoi_map_enabled = true;

	intr_register_pic(&xen_intr_pic);
	if (xen_pv_domain() && xen_initial_domain())
		intr_register_pic(&xen_intr_pirq_pic);

	if (bootverbose)
		printf("Xen interrupt system initialized\n");

	return (0);
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);
static void
xen_intrcnt_init(void *dummy __unused)
{
	int i;

	if (!xen_domain())
		return;

	/*
	 * Register interrupt count manually as we aren't guaranteed to see a
	 * call to xen_intr_assign_cpu() before our first interrupt.
	 */
	CPU_FOREACH(i)
		xen_intr_intrcnt_add(i);
}
SYSINIT(xen_intrcnt_init, SI_SUB_INTR, SI_ORDER_MIDDLE, xen_intrcnt_init, NULL);
void
xen_intr_alloc_irqs(void)
{

	first_evtchn_irq = num_io_irqs;
	num_io_irqs += NR_EVENT_CHANNELS;
}
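/*
 * Note: the vectors [first_evtchn_irq, first_evtchn_irq +
 * NR_EVENT_CHANNELS) are reserved as one contiguous block after the I/O
 * IRQs, which is what lets xen_intr_find_unused_isrc() and
 * xen_intr_isrc() convert between vectors and isrc indices with simple
 * arithmetic.
 */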
/*--------------------------- Common PIC Functions ---------------------------*/
/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_suspend(struct pic *unused)
{
}
static void
xen_rebind_ipi(struct xenisrc *isrc)
{
#ifdef SMP
	int cpu = isrc->xi_cpu;
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	int error;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
	    &bind_ipi);
	if (error != 0)
		panic("unable to rebind xen IPI: %d", error);

	isrc->xi_port = bind_ipi.port;
	isrc->xi_cpu = 0;
	xen_intr_port_to_isrc[bind_ipi.port] = isrc;

	error = xen_intr_assign_cpu(&isrc->xi_intsrc,
	    cpu_apic_ids[cpu]);
	if (error)
		panic("unable to bind xen IPI to CPU#%d: %d",
		    cpu, error);

	evtchn_unmask_port(bind_ipi.port);
#else
	panic("Resume IPI event channel on UP");
#endif
}
static void
xen_rebind_virq(struct xenisrc *isrc)
{
	int cpu = isrc->xi_cpu;
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	int error;
	struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
	                                      .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
	    &bind_virq);
	if (error != 0)
		panic("unable to rebind xen VIRQ#%d: %d", isrc->xi_virq, error);

	isrc->xi_port = bind_virq.port;
	isrc->xi_cpu = 0;
	xen_intr_port_to_isrc[bind_virq.port] = isrc;

#ifdef SMP
	error = xen_intr_assign_cpu(&isrc->xi_intsrc,
	    cpu_apic_ids[cpu]);
	if (error)
		panic("unable to bind xen VIRQ#%d to CPU#%d: %d",
		    isrc->xi_virq, cpu, error);
#endif

	evtchn_unmask_port(bind_virq.port);
}
/**
 * Return this PIC to service after being suspended.
 */
static void
xen_intr_resume(struct pic *unused, bool suspend_cancelled)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	struct xenisrc *isrc;
	u_int isrc_idx;
	int i;

	if (suspend_cancelled)
		return;

	/* Reset the per-CPU masks */
	CPU_FOREACH(i) {
		struct xen_intr_pcpu_data *pcpu;

		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
	}

	/* Mask all event channels. */
	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	/* Remove port -> isrc mappings */
	memset(xen_intr_port_to_isrc, 0, sizeof(xen_intr_port_to_isrc));

	/* Free unused isrcs and rebind VIRQs and IPIs */
	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
		u_int vector;

		vector = first_evtchn_irq + isrc_idx;
		isrc = (struct xenisrc *)intr_lookup_source(vector);
		if (isrc != NULL) {
			isrc->xi_port = 0;
			switch (isrc->xi_type) {
			case EVTCHN_TYPE_IPI:
				xen_rebind_ipi(isrc);
				break;
			case EVTCHN_TYPE_VIRQ:
				xen_rebind_virq(isrc);
				break;
			default:
				break;
			}
		}
	}
}
/**
 * Disable a Xen interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_disable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_mask_port(isrc->xi_port);
}
/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_vector(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	return (isrc->xi_vector);
}
/**
 * Determine whether or not interrupt events are pending on the
 * given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_source_pending(struct intsrc *isrc)
{
	/*
	 * EventChannels are edge triggered and never masked.
	 * There can be no pending events.
	 */
	return (0);
}
/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	/* Configuration is only possible via the evtchn apis. */
	return (ENODEV);
}
/**
 * Configure CPU affinity for interrupt source event delivery.
 *
 * \param isrc     The interrupt source to configure.
 * \param apic_id  The apic id of the CPU for handling future events.
 *
 * \returns  0 if successful, otherwise an errno.
 */
static int
xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
{
#ifdef SMP
	struct evtchn_bind_vcpu bind_vcpu;
	struct xenisrc *isrc;
	u_int to_cpu, vcpu_id;
	int error, masked;

	if (xen_vector_callback_enabled == 0)
		return (EOPNOTSUPP);

	to_cpu = apic_cpuid(apic_id);
	vcpu_id = pcpu_find(to_cpu)->pc_vcpu_id;

	mtx_lock(&xen_intr_isrc_lock);
	isrc = (struct xenisrc *)base_isrc;
	if (!is_valid_evtchn(isrc->xi_port)) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EINVAL);
	}

	/*
	 * Mask the event channel while binding it to prevent interrupt
	 * delivery with an inconsistent state in isrc->xi_cpu.
	 */
	masked = evtchn_test_and_set_mask(isrc->xi_port);
	if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
	    (isrc->xi_type == EVTCHN_TYPE_IPI)) {
		/*
		 * Virtual IRQs are associated with a cpu by
		 * the Hypervisor at evtchn_bind_virq time, so
		 * all we need to do is update the per-CPU masks.
		 */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		isrc->xi_cpu = to_cpu;
		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		goto out;
	}

	bind_vcpu.port = isrc->xi_port;
	bind_vcpu.vcpu = vcpu_id;

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
	if (isrc->xi_cpu != to_cpu) {
		if (error == 0) {
			/* Commit to new binding by removing the old one. */
			evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
			isrc->xi_cpu = to_cpu;
			evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		}
	}

out:
	if (masked == 0)
		evtchn_unmask_port(isrc->xi_port);
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}
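/*
 * Note on the mask/unmask dance above: evtchn_test_and_set_mask()
 * records whether the port was already masked, so a port masked by
 * another party (e.g. the event channel user-space device) is not
 * spuriously unmasked once the rebind completes.
 */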
/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
/**
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_disable_source(struct intsrc *base_isrc, int eoi)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	/*
	 * NB: checking if the event channel is already masked is
	 * needed because the event channel user-space device
	 * masks event channels on its filter as part of its
	 * normal operation, and those shouldn't be automatically
	 * unmasked by the generic interrupt code.  The event channel
	 * device will unmask them when needed.
	 */
	isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);
}

/**
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_enable_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_masked == 0)
		evtchn_unmask_port(isrc->xi_port);
}
/**
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_eoi_source(struct intsrc *base_isrc)
{
}

/**
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_enable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_unmask_port(isrc->xi_port);
}
/*------------------ Physical Interrupt Source PIC Functions -----------------*/
/**
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_pirq_disable_source(struct intsrc *base_isrc, int eoi)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_edgetrigger == 0)
		evtchn_mask_port(isrc->xi_port);
	if (eoi == PIC_EOI)
		xen_intr_pirq_eoi_source(base_isrc);
}

/**
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_pirq_enable_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_edgetrigger == 0)
		evtchn_unmask_port(isrc->xi_port);
}
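/*
 * Level-triggered PIRQs remain asserted until the originating device is
 * serviced, so they must stay masked from dispatch until after EOI to
 * avoid an interrupt storm.  Edge-triggered PIRQs (e.g. MSI) need no
 * such masking, hence the xi_edgetrigger checks above.
 */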
/**
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_pirq_eoi_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	if (xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) {
		struct physdev_eoi eoi = { .irq = isrc->xi_pirq };

		error = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		if (error != 0)
			panic("Unable to EOI PIRQ#%d: %d\n",
			    isrc->xi_pirq, error);
	}
}
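/*
 * xen_intr_pirq_eoi_map is the page-sized bitmap registered with Xen in
 * xen_intr_init().  When PHYSDEVOP_pirq_eoi_gmfn_v2 registration
 * succeeded, Xen keeps the per-PIRQ "needs EOI" bits current, and the
 * PHYSDEVOP_eoi hypercall is skipped for any port whose bit is clear.
 */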
/**
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_pirq_enable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	struct evtchn_bind_pirq bind_pirq;
	struct physdev_irq_status_query irq_status;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	if (!xen_intr_pirq_eoi_map_enabled) {
		irq_status.irq = isrc->xi_pirq;
		error = HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query,
		    &irq_status);
		if (error)
			panic("unable to get status of IRQ#%d", isrc->xi_pirq);

		if (irq_status.flags & XENIRQSTAT_needs_eoi) {
			/*
			 * Since the dynamic PIRQ EOI map is not available
			 * mark the PIRQ as needing EOI unconditionally.
			 */
			xen_set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map);
		}
	}

	bind_pirq.pirq = isrc->xi_pirq;
	bind_pirq.flags = isrc->xi_edgetrigger ? 0 : BIND_PIRQ__WILL_SHARE;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (error)
		panic("unable to bind IRQ#%d", isrc->xi_pirq);

	isrc->xi_port = bind_pirq.port;

	mtx_lock(&xen_intr_isrc_lock);
	KASSERT((xen_intr_port_to_isrc[bind_pirq.port] == NULL),
	    ("trying to override an already setup event channel port"));
	xen_intr_port_to_isrc[bind_pirq.port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

	evtchn_unmask_port(isrc->xi_port);
}
/**
 * Disable an interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_pirq_disable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	struct evtchn_close close;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	evtchn_mask_port(isrc->xi_port);

	close.port = isrc->xi_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (error)
		panic("unable to close event channel %d IRQ#%d",
		    isrc->xi_port, isrc->xi_pirq);

	mtx_lock(&xen_intr_isrc_lock);
	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	mtx_unlock(&xen_intr_isrc_lock);

	isrc->xi_port = XEN_INVALID_EVTCHN;
}
/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_pirq_config_intr(struct intsrc *base_isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;
	struct physdev_setup_gsi setup_gsi;
	int error;

	KASSERT(!(trig == INTR_TRIGGER_CONFORM || pol == INTR_POLARITY_CONFORM),
	    ("%s: Conforming trigger or polarity\n", __func__));

	setup_gsi.gsi = isrc->xi_pirq;
	setup_gsi.triggering = trig == INTR_TRIGGER_EDGE ? 0 : 1;
	setup_gsi.polarity = pol == INTR_POLARITY_HIGH ? 0 : 1;

	error = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
	if (error == -XEN_EEXIST) {
		if ((isrc->xi_edgetrigger && (trig != INTR_TRIGGER_EDGE)) ||
		    (isrc->xi_activehi && (pol != INTR_POLARITY_HIGH)))
			panic("unable to reconfigure interrupt IRQ#%d",
			    isrc->xi_pirq);
		error = 0;
	}
	if (error != 0)
		panic("unable to configure IRQ#%d\n", isrc->xi_pirq);

	isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
	isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

	return (0);
}
/*--------------------------- Public Functions -------------------------------*/
/*------- API comments for these methods can be found in xen/xenintr.h -------*/
int
xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT,
	    device_get_nameunit(dev), filter, handler, arg, flags,
	    port_handlep);
	if (error != 0)
		return (error);

	/*
	 * The Event Channel API didn't open this port, so it is not
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 0;
	return (0);
}
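/*
 * Minimal usage sketch (hypothetical driver code; "my_filter", "sc", and
 * "sc->handle" are placeholders, not part of this API):
 *
 *	error = xen_intr_bind_local_port(dev, port, my_filter, NULL, sc,
 *	    INTR_TYPE_BIO | INTR_MPSAFE, &sc->handle);
 *	if (error == 0)
 *		xen_intr_describe(sc->handle, "ring%u", 0);
 *	...
 *	xen_intr_unbind(&sc->handle);	// will not close the port
 */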
int
xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_alloc_unbound alloc_unbound;
	int error;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
	    &alloc_unbound);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
	    device_get_nameunit(dev), filter, handler, arg, flags,
	    port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = alloc_unbound.port };

		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	isrc->xi_close = 1;
	return (0);
}
int
xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
    u_int remote_port, driver_filter_t filter, driver_intr_t handler,
    void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_bind_interdomain bind_interdomain;
	int error;

	bind_interdomain.remote_dom = remote_domain;
	bind_interdomain.remote_port = remote_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
	    &bind_interdomain);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
	    EVTCHN_TYPE_PORT, device_get_nameunit(dev), filter, handler, arg,
	    flags, port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = bind_interdomain.local_port };

		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
}
int
xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct xenisrc *isrc;
	struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
	int error;

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ,
	    device_get_nameunit(dev), filter, handler, arg, flags,
	    port_handlep);

#ifdef SMP
	if (error == 0)
		error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);
#endif

	if (error != 0) {
		evtchn_close_t close = { .port = bind_virq.port };

		xen_intr_unbind(port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

#ifdef SMP
	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	}
#endif

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	isrc->xi_virq = virq;

	return (0);
}
int
xen_intr_alloc_and_bind_ipi(u_int cpu, driver_filter_t filter,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
#ifdef SMP
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct xenisrc *isrc;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
	/* Same size as the one used by intr_handler->ih_name. */
	char name[MAXCOMLEN + 1];
	int error;

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	snprintf(name, sizeof(name), "cpu%u", cpu);

	error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
	    name, filter, NULL, NULL, flags, port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = bind_ipi.port };

		xen_intr_unbind(port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}
int
xen_register_pirq(int vector, enum intr_trigger trig, enum intr_polarity pol)
{
	struct physdev_map_pirq map_pirq;
	struct xenisrc *isrc;
	int error;

	if (vector == 0)
		return (EINVAL);

	if (bootverbose)
		printf("xen: register IRQ#%d\n", vector);

	map_pirq.domid = DOMID_SELF;
	map_pirq.type = MAP_PIRQ_TYPE_GSI;
	map_pirq.index = vector;
	map_pirq.pirq = vector;

	error = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_pirq);
	if (error) {
		printf("xen: unable to map IRQ#%d\n", vector);
		return (error);
	}

	mtx_lock(&xen_intr_isrc_lock);
	isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector);
	mtx_unlock(&xen_intr_isrc_lock);
	KASSERT((isrc != NULL), ("xen: unable to allocate isrc for interrupt"));
	isrc->xi_pirq = vector;
	isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
	isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

	return (0);
}
int
xen_register_msi(device_t dev, int vector, int count)
{
	struct physdev_map_pirq msi_irq;
	struct xenisrc *isrc;
	int ret;

	memset(&msi_irq, 0, sizeof(msi_irq));
	msi_irq.domid = DOMID_SELF;
	msi_irq.type = count == 1 ?
	    MAP_PIRQ_TYPE_MSI_SEG : MAP_PIRQ_TYPE_MULTI_MSI;
	msi_irq.index = -1;
	msi_irq.pirq = -1;
	msi_irq.bus = pci_get_bus(dev) | (pci_get_domain(dev) << 16);
	msi_irq.devfn = (pci_get_slot(dev) << 3) | pci_get_function(dev);
	msi_irq.entry_nr = count;

	ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &msi_irq);
	if (ret != 0)
		return (ret);
	if (count != msi_irq.entry_nr) {
		panic("unable to setup all requested MSI vectors "
		    "(expected %d got %d)", count, msi_irq.entry_nr);
	}

	mtx_lock(&xen_intr_isrc_lock);
	for (int i = 0; i < count; i++) {
		isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector + i);
		KASSERT(isrc != NULL,
		    ("xen: unable to allocate isrc for interrupt"));
		isrc->xi_pirq = msi_irq.pirq + i;
		/* MSI interrupts are always edge triggered */
		isrc->xi_edgetrigger = 1;
	}
	mtx_unlock(&xen_intr_isrc_lock);

	return (0);
}
int
xen_release_msi(int vector)
{
	struct physdev_unmap_pirq unmap;
	struct xenisrc *isrc;
	int ret;

	isrc = (struct xenisrc *)intr_lookup_source(vector);
	if (isrc == NULL)
		return (ENXIO);

	unmap.pirq = isrc->xi_pirq;
	ret = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap);
	if (ret != 0)
		return (ret);

	xen_intr_release_isrc(isrc);

	return (0);
}
int
xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
{
	char descr[MAXCOMLEN + 1];
	struct xenisrc *isrc;
	va_list ap;

	isrc = xen_intr_isrc(port_handle);
	if (isrc == NULL)
		return (EINVAL);

	va_start(ap, fmt);
	vsnprintf(descr, sizeof(descr), fmt, ap);
	va_end(ap);
	return (intr_describe(isrc->xi_vector, isrc->xi_cookie, descr));
}
void
xen_intr_unbind(xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;

	KASSERT(port_handlep != NULL,
	    ("NULL xen_intr_handle_t passed to xen_intr_unbind"));

	isrc = xen_intr_isrc(*port_handlep);
	*port_handlep = NULL;
	if (isrc == NULL)
		return;

	mtx_lock(&xen_intr_isrc_lock);
	if (refcount_release(&isrc->xi_refcount) == 0) {
		mtx_unlock(&xen_intr_isrc_lock);
		return;
	}
	mtx_unlock(&xen_intr_isrc_lock);

	if (isrc->xi_cookie != NULL)
		intr_remove_handler(isrc->xi_cookie);
	xen_intr_release_isrc(isrc);
}
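/*
 * Reference counting note: xi_refcount starts at 1 when a port is bound
 * and is bumped by xen_intr_get_evtchn_from_port(), so multiple handles
 * may reference one isrc.  Only releasing the final reference removes
 * the handler and recycles the source.
 */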
void
xen_intr_signal(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	if (isrc != NULL) {
		KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
		    isrc->xi_type == EVTCHN_TYPE_IPI,
		    ("evtchn_signal on something other than a local port"));
		struct evtchn_send send = { .port = isrc->xi_port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
	}
}
evtchn_port_t
xen_intr_port(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	if (isrc == NULL)
		return (0);

	return (isrc->xi_port);
}
int
xen_intr_add_handler(const char *name, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t handle)
{
	struct xenisrc *isrc;
	int error;

	isrc = xen_intr_isrc(handle);
	if (isrc == NULL || isrc->xi_cookie != NULL)
		return (EINVAL);

	error = intr_add_handler(name, isrc->xi_vector, filter, handler, arg,
	    flags | INTR_EXCL, &isrc->xi_cookie, 0);
	if (error != 0)
		printf(
		    "%s: xen_intr_add_handler: intr_add_handler failed: %d\n",
		    name, error);

	return (error);
}
int
xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep)
{

	if (!is_valid_evtchn(port) || port >= NR_EVENT_CHANNELS)
		return (EINVAL);

	if (handlep == NULL)
		return (EINVAL);

	mtx_lock(&xen_intr_isrc_lock);
	if (xen_intr_port_to_isrc[port] == NULL) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EINVAL);
	}
	refcount_acquire(&xen_intr_port_to_isrc[port]->xi_refcount);
	mtx_unlock(&xen_intr_isrc_lock);

	/* Assign the opaque handle: a pointer to the isrc's vector. */
	*handlep = &xen_intr_port_to_isrc[port]->xi_vector;

	return (0);
}
#ifdef DDB
static const char *
xen_intr_print_type(enum evtchn_type type)
{
	static const char *evtchn_type_to_string[EVTCHN_TYPE_COUNT] = {
		[EVTCHN_TYPE_UNBOUND]	= "UNBOUND",
		[EVTCHN_TYPE_PIRQ]	= "PIRQ",
		[EVTCHN_TYPE_VIRQ]	= "VIRQ",
		[EVTCHN_TYPE_IPI]	= "IPI",
		[EVTCHN_TYPE_PORT]	= "PORT",
	};

	if (type >= EVTCHN_TYPE_COUNT)
		return ("UNKNOWN");

	return (evtchn_type_to_string[type]);
}
static void
xen_intr_dump_port(struct xenisrc *isrc)
{
	struct xen_intr_pcpu_data *pcpu;
	shared_info_t *s = HYPERVISOR_shared_info;
	int i;

	db_printf("Port %d Type: %s\n",
	    isrc->xi_port, xen_intr_print_type(isrc->xi_type));
	if (isrc->xi_type == EVTCHN_TYPE_PIRQ) {
		db_printf("\tPirq: %d ActiveHi: %d EdgeTrigger: %d "
		    "NeedsEOI: %d\n",
		    isrc->xi_pirq, isrc->xi_activehi, isrc->xi_edgetrigger,
		    !!xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map));
	}
	if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
		db_printf("\tVirq: %d\n", isrc->xi_virq);

	db_printf("\tMasked: %d Pending: %d\n",
	    !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
	    !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));

	db_printf("\tPer-CPU Masks: ");
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		db_printf("cpu#%d: %d ", i,
		    !!xen_test_bit(isrc->xi_port, pcpu->evtchn_enabled));
	}
	db_printf("\n");
}
DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
{
	int i;

	if (!xen_domain()) {
		db_printf("Only available on Xen guests\n");
		return;
	}

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		struct xenisrc *isrc;

		isrc = xen_intr_port_to_isrc[i];
		if (isrc == NULL)
			continue;

		xen_intr_dump_port(isrc);
	}
}
#endif /* DDB */