/******************************************************************************
 *
 * Xen event and interrupt services for x86 HVM guests.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 * Copyright (c) 2012, Spectra Logic Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/refcount.h>

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <x86/apicreg.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#include <machine/xen/synch_bitops.h>
#include <machine/xen/xen-os.h>

#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn/evtchnvar.h>

#include <dev/xen/xenpci/xenpcivar.h>
#include <dev/pci/pcivar.h>
static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");
/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
    /**
     * The last event channel bitmap section (level one bit) processed.
     * This is used to ensure that we scan all ports before
     * servicing an already-serviced port again.
     */
    u_int last_processed_l1i;

    /**
     * The last event channel processed within the event channel
     * bitmap being scanned.
     */
    u_int last_processed_l2i;

    /** Pointer to this CPU's interrupt statistic counter. */
    u_long *evtchn_intrcnt;

    /**
     * A bitmap of ports that can be serviced from this CPU.
     * A set bit means interrupt handling is enabled.
     */
    u_long evtchn_enabled[sizeof(u_long) * 8];
};
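/*
 * Sizing note (illustrative, not from the original sources): evtchn_enabled
 * holds sizeof(u_long) * 8 words of LONG_BIT bits each, i.e.
 * LONG_BIT * LONG_BIT bits in total -- 64 * 64 = 4096 on amd64 -- one bit
 * per event channel port of the 2-level event channel ABI.
 */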
/*
 * Start the scan at port 0 by initializing the last scanned
 * location as the highest numbered event channel port.
 */
static DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
    .last_processed_l1i = LONG_BIT - 1,
    .last_processed_l2i = LONG_BIT - 1
};
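/*
 * Note (illustrative): the scan loops advance before they read, so the
 * initializers above wrap on first use: (LONG_BIT - 1 + 1) % LONG_BIT == 0,
 * which makes the very first scan begin at bit 0.
 */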
DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

#define XEN_EEXIST		17 /* Xen "already exists" error */
#define XEN_ALLOCATE_VECTOR	0  /* Allocate a vector for this event channel */
#define XEN_INVALID_EVTCHN	0  /* Invalid event channel */

#define is_valid_evtchn(x)	((x) != XEN_INVALID_EVTCHN)
struct xenisrc {
    struct intsrc	 xi_intsrc;
    enum evtchn_type	 xi_type;
    int			 xi_cpu;	/* VCPU for delivery. */
    int			 xi_vector;	/* Global isrc vector number. */
    evtchn_port_t	 xi_port;
    int			 xi_pirq;
    int			 xi_virq;
    void		*xi_cookie;
    u_int		 xi_close:1;	/* close on unbind? */
    u_int		 xi_activehi:1;
    u_int		 xi_edgetrigger:1;
    u_int		 xi_masked:1;
    volatile u_int	 xi_refcount;
};
static void	xen_intr_suspend(struct pic *);
static void	xen_intr_resume(struct pic *, bool suspend_cancelled);
static void	xen_intr_enable_source(struct intsrc *isrc);
static void	xen_intr_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_eoi_source(struct intsrc *isrc);
static void	xen_intr_enable_intr(struct intsrc *isrc);
static void	xen_intr_disable_intr(struct intsrc *isrc);
static int	xen_intr_vector(struct intsrc *isrc);
static int	xen_intr_source_pending(struct intsrc *isrc);
static int	xen_intr_config_intr(struct intsrc *isrc,
		    enum intr_trigger trig, enum intr_polarity pol);
static int	xen_intr_assign_cpu(struct intsrc *isrc, u_int apic_id);

static void	xen_intr_pirq_enable_source(struct intsrc *isrc);
static void	xen_intr_pirq_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_pirq_eoi_source(struct intsrc *isrc);
static void	xen_intr_pirq_enable_intr(struct intsrc *isrc);
static void	xen_intr_pirq_disable_intr(struct intsrc *isrc);
static int	xen_intr_pirq_config_intr(struct intsrc *isrc,
		    enum intr_trigger trig, enum intr_polarity pol);
/**
 * PIC interface for all event channel port types except physical IRQs.
 */
struct pic xen_intr_pic = {
    .pic_enable_source  = xen_intr_enable_source,
    .pic_disable_source = xen_intr_disable_source,
    .pic_eoi_source     = xen_intr_eoi_source,
    .pic_enable_intr    = xen_intr_enable_intr,
    .pic_disable_intr   = xen_intr_disable_intr,
    .pic_vector         = xen_intr_vector,
    .pic_source_pending = xen_intr_source_pending,
    .pic_suspend        = xen_intr_suspend,
    .pic_resume         = xen_intr_resume,
    .pic_config_intr    = xen_intr_config_intr,
    .pic_assign_cpu     = xen_intr_assign_cpu
};
/**
 * PIC interface for all event channels representing
 * physical interrupt sources.
 */
struct pic xen_intr_pirq_pic = {
    .pic_enable_source  = xen_intr_pirq_enable_source,
    .pic_disable_source = xen_intr_pirq_disable_source,
    .pic_eoi_source     = xen_intr_pirq_eoi_source,
    .pic_enable_intr    = xen_intr_pirq_enable_intr,
    .pic_disable_intr   = xen_intr_pirq_disable_intr,
    .pic_vector         = xen_intr_vector,
    .pic_source_pending = xen_intr_source_pending,
    .pic_config_intr    = xen_intr_pirq_config_intr,
    .pic_assign_cpu     = xen_intr_assign_cpu
};
static struct mtx	 xen_intr_isrc_lock;
static int		 xen_intr_auto_vector_count;
static struct xenisrc	*xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
static u_long		*xen_intr_pirq_eoi_map;
static boolean_t	 xen_intr_pirq_eoi_map_enabled;
/*------------------------- Private Functions --------------------------------*/
/**
 * Disable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param port  The event channel port to mask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel ports.
 *
 * \note  This operation does not preclude reception of an event
 *        for this event channel on another CPU.  To mask the
 *        event channel globally, use evtchn_mask().
 */
static void
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
{
    struct xen_intr_pcpu_data *pcpu;

    pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
    xen_clear_bit(port, pcpu->evtchn_enabled);
}
/**
 * Enable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel ports.
 *
 * \note  This operation does not guarantee that event delivery
 *        is enabled for this event channel port.  The port must
 *        also be globally enabled.  See evtchn_unmask().
 */
static void
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
{
    struct xen_intr_pcpu_data *pcpu;

    pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
    xen_set_bit(port, pcpu->evtchn_enabled);
}
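/*
 * Usage sketch (illustrative): migrating a port between CPUs pairs the two
 * helpers above, as xen_intr_assign_cpu() does later in this file:
 *
 *	evtchn_cpu_mask_port(old_cpu, port);
 *	evtchn_cpu_unmask_port(new_cpu, port);
 */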
/**
 * Allocate and register a per-cpu Xen upcall interrupt counter.
 *
 * \param cpu  The cpu for which to register this interrupt count.
 */
static void
xen_intr_intrcnt_add(u_int cpu)
{
    char buf[MAXCOMLEN + 1];
    struct xen_intr_pcpu_data *pcpu;

    pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
    if (pcpu->evtchn_intrcnt != NULL)
        return;

    snprintf(buf, sizeof(buf), "cpu%d:xen", cpu);
    intrcnt_add(buf, &pcpu->evtchn_intrcnt);
}
/**
 * Search for an already-allocated but currently unused Xen interrupt
 * source object.
 *
 * \param type  Restrict the search to interrupt sources of the given
 *              type.
 * \return  A pointer to a free Xen interrupt source object or NULL.
 */
static struct xenisrc *
xen_intr_find_unused_isrc(enum evtchn_type type)
{
    int isrc_idx;
    KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));
    for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
        struct xenisrc *isrc;
        u_int vector;

        vector = FIRST_EVTCHN_INT + isrc_idx;
        isrc = (struct xenisrc *)intr_lookup_source(vector);
        if (isrc != NULL &&
            isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
            KASSERT(isrc->xi_intsrc.is_handlers == 0,
                ("Free evtchn still has handlers"));
            isrc->xi_type = type;
            return (isrc);
        }
    }
    return (NULL);
}
/**
 * Allocate a Xen interrupt source object.
 *
 * \param type  The type of interrupt source to create.
 *
 * \return  A pointer to a newly allocated Xen interrupt source
 *          object or NULL.
 */
static struct xenisrc *
xen_intr_alloc_isrc(enum evtchn_type type, int vector)
{
    struct xenisrc *isrc;

    KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));

    if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) {
        printf("xen_intr_alloc: Event channels exhausted.\n");
        return (NULL);
    }

    if (type != EVTCHN_TYPE_PIRQ) {
        vector = FIRST_EVTCHN_INT + xen_intr_auto_vector_count;
        xen_intr_auto_vector_count++;
    }

    KASSERT((intr_lookup_source(vector) == NULL),
        ("Trying to use an already allocated vector"));

    mtx_unlock(&xen_intr_isrc_lock);
    isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
    isrc->xi_intsrc.is_pic =
        (type == EVTCHN_TYPE_PIRQ) ? &xen_intr_pirq_pic : &xen_intr_pic;
    isrc->xi_vector = vector;
    isrc->xi_type = type;
    intr_register_source(&isrc->xi_intsrc);
    mtx_lock(&xen_intr_isrc_lock);

    return (isrc);
}
/**
 * Attempt to free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  EBUSY if the source is still in use, otherwise 0.
 */
static int
xen_intr_release_isrc(struct xenisrc *isrc)
{

    mtx_lock(&xen_intr_isrc_lock);
    KASSERT(isrc->xi_intsrc.is_handlers == 0,
        ("Release called, but xenisrc still in use"));
    evtchn_mask_port(isrc->xi_port);
    evtchn_clear_port(isrc->xi_port);

    /* Rebind port to CPU 0. */
    evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
    evtchn_cpu_unmask_port(0, isrc->xi_port);

    if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
        struct evtchn_close close = { .port = isrc->xi_port };

        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
            panic("EVTCHNOP_close failed");
    }

    xen_intr_port_to_isrc[isrc->xi_port] = NULL;
    isrc->xi_type = EVTCHN_TYPE_UNBOUND;
    isrc->xi_cookie = NULL;
    mtx_unlock(&xen_intr_isrc_lock);
    return (0);
}
/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp       The returned Xen interrupt object associated with
 *                    the specified local port.
 * \param local_port  The event channel to bind.
 * \param type        The event channel type of local_port.
 * \param intr_owner  The device making this bind request.
 * \param filter      An interrupt filter handler.  Specify NULL
 *                    to always dispatch to the ithread handler.
 * \param handler     An interrupt ithread handler.  Optional (can
 *                    specify NULL) if all necessary event actions
 *                    are performed by filter.
 * \param arg         Argument to present to both filter and handler.
 * \param irqflags    Interrupt handler flags.  See sys/bus.h.
 * \param handlep     Pointer to an opaque handle used to manage this
 *                    registration.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, const char *intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
    struct xenisrc *isrc;
    int error;

    *isrcp = NULL;
    if (port_handlep == NULL) {
        printf("%s: xen_intr_bind_isrc: Bad event handle\n",
            intr_owner);
        return (EINVAL);
    }

    mtx_lock(&xen_intr_isrc_lock);
    isrc = xen_intr_find_unused_isrc(type);
    if (isrc == NULL) {
        isrc = xen_intr_alloc_isrc(type, XEN_ALLOCATE_VECTOR);
        if (isrc == NULL) {
            mtx_unlock(&xen_intr_isrc_lock);
            return (ENOSPC);
        }
    }
    isrc->xi_port = local_port;
    xen_intr_port_to_isrc[local_port] = isrc;
    refcount_init(&isrc->xi_refcount, 1);
    mtx_unlock(&xen_intr_isrc_lock);

    /* Assign the opaque handle (a pointer to the interrupt vector). */
    *port_handlep = &isrc->xi_vector;

#ifdef SMP
    if (type == EVTCHN_TYPE_PORT) {
        /*
         * By default all interrupts are assigned to vCPU#0
         * unless specified otherwise, so shuffle them to balance
         * the interrupt load.
         */
        xen_intr_assign_cpu(&isrc->xi_intsrc, intr_next_cpu(0));
    }
#endif

    if (filter == NULL && handler == NULL) {
        /*
         * No filter/handler provided; leave the event channel
         * masked and without a valid handler.  The caller is
         * in charge of setting that up.
         */
        *isrcp = isrc;
        return (0);
    }

    error = xen_intr_add_handler(intr_owner, filter, handler, arg, flags,
        *port_handlep);
    if (error != 0) {
        xen_intr_release_isrc(isrc);
        return (error);
    }
    *isrcp = isrc;
    return (0);
}
/**
 * Lookup a Xen interrupt source object given an interrupt binding handle.
 *
 * \param handle  A handle initialized by a previous call to
 *                xen_intr_bind_isrc().
 *
 * \returns  A pointer to the Xen interrupt source object associated
 *           with the given interrupt handle.  NULL if no association
 *           currently exists.
 */
static struct xenisrc *
xen_intr_isrc(xen_intr_handle_t handle)
{
    int vector;

    if (handle == NULL)
        return (NULL);

    vector = *(int *)handle;
    KASSERT(vector >= FIRST_EVTCHN_INT &&
        vector < (FIRST_EVTCHN_INT + xen_intr_auto_vector_count),
        ("Xen interrupt vector is out of range"));

    return ((struct xenisrc *)intr_lookup_source(vector));
}
/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param sh    The Xen shared info area.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
static inline u_long
xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh,
    u_int idx)
{

    CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(sh->evtchn_pending[0]));
    CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(pcpu->evtchn_enabled[0]));
    CTASSERT(sizeof(sh->evtchn_mask) == sizeof(sh->evtchn_pending));
    CTASSERT(sizeof(sh->evtchn_mask) == sizeof(pcpu->evtchn_enabled));
    return (sh->evtchn_pending[idx]
          & ~sh->evtchn_mask[idx]
          & pcpu->evtchn_enabled[idx]);
}
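/*
 * Worked example (illustrative): with
 *	sh->evtchn_pending[idx]   = 0b1011
 *	sh->evtchn_mask[idx]      = 0b0010
 *	pcpu->evtchn_enabled[idx] = 0b1001
 * the result is 0b1001: only ports that are pending, not globally masked,
 * and enabled for delivery on this CPU are reported.
 */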
/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * \param trap_frame  The trap frame context for the current interrupt.
 */
void
xen_intr_handle_upcall(struct trapframe *trap_frame)
{
    u_int l1i, l2i, port, cpu;
    u_long masked_l1, masked_l2;
    struct xenisrc *isrc;
    shared_info_t *s;
    vcpu_info_t *v;
    struct xen_intr_pcpu_data *pc;
    u_long l1, l2;

    /*
     * Disable preemption in order to always check and fire events
     * on the right vCPU.
     */
    critical_enter();

    cpu = PCPU_GET(cpuid);
    pc = DPCPU_PTR(xen_intr_pcpu);
    s = HYPERVISOR_shared_info;
    v = DPCPU_GET(vcpu_info);

    if (xen_hvm_domain() && !xen_vector_callback_enabled) {
        KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
    }

    v->evtchn_upcall_pending = 0;
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
    /* Clear master flag /before/ clearing selector flag. */
    wmb();
#endif
    l1 = atomic_readandclear_long(&v->evtchn_pending_sel);

    l1i = pc->last_processed_l1i;
    l2i = pc->last_processed_l2i;
    (*pc->evtchn_intrcnt)++;

    while (l1 != 0) {
        l1i = (l1i + 1) % LONG_BIT;
        masked_l1 = l1 & ((~0UL) << l1i);

        if (masked_l1 == 0) {
            /*
             * if we masked out all events, wrap around
             * to the beginning.
             */
            l1i = LONG_BIT - 1;
            l2i = LONG_BIT - 1;
            continue;
        }
        l1i = ffsl(masked_l1) - 1;

        do {
            l2 = xen_intr_active_ports(pc, s, l1i);
            l2i = (l2i + 1) % LONG_BIT;
            masked_l2 = l2 & ((~0UL) << l2i);

            if (masked_l2 == 0) {
                /* if we masked out all events, move on */
                break;
            }
            l2i = ffsl(masked_l2) - 1;
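            /*
             * Illustrative: the two scan levels combine into a global
             * port number below; with LONG_BIT == 64, l1i = 2 and
             * l2i = 5 select port 2 * 64 + 5 = 133.
             */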
            port = (l1i * LONG_BIT) + l2i;
            synch_clear_bit(port, &s->evtchn_pending[0]);

            isrc = xen_intr_port_to_isrc[port];
            if (__predict_false(isrc == NULL))
                continue;

            /* Make sure we are firing on the right vCPU */
            KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
                ("Received unexpected event on vCPU#%d, event bound to vCPU#%d",
                PCPU_GET(cpuid), isrc->xi_cpu));

            intr_execute_handlers(&isrc->xi_intsrc, trap_frame);

            /*
             * If this is the final port processed,
             * we'll pick up here+1 next time.
             */
            pc->last_processed_l1i = l1i;
            pc->last_processed_l2i = l2i;
        } while (l2i != LONG_BIT - 1);

        l2 = xen_intr_active_ports(pc, s, l1i);
        if (l2 == 0) {
            /*
             * We handled all ports, so we can clear the
             * selector bit.
             */
            l1 &= ~(1UL << l1i);
        }
    }
    critical_exit();
}
static void
xen_intr_init(void *dummy __unused)
{
    shared_info_t *s = HYPERVISOR_shared_info;
    struct xen_intr_pcpu_data *pcpu;
    struct physdev_pirq_eoi_gmfn eoi_gmfn;
    int i, rc;

    if (!xen_domain())
        return;

    mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);

    /*
     * Register interrupt count manually as we aren't
     * guaranteed to see a call to xen_intr_assign_cpu()
     * before our first interrupt.  Also set the per-cpu
     * mask of CPU#0 to enable all, since by default
     * all event channels are bound to CPU#0.
     */
    CPU_FOREACH(i) {
        pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
        memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
            sizeof(pcpu->evtchn_enabled));
        xen_intr_intrcnt_add(i);
    }

    for (i = 0; i < nitems(s->evtchn_mask); i++)
        atomic_store_rel_long(&s->evtchn_mask[i], ~0);

    /* Try to register PIRQ EOI map */
    xen_intr_pirq_eoi_map = malloc(PAGE_SIZE, M_XENINTR, M_WAITOK | M_ZERO);
    eoi_gmfn.gmfn = atop(vtophys(xen_intr_pirq_eoi_map));
    rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
    if (rc != 0 && bootverbose)
        printf("Xen interrupts: unable to register PIRQ EOI map\n");
    else
        xen_intr_pirq_eoi_map_enabled = true;

    intr_register_pic(&xen_intr_pic);
    intr_register_pic(&xen_intr_pirq_pic);

    if (bootverbose)
        printf("Xen interrupt system initialized\n");
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);
/*--------------------------- Common PIC Functions ---------------------------*/
/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_suspend(struct pic *unused)
{
}
static void
xen_rebind_ipi(struct xenisrc *isrc)
{
#ifdef SMP
    int cpu = isrc->xi_cpu;
    int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
    int error;
    struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
        &bind_ipi);
    if (error != 0)
        panic("unable to rebind xen IPI: %d", error);

    isrc->xi_port = bind_ipi.port;
    isrc->xi_cpu = 0;
    xen_intr_port_to_isrc[bind_ipi.port] = isrc;

    error = xen_intr_assign_cpu(&isrc->xi_intsrc,
        cpu_apic_ids[cpu]);
    if (error)
        panic("unable to bind xen IPI to CPU#%d: %d",
            cpu, error);

    evtchn_unmask_port(bind_ipi.port);
#else
    panic("Resume IPI event channel on UP");
#endif
}
static void
xen_rebind_virq(struct xenisrc *isrc)
{
    int cpu = isrc->xi_cpu;
    int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
    int error;
    struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
                                          .vcpu = vcpu_id };

    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
        &bind_virq);
    if (error != 0)
        panic("unable to rebind xen VIRQ#%d: %d", isrc->xi_virq, error);

    isrc->xi_port = bind_virq.port;
    isrc->xi_cpu = 0;
    xen_intr_port_to_isrc[bind_virq.port] = isrc;

#ifdef SMP
    error = xen_intr_assign_cpu(&isrc->xi_intsrc,
        cpu_apic_ids[cpu]);
    if (error)
        panic("unable to bind xen VIRQ#%d to CPU#%d: %d",
            isrc->xi_virq, cpu, error);
#endif

    evtchn_unmask_port(bind_virq.port);
}
/**
 * Return this PIC to service after being suspended.
 */
static void
xen_intr_resume(struct pic *unused, bool suspend_cancelled)
{
    shared_info_t *s = HYPERVISOR_shared_info;
    struct xenisrc *isrc;
    u_int isrc_idx;
    int i;

    if (suspend_cancelled)
        return;

    /* Reset the per-CPU masks */
    CPU_FOREACH(i) {
        struct xen_intr_pcpu_data *pcpu;

        pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
        memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
            sizeof(pcpu->evtchn_enabled));
    }

    /* Mask all event channels. */
    for (i = 0; i < nitems(s->evtchn_mask); i++)
        atomic_store_rel_long(&s->evtchn_mask[i], ~0);

    /* Remove port -> isrc mappings */
    memset(xen_intr_port_to_isrc, 0, sizeof(xen_intr_port_to_isrc));

    /* Free unused isrcs and rebind VIRQs and IPIs */
    for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
        u_int vector;

        vector = FIRST_EVTCHN_INT + isrc_idx;
        isrc = (struct xenisrc *)intr_lookup_source(vector);
        if (isrc != NULL) {
            isrc->xi_port = 0;
            switch (isrc->xi_type) {
            case EVTCHN_TYPE_IPI:
                xen_rebind_ipi(isrc);
                break;
            case EVTCHN_TYPE_VIRQ:
                xen_rebind_virq(isrc);
                break;
            default:
                break;
            }
        }
    }
}
/**
 * Disable a Xen interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_disable_intr(struct intsrc *base_isrc)
{
    struct xenisrc *isrc = (struct xenisrc *)base_isrc;

    evtchn_mask_port(isrc->xi_port);
}

/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_vector(struct intsrc *base_isrc)
{
    struct xenisrc *isrc = (struct xenisrc *)base_isrc;

    return (isrc->xi_vector);
}

/**
 * Determine whether or not interrupt events are pending on the
 * given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_source_pending(struct intsrc *isrc)
{
    /*
     * Event channels are edge triggered, so there is never a latched
     * pending event to report.
     */
    return (0);
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
    /* Configuration is only possible via the evtchn APIs. */
    return (ENODEV);
}
/**
 * Configure CPU affinity for interrupt source event delivery.
 *
 * \param isrc     The interrupt source to configure.
 * \param apic_id  The apic id of the CPU for handling future events.
 *
 * \returns  0 if successful, otherwise an errno.
 */
static int
xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
{
#ifdef SMP
    struct evtchn_bind_vcpu bind_vcpu;
    struct xenisrc *isrc;
    u_int to_cpu, vcpu_id;
    int error, masked;

    if (xen_vector_callback_enabled == 0)
        return (EOPNOTSUPP);

    to_cpu = apic_cpuid(apic_id);
    vcpu_id = pcpu_find(to_cpu)->pc_vcpu_id;
    xen_intr_intrcnt_add(to_cpu);

    mtx_lock(&xen_intr_isrc_lock);
    isrc = (struct xenisrc *)base_isrc;
    if (!is_valid_evtchn(isrc->xi_port)) {
        mtx_unlock(&xen_intr_isrc_lock);
        return (EINVAL);
    }

    /*
     * Mask the event channel while binding it to prevent interrupt
     * delivery with an inconsistent state in isrc->xi_cpu.
     */
    masked = evtchn_test_and_set_mask(isrc->xi_port);
    if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
        (isrc->xi_type == EVTCHN_TYPE_IPI)) {
        /*
         * Virtual IRQs are associated with a cpu by
         * the Hypervisor at evtchn_bind_virq time, so
         * all we need to do is update the per-CPU masks.
         */
        evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
        isrc->xi_cpu = to_cpu;
        evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
        goto out;
    }

    bind_vcpu.port = isrc->xi_port;
    bind_vcpu.vcpu = vcpu_id;

    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
    if (isrc->xi_cpu != to_cpu) {
        if (error == 0) {
            /* Commit to new binding by removing the old one. */
            evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
            isrc->xi_cpu = to_cpu;
            evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
        }
    }

out:
    if (masked == 0)
        evtchn_unmask_port(isrc->xi_port);
    mtx_unlock(&xen_intr_isrc_lock);
    return (0);
#else
    return (EOPNOTSUPP);
#endif
}
/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_disable_source(struct intsrc *base_isrc, int eoi)
{
    struct xenisrc *isrc;

    isrc = (struct xenisrc *)base_isrc;

    /*
     * NB: checking if the event channel is already masked is
     * needed because the event channel user-space device
     * masks event channels in its filter routine as part of its
     * normal operation, and those shouldn't be automatically
     * unmasked by the generic interrupt code.  The event channel
     * device will unmask them when needed.
     */
    isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_enable_source(struct intsrc *base_isrc)
{
    struct xenisrc *isrc;

    isrc = (struct xenisrc *)base_isrc;

    if (isrc->xi_masked == 0)
        evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_eoi_source(struct intsrc *base_isrc)
{
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_enable_intr(struct intsrc *base_isrc)
{
    struct xenisrc *isrc = (struct xenisrc *)base_isrc;

    evtchn_unmask_port(isrc->xi_port);
}
/*------------------ Physical Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_pirq_disable_source(struct intsrc *base_isrc, int eoi)
{
    struct xenisrc *isrc;

    isrc = (struct xenisrc *)base_isrc;

    if (isrc->xi_edgetrigger == 0)
        evtchn_mask_port(isrc->xi_port);
    if (eoi == PIC_EOI)
        xen_intr_pirq_eoi_source(base_isrc);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_pirq_enable_source(struct intsrc *base_isrc)
{
    struct xenisrc *isrc;

    isrc = (struct xenisrc *)base_isrc;

    if (isrc->xi_edgetrigger == 0)
        evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_pirq_eoi_source(struct intsrc *base_isrc)
{
    struct xenisrc *isrc;
    int error;

    isrc = (struct xenisrc *)base_isrc;

    if (xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) {
        struct physdev_eoi eoi = { .irq = isrc->xi_pirq };

        error = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
        if (error != 0)
            panic("Unable to EOI PIRQ#%d: %d",
                isrc->xi_pirq, error);
    }
}
/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_pirq_enable_intr(struct intsrc *base_isrc)
{
    struct xenisrc *isrc;
    struct evtchn_bind_pirq bind_pirq;
    struct physdev_irq_status_query irq_status;
    int error;

    isrc = (struct xenisrc *)base_isrc;

    if (!xen_intr_pirq_eoi_map_enabled) {
        irq_status.irq = isrc->xi_pirq;
        error = HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query,
            &irq_status);
        if (error)
            panic("unable to get status of IRQ#%d", isrc->xi_pirq);

        if (irq_status.flags & XENIRQSTAT_needs_eoi) {
            /*
             * Since the dynamic PIRQ EOI map is not available,
             * mark the PIRQ as needing EOI unconditionally.
             */
            xen_set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map);
        }
    }

    bind_pirq.pirq = isrc->xi_pirq;
    bind_pirq.flags = isrc->xi_edgetrigger ? 0 : BIND_PIRQ__WILL_SHARE;
    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
    if (error)
        panic("unable to bind IRQ#%d", isrc->xi_pirq);

    isrc->xi_port = bind_pirq.port;

    mtx_lock(&xen_intr_isrc_lock);
    KASSERT((xen_intr_port_to_isrc[bind_pirq.port] == NULL),
        ("trying to override an already setup event channel port"));
    xen_intr_port_to_isrc[bind_pirq.port] = isrc;
    mtx_unlock(&xen_intr_isrc_lock);

    evtchn_unmask_port(isrc->xi_port);
}
/*
 * Disable an interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_pirq_disable_intr(struct intsrc *base_isrc)
{
    struct xenisrc *isrc;
    struct evtchn_close close;
    int error;

    isrc = (struct xenisrc *)base_isrc;

    evtchn_mask_port(isrc->xi_port);

    close.port = isrc->xi_port;
    error = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
    if (error)
        panic("unable to close event channel %d IRQ#%d",
            isrc->xi_port, isrc->xi_pirq);

    mtx_lock(&xen_intr_isrc_lock);
    xen_intr_port_to_isrc[isrc->xi_port] = NULL;
    mtx_unlock(&xen_intr_isrc_lock);

    isrc->xi_port = 0;
}
/*
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_pirq_config_intr(struct intsrc *base_isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
    struct xenisrc *isrc = (struct xenisrc *)base_isrc;
    struct physdev_setup_gsi setup_gsi;
    int error;

    KASSERT(!(trig == INTR_TRIGGER_CONFORM || pol == INTR_POLARITY_CONFORM),
        ("%s: Conforming trigger or polarity\n", __func__));
    setup_gsi.gsi = isrc->xi_pirq;
    setup_gsi.triggering = trig == INTR_TRIGGER_EDGE ? 0 : 1;
    setup_gsi.polarity = pol == INTR_POLARITY_HIGH ? 0 : 1;

    error = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
    if (error == -XEN_EEXIST) {
        if ((isrc->xi_edgetrigger && (trig != INTR_TRIGGER_EDGE)) ||
            (isrc->xi_activehi && (pol != INTR_POLARITY_HIGH)))
            panic("unable to reconfigure interrupt IRQ#%d",
                isrc->xi_pirq);
        error = 0;
    }
    if (error != 0)
        panic("unable to configure IRQ#%d", isrc->xi_pirq);

    isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
    isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

    return (0);
}
/*--------------------------- Public Functions -------------------------------*/
/*------- API comments for these methods can be found in xen/xenintr.h -------*/
int
xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
    struct xenisrc *isrc;
    int error;

    error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT,
        device_get_nameunit(dev), filter, handler, arg, flags,
        port_handlep);
    if (error == 0) {
        /*
         * The Event Channel API didn't open this port, so it is not
         * responsible for closing it automatically on unbind.
         */
        isrc->xi_close = 0;
    }
    return (error);
}
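/*
 * Example (illustrative, hypothetical driver code): binding a port
 * advertised by a backend through the XenStore:
 *
 *	xen_intr_handle_t handle;
 *	error = xen_intr_bind_local_port(sc->dev, port, xn_filter, NULL,
 *	    sc, INTR_TYPE_NET | INTR_MPSAFE, &handle);
 *
 * xn_filter and sc are stand-in names, not part of this file; the handle
 * is later torn down with xen_intr_unbind(&handle).
 */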
int
xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
    struct xenisrc *isrc;
    struct evtchn_alloc_unbound alloc_unbound;
    int error;

    alloc_unbound.dom        = DOMID_SELF;
    alloc_unbound.remote_dom = remote_domain;
    error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
        &alloc_unbound);
    if (error != 0) {
        /*
         * XXX Trap Hypercall error code Linuxisms in
         * the HYPERCALL layer.
         */
        return (-error);
    }

    error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
        device_get_nameunit(dev), filter, handler, arg, flags,
        port_handlep);
    if (error != 0) {
        evtchn_close_t close = { .port = alloc_unbound.port };

        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
            panic("EVTCHNOP_close failed");
        return (error);
    }

    isrc->xi_close = 1;
    return (0);
}
int
xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
    u_int remote_port, driver_filter_t filter, driver_intr_t handler,
    void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
{
    struct xenisrc *isrc;
    struct evtchn_bind_interdomain bind_interdomain;
    int error;

    bind_interdomain.remote_dom  = remote_domain;
    bind_interdomain.remote_port = remote_port;
    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
        &bind_interdomain);
    if (error != 0) {
        /*
         * XXX Trap Hypercall error code Linuxisms in
         * the HYPERCALL layer.
         */
        return (-error);
    }

    error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
        EVTCHN_TYPE_PORT, device_get_nameunit(dev), filter, handler, arg,
        flags, port_handlep);
    if (error != 0) {
        evtchn_close_t close = { .port = bind_interdomain.local_port };

        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
            panic("EVTCHNOP_close failed");
        return (error);
    }

    /*
     * The Event Channel API opened this port, so it is
     * responsible for closing it automatically on unbind.
     */
    isrc->xi_close = 1;
    return (0);
}
int
xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
    int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
    struct xenisrc *isrc;
    struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
    int error;

    /* Ensure the target CPU is ready to handle evtchn interrupts. */
    xen_intr_intrcnt_add(cpu);

    isrc = NULL;
    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
    if (error != 0) {
        /*
         * XXX Trap Hypercall error code Linuxisms in
         * the HYPERCALL layer.
         */
        return (-error);
    }

    error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ,
        device_get_nameunit(dev), filter, handler, arg, flags,
        port_handlep);
#ifdef SMP
    if (error == 0)
        error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);
#endif
    if (error != 0) {
        evtchn_close_t close = { .port = bind_virq.port };

        xen_intr_unbind(*port_handlep);
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
            panic("EVTCHNOP_close failed");
        return (error);
    }

#ifdef SMP
    if (isrc->xi_cpu != cpu) {
        /*
         * Too early in the boot process for the generic interrupt
         * code to perform the binding.  Update our event channel
         * masks manually so events can't fire on the wrong cpu
         * during AP startup.
         */
        xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
    }
#endif

    /*
     * The Event Channel API opened this port, so it is
     * responsible for closing it automatically on unbind.
     */
    isrc->xi_close = 1;
    isrc->xi_virq = virq;
    return (0);
}
int
xen_intr_alloc_and_bind_ipi(u_int cpu, driver_filter_t filter,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
#ifdef SMP
    int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
    struct xenisrc *isrc;
    struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
    /* Same size as the one used by intr_handler->ih_name. */
    char name[MAXCOMLEN + 1];
    int error;

    /* Ensure the target CPU is ready to handle evtchn interrupts. */
    xen_intr_intrcnt_add(cpu);

    isrc = NULL;
    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
    if (error != 0) {
        /*
         * XXX Trap Hypercall error code Linuxisms in
         * the HYPERCALL layer.
         */
        return (-error);
    }

    snprintf(name, sizeof(name), "cpu%u", cpu);

    error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
        name, filter, NULL, NULL, flags, port_handlep);
    if (error != 0) {
        evtchn_close_t close = { .port = bind_ipi.port };

        xen_intr_unbind(*port_handlep);
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
            panic("EVTCHNOP_close failed");
        return (error);
    }

    if (isrc->xi_cpu != cpu) {
        /*
         * Too early in the boot process for the generic interrupt
         * code to perform the binding.  Update our event channel
         * masks manually so events can't fire on the wrong cpu
         * during AP startup.
         */
        xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
    }

    /*
     * The Event Channel API opened this port, so it is
     * responsible for closing it automatically on unbind.
     */
    isrc->xi_close = 1;
    return (0);
#else
    return (EOPNOTSUPP);
#endif
}
int
xen_register_pirq(int vector, enum intr_trigger trig, enum intr_polarity pol)
{
    struct physdev_map_pirq map_pirq;
    struct xenisrc *isrc;
    int error;

    printf("xen: register IRQ#%d\n", vector);

    map_pirq.domid = DOMID_SELF;
    map_pirq.type = MAP_PIRQ_TYPE_GSI;
    map_pirq.index = vector;
    map_pirq.pirq = vector;

    error = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_pirq);
    if (error) {
        printf("xen: unable to map IRQ#%d\n", vector);
        return (error);
    }

    mtx_lock(&xen_intr_isrc_lock);
    isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector);
    mtx_unlock(&xen_intr_isrc_lock);
    KASSERT((isrc != NULL), ("xen: unable to allocate isrc for interrupt"));
    isrc->xi_pirq = vector;
    isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
    isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

    return (0);
}
int
xen_register_msi(device_t dev, int vector, int count)
{
    struct physdev_map_pirq msi_irq;
    struct xenisrc *isrc;
    int ret;

    memset(&msi_irq, 0, sizeof(msi_irq));
    msi_irq.domid = DOMID_SELF;
    msi_irq.type = count == 1 ?
        MAP_PIRQ_TYPE_MSI_SEG : MAP_PIRQ_TYPE_MULTI_MSI;
    msi_irq.index = -1;
    msi_irq.pirq = -1;
    msi_irq.bus = pci_get_bus(dev) | (pci_get_domain(dev) << 16);
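    /*
     * Illustrative: devfn below packs the 5-bit PCI slot and 3-bit
     * function numbers, so slot 3, function 1 becomes (3 << 3) | 1 = 0x19.
     */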
    msi_irq.devfn = (pci_get_slot(dev) << 3) | pci_get_function(dev);
    msi_irq.entry_nr = count;

    ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &msi_irq);
    if (ret != 0)
        return (ret);
    if (count != msi_irq.entry_nr) {
        panic("unable to setup all requested MSI vectors "
            "(expected %d got %d)", count, msi_irq.entry_nr);
    }

    mtx_lock(&xen_intr_isrc_lock);
    for (int i = 0; i < count; i++) {
        isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector + i);
        KASSERT(isrc != NULL,
            ("xen: unable to allocate isrc for interrupt"));
        isrc->xi_pirq = msi_irq.pirq + i;
        /* MSI interrupts are always edge triggered */
        isrc->xi_edgetrigger = 1;
    }
    mtx_unlock(&xen_intr_isrc_lock);

    return (0);
}
int
xen_release_msi(int vector)
{
    struct physdev_unmap_pirq unmap;
    struct xenisrc *isrc;
    int ret;

    isrc = (struct xenisrc *)intr_lookup_source(vector);
    if (isrc == NULL)
        return (ENXIO);

    unmap.pirq = isrc->xi_pirq;
    ret = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap);
    if (ret != 0)
        return (ret);

    xen_intr_release_isrc(isrc);
    return (0);
}
int
xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
{
    char descr[MAXCOMLEN + 1];
    struct xenisrc *isrc;
    va_list ap;

    isrc = xen_intr_isrc(port_handle);
    if (isrc == NULL)
        return (EINVAL);

    va_start(ap, fmt);
    vsnprintf(descr, sizeof(descr), fmt, ap);
    va_end(ap);
    return (intr_describe(isrc->xi_vector, isrc->xi_cookie, descr));
}
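/*
 * Example (illustrative): a driver typically tags its bound handler with
 * the device name right after binding:
 *
 *	xen_intr_describe(handle, "%s", device_get_nameunit(dev));
 */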
void
xen_intr_unbind(xen_intr_handle_t *port_handlep)
{
    struct xenisrc *isrc;

    KASSERT(port_handlep != NULL,
        ("NULL xen_intr_handle_t passed to xen_intr_unbind"));

    isrc = xen_intr_isrc(*port_handlep);
    *port_handlep = NULL;
    if (isrc == NULL)
        return;

    mtx_lock(&xen_intr_isrc_lock);
    if (refcount_release(&isrc->xi_refcount) == 0) {
        mtx_unlock(&xen_intr_isrc_lock);
        return;
    }
    mtx_unlock(&xen_intr_isrc_lock);

    if (isrc->xi_cookie != NULL)
        intr_remove_handler(isrc->xi_cookie);
    xen_intr_release_isrc(isrc);
}
void
xen_intr_signal(xen_intr_handle_t handle)
{
    struct xenisrc *isrc;

    isrc = xen_intr_isrc(handle);
    if (isrc != NULL) {
        KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
            isrc->xi_type == EVTCHN_TYPE_IPI,
            ("evtchn_signal on something other than a local port"));
        struct evtchn_send send = { .port = isrc->xi_port };
        (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
    }
}
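/*
 * Example (illustrative, hypothetical driver state): a shared-ring producer
 * signals the remote end only when the consumer needs a notification:
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->ring, notify);
 *	if (notify)
 *		xen_intr_signal(sc->xen_intr_handle);
 */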
evtchn_port_t
xen_intr_port(xen_intr_handle_t handle)
{
    struct xenisrc *isrc;

    isrc = xen_intr_isrc(handle);
    if (isrc == NULL)
        return (0);

    return (isrc->xi_port);
}
int
xen_intr_add_handler(const char *name, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t handle)
{
    struct xenisrc *isrc;
    int error;

    isrc = xen_intr_isrc(handle);
    if (isrc == NULL || isrc->xi_cookie != NULL)
        return (EINVAL);

    error = intr_add_handler(name, isrc->xi_vector, filter, handler, arg,
        flags | INTR_EXCL, &isrc->xi_cookie, 0);
    if (error != 0) {
        printf(
            "%s: xen_intr_add_handler: intr_add_handler failed: %d\n",
            name, error);
    }

    return (error);
}
int
xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep)
{

    if (!is_valid_evtchn(port) || port >= NR_EVENT_CHANNELS)
        return (EINVAL);

    if (handlep == NULL)
        return (EINVAL);

    mtx_lock(&xen_intr_isrc_lock);
    if (xen_intr_port_to_isrc[port] == NULL) {
        mtx_unlock(&xen_intr_isrc_lock);
        return (EINVAL);
    }
    refcount_acquire(&xen_intr_port_to_isrc[port]->xi_refcount);
    mtx_unlock(&xen_intr_isrc_lock);

    /* Assign the opaque handle (a pointer to the interrupt vector). */
    *handlep = &xen_intr_port_to_isrc[port]->xi_vector;

    return (0);
}
#ifdef DDB
static const char *
xen_intr_print_type(enum evtchn_type type)
{
    static const char *evtchn_type_to_string[EVTCHN_TYPE_COUNT] = {
        [EVTCHN_TYPE_UNBOUND]	= "UNBOUND",
        [EVTCHN_TYPE_PIRQ]	= "PIRQ",
        [EVTCHN_TYPE_VIRQ]	= "VIRQ",
        [EVTCHN_TYPE_IPI]	= "IPI",
        [EVTCHN_TYPE_PORT]	= "PORT",
    };

    if (type >= EVTCHN_TYPE_COUNT)
        return ("UNKNOWN");

    return (evtchn_type_to_string[type]);
}
static void
xen_intr_dump_port(struct xenisrc *isrc)
{
    struct xen_intr_pcpu_data *pcpu;
    shared_info_t *s = HYPERVISOR_shared_info;
    int i;

    db_printf("Port %d Type: %s\n",
        isrc->xi_port, xen_intr_print_type(isrc->xi_type));
    if (isrc->xi_type == EVTCHN_TYPE_PIRQ) {
        db_printf("\tPirq: %d ActiveHi: %d EdgeTrigger: %d "
            "NeedsEOI: %d\n",
            isrc->xi_pirq, isrc->xi_activehi, isrc->xi_edgetrigger,
            !!xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map));
    }
    if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
        db_printf("\tVirq: %d\n", isrc->xi_virq);

    db_printf("\tMasked: %d Pending: %d\n",
        !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
        !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));

    db_printf("\tPer-CPU Masks: ");
    CPU_FOREACH(i) {
        pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
        db_printf("cpu#%d: %d ", i,
            !!xen_test_bit(isrc->xi_port, pcpu->evtchn_enabled));
    }
    db_printf("\n");
}
DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
{
    int i;

    if (!xen_domain()) {
        db_printf("Only available on Xen guests\n");
        return;
    }

    for (i = 0; i < NR_EVENT_CHANNELS; i++) {
        struct xenisrc *isrc;

        isrc = xen_intr_port_to_isrc[i];
        if (isrc == NULL)
            continue;

        xen_intr_dump_port(isrc);
    }
}
#endif /* DDB */