1 /******************************************************************************
4 * Xen event and interrupt services for x86 PV and HVM guests.
6 * Copyright (c) 2002-2005, K A Fraser
7 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
8 * Copyright (c) 2012, Spectra Logic Corporation
10 * This file may be distributed separately from the Linux kernel, or
11 * incorporated into other software packages, subject to the following license:
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this source file (the "Software"), to deal in the Software without
15 * restriction, including without limitation the rights to use, copy, modify,
16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17 * and to permit persons to whom the Software is furnished to do so, subject to
18 * the following conditions:
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/systm.h>
38 #include <sys/malloc.h>
39 #include <sys/kernel.h>
40 #include <sys/limits.h>
42 #include <sys/mutex.h>
43 #include <sys/interrupt.h>
50 #include <machine/intr_machdep.h>
51 #include <machine/apicvar.h>
52 #include <machine/smp.h>
53 #include <machine/stdarg.h>
55 #include <machine/xen/synch_bitops.h>
56 #include <machine/xen/xen-os.h>
57 #include <machine/xen/xenvar.h>
59 #include <xen/hypervisor.h>
60 #include <xen/xen_intr.h>
61 #include <xen/evtchn/evtchnvar.h>
63 #include <dev/xen/xenpci/xenpcivar.h>
/* Malloc type for all event-channel bookkeeping allocations in this file. */
static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");
/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
	/**
	 * The last event channel bitmap section (level one bit) processed.
	 * This is used to ensure we scan all ports before
	 * servicing an already serviced port again.
	 */
	u_int last_processed_l1i;

	/**
	 * The last event channel processed within the event channel
	 * bitmap being scanned.
	 */
	u_int last_processed_l2i;

	/** Pointer to this CPU's interrupt statistic counter. */
	u_long *evtchn_intrcnt;

	/**
	 * A bitmap of ports that can be serviced from this CPU.
	 * A set bit means interrupt handling is enabled.
	 */
	u_long evtchn_enabled[sizeof(u_long) * 8];
/*
 * Start the scan at port 0 by initializing the last scanned
 * location as the highest numbered event channel port.
 */
DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
	.last_processed_l1i = LONG_BIT - 1,
	.last_processed_l2i = LONG_BIT - 1
/* Per-CPU pointer to this CPU's Xen vcpu_info structure; defined elsewhere. */
DPCPU_DECLARE(struct vcpu_info *, vcpu_info);
/* Port 0 is used throughout this file to mean "unbound"/invalid. */
#define is_valid_evtchn(x)	((x) != 0)
/* Members of struct xenisrc (the struct header line is elided in this view). */
	struct intsrc	xi_intsrc;	/* Generic intsrc; code relies on this
					 * being first (casts intsrc* to
					 * xenisrc*). */
	enum evtchn_type xi_type;	/* Port type (PORT, VIRQ, IPI, ...). */
	int		xi_cpu;		/* VCPU for delivery. */
	int		xi_vector;	/* Global isrc vector number. */
	evtchn_port_t	xi_port;	/* Event channel bound to this source. */
	u_int		xi_close:1;	/* close on unbind? */
	u_int		xi_needs_eoi:1;	/* Requires PHYSDEVOP_eoi on EOI. */
	u_int		xi_shared:1;	/* Shared with other domains. */
	/*
	 * NOTE(review): code later in this file also references xi_virq,
	 * xi_pirq and xi_cookie members — presumably declared on lines
	 * elided from this view; confirm against the full file.
	 */
/* Element count of a statically-sized array (arrays only, not pointers). */
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
/* Forward declarations for the PIC method implementations defined below. */
static void	xen_intr_suspend(struct pic *);
static void	xen_intr_resume(struct pic *, bool suspend_cancelled);
static void	xen_intr_enable_source(struct intsrc *isrc);
static void	xen_intr_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_eoi_source(struct intsrc *isrc);
static void	xen_intr_enable_intr(struct intsrc *isrc);
static void	xen_intr_disable_intr(struct intsrc *isrc);
static int	xen_intr_vector(struct intsrc *isrc);
static int	xen_intr_source_pending(struct intsrc *isrc);
static int	xen_intr_config_intr(struct intsrc *isrc,
		     enum intr_trigger trig, enum intr_polarity pol);
static int	xen_intr_assign_cpu(struct intsrc *isrc, u_int apic_id);

/* Variants used only for physical (PIRQ-backed) interrupt sources. */
static void	xen_intr_pirq_enable_source(struct intsrc *isrc);
static void	xen_intr_pirq_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_pirq_eoi_source(struct intsrc *isrc);
static void	xen_intr_pirq_enable_intr(struct intsrc *isrc);
/**
 * PIC interface for all event channel port types except physical IRQs.
 */
struct pic xen_intr_pic = {
	.pic_enable_source  = xen_intr_enable_source,
	.pic_disable_source = xen_intr_disable_source,
	.pic_eoi_source     = xen_intr_eoi_source,
	.pic_enable_intr    = xen_intr_enable_intr,
	.pic_disable_intr   = xen_intr_disable_intr,
	.pic_vector         = xen_intr_vector,
	.pic_source_pending = xen_intr_source_pending,
	.pic_suspend        = xen_intr_suspend,
	.pic_resume         = xen_intr_resume,
	.pic_config_intr    = xen_intr_config_intr,
	.pic_assign_cpu     = xen_intr_assign_cpu
/**
 * PIC interface for all event channels representing
 * physical interrupt sources.  Shares the suspend/resume, vector,
 * config and CPU-assignment methods with xen_intr_pic above; only the
 * source mask/unmask/EOI paths differ (they must honor PIRQ EOI rules).
 */
struct pic xen_intr_pirq_pic = {
	.pic_enable_source  = xen_intr_pirq_enable_source,
	.pic_disable_source = xen_intr_pirq_disable_source,
	.pic_eoi_source     = xen_intr_pirq_eoi_source,
	.pic_enable_intr    = xen_intr_pirq_enable_intr,
	.pic_disable_intr   = xen_intr_disable_intr,
	.pic_vector         = xen_intr_vector,
	.pic_source_pending = xen_intr_source_pending,
	.pic_suspend        = xen_intr_suspend,
	.pic_resume         = xen_intr_resume,
	.pic_config_intr    = xen_intr_config_intr,
	.pic_assign_cpu     = xen_intr_assign_cpu
/* Protects isrc allocation (xen_intr_isrc_count) and the port->isrc table. */
static struct mtx	 xen_intr_isrc_lock;
/* Number of interrupt source objects allocated so far. */
static int		 xen_intr_isrc_count;
/* Maps an event channel port number to its bound interrupt source (or NULL). */
static struct xenisrc	*xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
/*------------------------- Private Functions --------------------------------*/
/**
 * Disable signal delivery for an event channel port on the given CPU.
 *
 * \param cpu   The CPU whose per-CPU enable bitmap is updated.
 * \param port  The event channel port to mask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note This operation does not preclude reception of an event
 *       for this event channel on another CPU. To mask the
 *       event channel globally, use evtchn_mask().
 */
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	/* Clear this port's bit in the CPU's service-enable bitmap. */
	clear_bit(port, pcpu->evtchn_enabled);
/**
 * Enable signal delivery for an event channel port on the given CPU.
 *
 * \param cpu   The CPU whose per-CPU enable bitmap is updated.
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note This operation does not guarantee that event delivery
 *       is enabled for this event channel port. The port must
 *       also be globally enabled. See evtchn_unmask().
 */
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	/* Set this port's bit in the CPU's service-enable bitmap. */
	set_bit(port, pcpu->evtchn_enabled);
/**
 * Allocate and register a per-cpu Xen upcall interrupt counter.
 *
 * \param cpu  The cpu for which to register this interrupt count.
 *
 * Idempotent: a CPU whose counter is already registered is left alone.
 */
xen_intr_intrcnt_add(u_int cpu)
	char buf[MAXCOMLEN + 1];
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	/* Already registered for this CPU: nothing to do. */
	if (pcpu->evtchn_intrcnt != NULL)

	/* Counter appears in systat/vmstat as e.g. "cpu0:xen". */
	snprintf(buf, sizeof(buf), "cpu%d:xen", cpu);
	intrcnt_add(buf, &pcpu->evtchn_intrcnt);
/**
 * Search for an already allocated but currently unused Xen interrupt
 * source object.
 *
 * \param type  Restrict the search to interrupt sources of the given
 *              type.
 *
 * \return  A pointer to a free Xen interrupt source object or NULL.
 */
static struct xenisrc *
xen_intr_find_unused_isrc(enum evtchn_type type)
	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));

	for (isrc_idx = 0; isrc_idx < xen_intr_isrc_count; isrc_idx ++) {
		struct xenisrc *isrc;

		/* isrc vectors are allocated contiguously from FIRST_EVTCHN_INT. */
		vector = FIRST_EVTCHN_INT + isrc_idx;
		isrc = (struct xenisrc *)intr_lookup_source(vector);
		    && isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
			KASSERT(isrc->xi_intsrc.is_handlers == 0,
			    ("Free evtchn still has handlers"));
			/* Claim the free source by tagging it with the new type. */
			isrc->xi_type = type;
/**
 * Allocate a Xen interrupt source object.
 *
 * \param type  The type of interrupt source to create.
 *
 * \return  A pointer to a newly allocated Xen interrupt source
 *          object.
 */
static struct xenisrc *
xen_intr_alloc_isrc(enum evtchn_type type)
	struct xenisrc *isrc;

	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));

	/*
	 * NOTE(review): ">" permits the count to reach NR_EVENT_CHANNELS + 1
	 * before this trips; ">=" looks intended — confirm against the
	 * valid vector range before changing.
	 */
	if (xen_intr_isrc_count > NR_EVENT_CHANNELS) {
		printf("xen_intr_alloc: Event channels exhausted.\n");

	/* Reserve the vector slot before dropping the lock below. */
	vector = FIRST_EVTCHN_INT + xen_intr_isrc_count;
	xen_intr_isrc_count++;

	/*
	 * Drop the lock around the M_WAITOK allocation and source
	 * registration; the slot was reserved above so no other thread
	 * can claim this vector.
	 */
	mtx_unlock(&xen_intr_isrc_lock);
	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
	isrc->xi_intsrc.is_pic = &xen_intr_pic;
	isrc->xi_vector = vector;
	isrc->xi_type = type;
	intr_register_source(&isrc->xi_intsrc);
	mtx_lock(&xen_intr_isrc_lock);
/**
 * Attempt to free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  EBUSY if the source is still in use, otherwise 0.
 *
 * The isrc object itself is not freed; it is marked UNBOUND so it can
 * be recycled by xen_intr_find_unused_isrc().
 */
xen_intr_release_isrc(struct xenisrc *isrc)
	mtx_lock(&xen_intr_isrc_lock);
	/* Refuse to release a source that still has handlers attached. */
	if (isrc->xi_intsrc.is_handlers != 0) {
		mtx_unlock(&xen_intr_isrc_lock);
	evtchn_mask_port(isrc->xi_port);
	evtchn_clear_port(isrc->xi_port);

	/* Rebind port to CPU 0. */
	evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
	evtchn_cpu_unmask_port(0, isrc->xi_port);

	/* Only close ports this API opened (xi_close) and that are bound. */
	if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
		struct evtchn_close close = { .port = isrc->xi_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");

	/* Drop the port -> isrc mapping and mark the source recyclable. */
	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
	mtx_unlock(&xen_intr_isrc_lock);
/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp        The returned Xen interrupt object associated with
 *                     the specified local port.
 * \param local_port   The event channel to bind.
 * \param type         The event channel type of local_port.
 * \param intr_owner   The device making this bind request.
 * \param filter       An interrupt filter handler.  Specify NULL
 *                     to always dispatch to the ithread handler.
 * \param handler      An interrupt ithread handler.  Optional (can
 *                     specify NULL) if all necessary event actions
 *                     are performed by filter.
 * \param arg          Argument to present to both filter and handler.
 * \param flags        Interrupt handler flags.  See sys/bus.h.
 * \param port_handlep Pointer to an opaque handle used to manage this
 *                     registration.
 *
 * \returns  0 on success, otherwise an errno.
 */
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, device_t intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
	struct xenisrc *isrc;

	/* A handle is mandatory; it is the only way to unbind later. */
	if (port_handlep == NULL) {
		device_printf(intr_owner,
		    "xen_intr_bind_isrc: Bad event handle\n");

	/* Prefer recycling a free isrc; otherwise allocate a fresh one. */
	mtx_lock(&xen_intr_isrc_lock);
	isrc = xen_intr_find_unused_isrc(type);
		isrc = xen_intr_alloc_isrc(type);
			mtx_unlock(&xen_intr_isrc_lock);
	isrc->xi_port = local_port;
	xen_intr_port_to_isrc[local_port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

	/* INTR_EXCL: exactly one handler per event channel vector. */
	error = intr_add_handler(device_get_nameunit(intr_owner),
				 isrc->xi_vector, filter, handler, arg,
				 flags|INTR_EXCL, port_handlep);
		device_printf(intr_owner,
		    "xen_intr_bind_irq: intr_add_handler failed\n");
		/* Roll back the binding established above. */
		xen_intr_release_isrc(isrc);

	/* Allow events to be delivered on this port from now on. */
	evtchn_unmask_port(local_port);
/**
 * Lookup a Xen interrupt source object given an interrupt binding handle.
 *
 * \param handle  A handle initialized by a previous call to
 *                xen_intr_bind_isrc().
 *
 * \returns  A pointer to the Xen interrupt source object associated
 *           with the given interrupt handle.  NULL if no association
 *           exists.
 */
static struct xenisrc *
xen_intr_isrc(xen_intr_handle_t handle)
	struct intr_handler *ih;

	/* The opaque handle is really the intr_handler installed at bind. */
	if (ih == NULL || ih->ih_event == NULL)

	return (ih->ih_event->ie_source);
/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param sh    The Xen shared info area.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh,
	/* Pending, not globally masked, and enabled for service on this CPU. */
	return (sh->evtchn_pending[idx]
	      & ~sh->evtchn_mask[idx]
	      & pcpu->evtchn_enabled[idx]);
/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * \param trap_frame  The trap frame context for the current interrupt.
 *
 * Performs a two-level scan of the shared-info pending bitmap: the
 * level-1 selector word (evtchn_pending_sel) picks which sections of the
 * level-2 bitmap to examine.  Scanning resumes just after the last
 * (l1i, l2i) position serviced so no port can starve its neighbors.
 */
xen_intr_handle_upcall(struct trapframe *trap_frame)
	u_int l1i, l2i, port, cpu;
	u_long masked_l1, masked_l2;
	struct xenisrc *isrc;
	struct xen_intr_pcpu_data *pc;

	/*
	 * Disable preemption in order to always check and fire events
	 * on the CPU whose per-CPU state we loaded below.
	 */
	cpu = PCPU_GET(cpuid);
	pc  = DPCPU_PTR(xen_intr_pcpu);
	s   = HYPERVISOR_shared_info;
	v   = DPCPU_GET(vcpu_info);

	/* Without vector callbacks, HVM PCI event injection targets CPU 0. */
	if (xen_hvm_domain() && !xen_vector_callback_enabled) {
		KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));

	/* Acknowledge the upcall before consuming the selector word. */
	v->evtchn_upcall_pending = 0;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
	/* Clear master flag /before/ clearing selector flag. */

	/* Atomically fetch and clear the level-1 selector bits. */
	l1 = atomic_readandclear_long(&v->evtchn_pending_sel);

	l1i = pc->last_processed_l1i;
	l2i = pc->last_processed_l2i;
	(*pc->evtchn_intrcnt)++;

		/* Advance past the last-serviced level-1 section. */
		l1i = (l1i + 1) % LONG_BIT;
		masked_l1 = l1 & ((~0UL) << l1i);

		if (masked_l1 == 0) {
			/*
			 * if we masked out all events, wrap around
			 * to the beginning.
			 */
		l1i = ffsl(masked_l1) - 1;

		/* Ports in this section that are pending, unmasked, ours. */
		l2 = xen_intr_active_ports(pc, s, l1i);

			/* Advance past the last-serviced port in the section. */
			l2i = (l2i + 1) % LONG_BIT;
			masked_l2 = l2 & ((~0UL) << l2i);

			if (masked_l2 == 0) {
				/* if we masked out all events, move on */
			l2i = ffsl(masked_l2) - 1;

			/* process port */
			port = (l1i * LONG_BIT) + l2i;
			synch_clear_bit(port, &s->evtchn_pending[0]);

			isrc = xen_intr_port_to_isrc[port];
			if (__predict_false(isrc == NULL))

			/* Make sure we are firing on the right vCPU */
			KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
				("Received unexpected event on vCPU#%d, event bound to vCPU#%d",
				PCPU_GET(cpuid), isrc->xi_cpu));

			intr_execute_handlers(&isrc->xi_intsrc, trap_frame);

			/*
			 * If this is the final port processed,
			 * we'll pick up here+1 next time.
			 */
			pc->last_processed_l1i = l1i;
			pc->last_processed_l2i = l2i;

		} while (l2i != LONG_BIT - 1);

		/* Re-check for ports that became pending during the scan. */
		l2 = xen_intr_active_ports(pc, s, l1i);

		/*
		 * We handled all ports, so we can clear the
		 * selector bit for this section.
		 */
/**
 * One-time initialization of the Xen interrupt subsystem: sets up the
 * isrc lock, per-CPU enable masks and counters, and registers the PIC.
 */
xen_intr_init(void *dummy __unused)
	struct xen_intr_pcpu_data *pcpu;

	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);

	/*
	 * Register interrupt count manually as we aren't
	 * guaranteed to see a call to xen_intr_assign_cpu()
	 * before our first interrupt. Also set the per-cpu
	 * mask of CPU#0 to enable all, since by default
	 * all event channels are bound to CPU#0.
	 */
	pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
	memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
	    sizeof(pcpu->evtchn_enabled));
	xen_intr_intrcnt_add(i);

	intr_register_pic(&xen_intr_pic);

/* Run after the interrupt subsystem itself is up. */
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_MIDDLE, xen_intr_init, NULL);
/*--------------------------- Common PIC Functions ---------------------------*/
/**
 * Prepare this PIC for system suspension.  No per-PIC work is required;
 * event channel state is rebuilt from scratch in xen_intr_resume().
 */
xen_intr_suspend(struct pic *unused)
/**
 * Re-establish an IPI event channel binding after resume.
 *
 * \param isrc  The IPI interrupt source to rebind; its xi_port is
 *              replaced with the freshly allocated port.
 *
 * Any failure is fatal: without its IPI channel the CPU cannot operate.
 */
xen_rebind_ipi(struct xenisrc *isrc)
	int cpu = isrc->xi_cpu;
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
		panic("unable to rebind xen IPI: %d", error);

	/* Record the new port and restore the port -> isrc mapping. */
	isrc->xi_port = bind_ipi.port;
	xen_intr_port_to_isrc[bind_ipi.port] = isrc;

	/* Re-apply the CPU affinity held before suspension. */
	error = xen_intr_assign_cpu(&isrc->xi_intsrc,
		panic("unable to bind xen IPI to CPU#%d: %d",

	evtchn_unmask_port(bind_ipi.port);
	/* IPI channels only exist on SMP kernels (elided #else branch). */
	panic("Resume IPI event channel on UP");
/**
 * Re-establish a VIRQ event channel binding after resume.
 *
 * \param isrc  The VIRQ interrupt source to rebind; its xi_port is
 *              replaced with the freshly allocated port.
 *
 * Any failure is fatal: the VIRQ consumer cannot function without it.
 */
xen_rebind_virq(struct xenisrc *isrc)
	int cpu = isrc->xi_cpu;
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
		panic("unable to rebind xen VIRQ#%d: %d", isrc->xi_virq, error);

	/* Record the new port and restore the port -> isrc mapping. */
	isrc->xi_port = bind_virq.port;
	xen_intr_port_to_isrc[bind_virq.port] = isrc;

	/* Re-apply the CPU affinity held before suspension. */
	error = xen_intr_assign_cpu(&isrc->xi_intsrc,
		panic("unable to bind xen VIRQ#%d to CPU#%d: %d",
		    isrc->xi_virq, cpu, error);

	evtchn_unmask_port(bind_virq.port);
/**
 * Return this PIC to service after being suspended.
 *
 * \param unused             The PIC being resumed (unused).
 * \param suspend_cancelled  True if the suspend was aborted before the
 *                           domain actually migrated/restarted; in that
 *                           case all prior bindings are still valid and
 *                           nothing needs rebuilding.
 *
 * All event channel state is discarded and VIRQ/IPI channels are
 * re-bound; other port types cannot be rebuilt and are released.
 */
xen_intr_resume(struct pic *unused, bool suspend_cancelled)
	shared_info_t *s = HYPERVISOR_shared_info;
	struct xenisrc *isrc;

	if (suspend_cancelled)

	/* Reset the per-CPU masks */
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
	/* CPU#0 gets all ports enabled; other CPUs start with none. */
	memset(pcpu->evtchn_enabled,
	    i == 0 ? ~0 : 0, sizeof(pcpu->evtchn_enabled));

	/* Mask all event channels. */
	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	/* Remove port -> isrc mappings */
	memset(xen_intr_port_to_isrc, 0, sizeof(xen_intr_port_to_isrc));

	/* Free unused isrcs and rebind VIRQs and IPIs */
	for (isrc_idx = 0; isrc_idx < xen_intr_isrc_count; isrc_idx++) {
		vector = FIRST_EVTCHN_INT + isrc_idx;
		isrc = (struct xenisrc *)intr_lookup_source(vector);
		switch (isrc->xi_type) {
		case EVTCHN_TYPE_IPI:
			xen_rebind_ipi(isrc);
		case EVTCHN_TYPE_VIRQ:
			xen_rebind_virq(isrc);
		/* Other types cannot be rebuilt: drop handler and recycle. */
		intr_remove_handler(isrc->xi_cookie);
		isrc->xi_type = EVTCHN_TYPE_UNBOUND;
		isrc->xi_cookie = NULL;
/**
 * Disable a Xen interrupt source.
 *
 * \param base_isrc  The interrupt source to disable.
 *
 * Masks the backing event channel port globally.
 */
xen_intr_disable_intr(struct intsrc *base_isrc)
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_mask_port(isrc->xi_port);
/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param base_isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
xen_intr_vector(struct intsrc *base_isrc)
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	return (isrc->xi_vector);
/**
 * Determine whether or not interrupt events are pending on
 * the given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
xen_intr_source_pending(struct intsrc *isrc)
	/*
	 * EventChannels are edge triggered and never masked.
	 * There can be no pending events.
	 */
/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  An errno; event channels cannot be reconfigured this way.
 */
xen_intr_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
	/* Configuration is only possible via the evtchn apis. */
/**
 * Configure CPU affinity for interrupt source event delivery.
 *
 * \param base_isrc  The interrupt source to configure.
 * \param apic_id    The apic id of the CPU for handling future events.
 *
 * \returns  0 if successful, otherwise an errno.
 */
xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
	struct evtchn_bind_vcpu bind_vcpu;
	struct xenisrc *isrc;
	u_int to_cpu, vcpu_id;

	/* Without per-vCPU callbacks, all events must stay on CPU 0. */
	if (xen_vector_callback_enabled == 0)

	to_cpu = apic_cpuid(apic_id);
	vcpu_id = pcpu_find(to_cpu)->pc_vcpu_id;
	/* Ensure the target CPU has an upcall counter registered. */
	xen_intr_intrcnt_add(to_cpu);

	mtx_lock(&xen_intr_isrc_lock);
	isrc = (struct xenisrc *)base_isrc;
	/* Not bound to a port yet: nothing to migrate. */
	if (!is_valid_evtchn(isrc->xi_port)) {
		mtx_unlock(&xen_intr_isrc_lock);

	if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
	    (isrc->xi_type == EVTCHN_TYPE_IPI)) {
		/*
		 * Virtual IRQs are associated with a cpu by
		 * the Hypervisor at evtchn_bind_virq time, so
		 * all we need to do is update the per-CPU masks.
		 */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		isrc->xi_cpu = to_cpu;
		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		mtx_unlock(&xen_intr_isrc_lock);

	bind_vcpu.port = isrc->xi_port;
	bind_vcpu.vcpu = vcpu_id;

	/*
	 * Allow interrupts to be fielded on the new VCPU before
	 * we ask the hypervisor to deliver them there.
	 */
	evtchn_cpu_unmask_port(to_cpu, isrc->xi_port);
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
	if (isrc->xi_cpu != to_cpu) {
		/* Commit to new binding by removing the old one. */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		isrc->xi_cpu = to_cpu;
		/* Roll-back to previous binding. */
		evtchn_cpu_mask_port(to_cpu, isrc->xi_port);

	mtx_unlock(&xen_intr_isrc_lock);
/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
/**
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 *
 * Intentionally empty: event channels are edge triggered, so there is
 * nothing to mask at dispatch time.
 */
xen_intr_disable_source(struct intsrc *isrc, int eoi)
/**
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 *
 * Intentionally empty: nothing was masked in xen_intr_disable_source().
 */
xen_intr_enable_source(struct intsrc *isrc)
/**
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 *
 * Intentionally empty: plain event channels require no EOI.
 */
xen_intr_eoi_source(struct intsrc *isrc)
/**
 * Enable and unmask the interrupt source.
 *
 * \param base_isrc  The interrupt source to enable.
 */
xen_intr_enable_intr(struct intsrc *base_isrc)
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_unmask_port(isrc->xi_port);
/*------------------ Physical Interrupt Source PIC Functions -----------------*/
/**
 * Mask a level triggered interrupt source.
 *
 * \param base_isrc  The interrupt source to mask (if necessary).
 * \param eoi        If non-zero, perform any necessary end-of-interrupt
 *                   acknowledgements.
 *
 * Unlike plain event channels, PIRQ-backed sources can be level
 * triggered, so the port is masked while the handler runs.
 */
xen_intr_pirq_disable_source(struct intsrc *base_isrc, int eoi)
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;
	evtchn_mask_port(isrc->xi_port);
/**
 * Unmask a level triggered interrupt source.
 *
 * \param base_isrc  The interrupt source to unmask (if necessary).
 */
xen_intr_pirq_enable_source(struct intsrc *base_isrc)
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;
	evtchn_unmask_port(isrc->xi_port);
/**
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param base_isrc  The interrupt source to EOI.
 *
 * Sources flagged xi_needs_eoi must notify the hypervisor so it can
 * re-arm the underlying physical IRQ.
 */
xen_intr_pirq_eoi_source(struct intsrc *base_isrc)
	struct xenisrc *isrc;

	/* XXX Use shared page of flags for this. */
	isrc = (struct xenisrc *)base_isrc;
	if (isrc->xi_needs_eoi != 0) {
		struct physdev_eoi eoi = { .irq = isrc->xi_pirq };

		/* Best effort; no recovery path exists if the EOI fails. */
		(void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
/**
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 *
 * Intentionally empty for PIRQ sources.
 */
xen_intr_pirq_enable_intr(struct intsrc *isrc)
/*--------------------------- Public Functions -------------------------------*/
/*------- API comments for these methods can be found in xen/xenintr.h -------*/
/**
 * Bind a handler to an event channel port that was opened by some
 * other entity (e.g. another driver or the toolstack).
 */
xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
	struct xenisrc *isrc;

	error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT, dev,
		    filter, handler, arg, flags, port_handlep);

	/*
	 * The Event Channel API didn't open this port, so it is not
	 * responsible for closing it automatically on unbind.
	 */
/**
 * Allocate a fresh unbound event channel for the given remote domain
 * and bind a handler to it.  On bind failure the freshly allocated
 * port is closed again so nothing leaks.
 */
xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
	struct xenisrc *isrc;
	struct evtchn_alloc_unbound alloc_unbound;

	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */

	error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
				 dev, filter, handler, arg, flags,
		/* Undo the allocation above; leaving it open would leak. */
		evtchn_close_t close = { .port = alloc_unbound.port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
/**
 * Bind a handler to an event channel offered by a remote domain,
 * establishing the interdomain connection in the process.  On bind
 * failure the local port is closed again so nothing leaks.
 */
xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
    u_int remote_port, driver_filter_t filter, driver_intr_t handler,
    void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
	struct xenisrc *isrc;
	struct evtchn_bind_interdomain bind_interdomain;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */

	error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
				 EVTCHN_TYPE_PORT, dev, filter, handler,
				 arg, flags, port_handlep);
		/* Undo the interdomain bind; leaving it open would leak. */
		evtchn_close_t close = { .port = bind_interdomain.local_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
/**
 * Bind a handler to a Xen virtual IRQ (VIRQ) delivered on the given CPU.
 * Allocates the VIRQ event channel, binds the handler, and then steers
 * delivery to the requested CPU.  All failure paths close the port.
 */
xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct xenisrc *isrc;
	struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };

	/* Ensure the target CPU is ready to handle evtchn interrupts. */
	xen_intr_intrcnt_add(cpu);

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */

	error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ, dev,
				 filter, handler, arg, flags, port_handlep);

	/* Steer ithread execution to the requested CPU. */
	error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);
		evtchn_close_t close = { .port = bind_virq.port };

		xen_intr_unbind(*port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");

	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding. Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_virq = virq;
/**
 * Allocate an IPI event channel targeting the given CPU and bind a
 * filter-only handler to it.  Mirrors xen_intr_bind_virq(); the
 * trailing EOPNOTSUPP return is the non-SMP branch (guard elided here).
 */
xen_intr_alloc_and_bind_ipi(device_t dev, u_int cpu,
    driver_filter_t filter, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct xenisrc *isrc;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

	/* Ensure the target CPU is ready to handle evtchn interrupts. */
	xen_intr_intrcnt_add(cpu);

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */

	/* IPIs are filter-only: no ithread handler, no handler argument. */
	error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
				 dev, filter, NULL, NULL, flags,
	/* Steer ithread bookkeeping to the requested CPU. */
	error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);
		evtchn_close_t close = { .port = bind_ipi.port };

		xen_intr_unbind(*port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");

	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding. Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */

	return (EOPNOTSUPP);
/**
 * Attach a printf-style description to a bound interrupt handler,
 * visible in e.g. vmstat -i output.
 */
xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
	char descr[MAXCOMLEN + 1];
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(port_handle);
	/* Format the caller's description into a bounded buffer. */
	vsnprintf(descr, sizeof(descr), fmt, ap);
	return (intr_describe(isrc->xi_vector, port_handle, descr));
/**
 * Tear down a binding created by one of the bind routines above:
 * removes the handler and releases (and, if xi_close is set, closes)
 * the underlying event channel.  The caller's handle is cleared first
 * so it cannot be reused.
 */
xen_intr_unbind(xen_intr_handle_t *port_handlep)
	struct intr_handler *handler;
	struct xenisrc *isrc;

	handler = *port_handlep;
	*port_handlep = NULL;
	isrc = xen_intr_isrc(handler);

	intr_remove_handler(handler);
	xen_intr_release_isrc(isrc);
/**
 * Trigger the event channel associated with the given handle.  Only
 * meaningful for local ports and IPIs; asserted via KASSERT below.
 */
xen_intr_signal(xen_intr_handle_t handle)
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
		isrc->xi_type == EVTCHN_TYPE_IPI,
		("evtchn_signal on something other than a local port"));
	struct evtchn_send send = { .port = isrc->xi_port };
	/* Best effort; the hypercall result is intentionally ignored. */
	(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
/**
 * Return the event channel port number behind the given handle.
 */
xen_intr_port(xen_intr_handle_t handle)
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	return (isrc->xi_port);