1 /******************************************************************************
4 * A simplified event channel for para-drivers in unmodified linux
6 * Copyright (c) 2002-2005, K A Fraser
7 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
9 * This file may be distributed separately from the Linux kernel, or
10 * incorporated into other software packages, subject to the following license:
12 * Permission is hereby granted, free of charge, to any person obtaining a copy
13 * of this source file (the "Software"), to deal in the Software without
14 * restriction, including without limitation the rights to use, copy, modify,
15 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
16 * and to permit persons to whom the Software is furnished to do so, subject to
17 * the following conditions:
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/limits.h>
41 #include <sys/mutex.h>
42 #include <sys/interrupt.h>
45 #include <machine/xen/xen-os.h>
46 #include <machine/xen/xenvar.h>
47 #include <xen/hypervisor.h>
48 #include <xen/xen_intr.h>
49 #include <xen/evtchn.h>
52 #include <dev/xen/xenpci/xenpcivar.h>
/*
 * Find-first-set helper: returns the index of the lowest set bit in
 * 'word'.  NOTE(review): the body is not visible in this excerpt
 * (original numbering jumps from 54 to 62) -- presumably the usual
 * bsf-style loop or builtin; confirm against the full source.
 */
54 static inline unsigned long __ffs(unsigned long word)
/* An event-channel port of 0 is the "unbound" sentinel. */
62 #define is_valid_evtchn(x) ((x) != 0)
/*
 * BUG FIX (macro hygiene): the original expansion was
 * (irq_evtchn[irq].evtchn) -- it ignored the macro parameter and
 * silently captured a variable named 'irq' from the caller's scope.
 * All call sites visible in this file happen to pass a variable named
 * 'irq', so behavior there is unchanged, but the macro was wrong for
 * any other argument.  Use the parameter, parenthesized.
 */
63 #define evtchn_from_irq(x) (irq_evtchn[(x)].evtchn)
/*
 * Per-IRQ bookkeeping.  NOTE(review): the struct header and several
 * members (arg, evtchn, mpsafe, inuse, in_handler, lock -- all used
 * later in this file) are elided from this excerpt; only two members
 * are visible here.
 */
67 driver_intr_t *handler;
70 int close:1; /* close on unbind_from_irqhandler()? */
/*
 * Reverse map: event-channel port -> IRQ number, initialized to -1
 * ("no IRQ bound") via a GNU range designated initializer.
 */
75 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
76 [0 ... NR_EVENT_CHANNELS-1] = -1 };
/* Serializes allocation/release of irq_evtchn[] slots. */
78 static struct mtx irq_alloc_lock;
/* The xenpci device that owns the underlying PCI interrupt. */
79 static device_t xenpci_device;
/* Element count of a true array (do not apply to pointers). */
81 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
/*
 * alloc_xen_irq (fragment -- the signature, loop body close, and return
 * statements are elided from this excerpt): scan irq_evtchn[] starting
 * at index 1 (0 is reserved as invalid) under irq_alloc_lock for a slot
 * not marked inuse, claim it, and return its index; on exhaustion print
 * a diagnostic and presumably return an error -- confirm in full source.
 */
89 mtx_lock(&irq_alloc_lock);
91 for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) {
92 if (irq_evtchn[irq].inuse)
/* First free slot found: claim it before dropping the lock. */
94 irq_evtchn[irq].inuse = 1;
95 mtx_unlock(&irq_alloc_lock);
/* Fell off the end of the table: every slot is taken. */
101 printf("alloc_xen_irq: No available IRQ to bind to: "
102 "increase irq_evtchn[] size in evtchn.c.\n");
105 mtx_unlock(&irq_alloc_lock);
/*
 * Return an IRQ slot to the free pool.  Clearing 'inuse' under
 * irq_alloc_lock makes the slot visible to alloc_xen_irq() again.
 * (Return type line is elided from this excerpt.)
 */
111 free_xen_irq(int irq)
114 mtx_lock(&irq_alloc_lock);
115 irq_evtchn[irq].inuse = 0;
116 mtx_unlock(&irq_alloc_lock);
/*
 * Look up the event-channel port currently bound to 'irq'
 * (0 if the slot is unbound; see is_valid_evtchn()).
 */
120 irq_to_evtchn_port(int irq)
123 return irq_evtchn[irq].evtchn;
/*
 * Mask delivery of events on 'port' by setting its bit in the shared
 * info page's evtchn_mask bitmap (atomic w.r.t. the hypervisor via
 * synch_set_bit; no hypercall needed for masking).
 */
127 mask_evtchn(int port)
129 shared_info_t *s = HYPERVISOR_shared_info;
131 synch_set_bit(port, &s->evtchn_mask[0]);
/*
 * Unmask 'port' via the EVTCHNOP_unmask hypercall rather than clearing
 * the bitmap bit directly; the hypercall also re-triggers delivery of
 * any event that arrived while the port was masked.
 * NOTE(review): the hypercall's return value is ignored here.
 */
135 unmask_evtchn(int port)
137 evtchn_unmask_t op = { .port = port };
139 HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op);
/*
 * Allocate an unbound event channel that 'remote_domain' may later bind
 * to, and attach 'handler' to it under a freshly allocated IRQ slot.
 * On success the port->irq reverse map is set and the port unmasked;
 * the channel is flagged close-on-unbind since we created it.
 * NOTE(review): this excerpt elides the error checks after
 * alloc_xen_irq() and the hypercall, the '*irqp' store, and the
 * return statements -- confirm against the full source.
 */
143 bind_listening_port_to_irqhandler(unsigned int remote_domain,
144 const char *devname, driver_intr_t handler, void *arg,
145 unsigned long irqflags, unsigned int *irqp)
147 struct evtchn_alloc_unbound alloc_unbound;
151 irq = alloc_xen_irq();
/* Per-IRQ lock serializes against evtchn_interrupt() and unbind. */
155 mtx_lock(&irq_evtchn[irq].lock);
157 alloc_unbound.dom = DOMID_SELF;
158 alloc_unbound.remote_dom = remote_domain;
159 error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
162 mtx_unlock(&irq_evtchn[irq].lock);
167 irq_evtchn[irq].handler = handler;
168 irq_evtchn[irq].arg = arg;
169 irq_evtchn[irq].evtchn = alloc_unbound.port;
/* We allocated this channel, so unbind must close it. */
170 irq_evtchn[irq].close = 1;
171 irq_evtchn[irq].mpsafe = (irqflags & INTR_MPSAFE) != 0;
173 evtchn_to_irq[alloc_unbound.port] = irq;
/* Enable delivery only after the handler is fully wired up. */
175 unmask_evtchn(alloc_unbound.port);
177 mtx_unlock(&irq_evtchn[irq].lock);
/*
 * Bind to an event channel that 'remote_domain' has already allocated
 * as 'remote_port' (EVTCHNOP_bind_interdomain yields our local_port),
 * then wire 'handler' to it exactly as in
 * bind_listening_port_to_irqhandler().  Close-on-unbind is set since
 * the local port is created here.
 * NOTE(review): error checks, '*irqp' store, and returns are elided
 * from this excerpt.
 */
185 bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
186 unsigned int remote_port, const char *devname, driver_intr_t handler,
187 void *arg, unsigned long irqflags, unsigned int *irqp)
189 struct evtchn_bind_interdomain bind_interdomain;
193 irq = alloc_xen_irq();
197 mtx_lock(&irq_evtchn[irq].lock);
199 bind_interdomain.remote_dom = remote_domain;
200 bind_interdomain.remote_port = remote_port;
201 error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
204 mtx_unlock(&irq_evtchn[irq].lock);
209 irq_evtchn[irq].handler = handler;
210 irq_evtchn[irq].arg = arg;
211 irq_evtchn[irq].evtchn = bind_interdomain.local_port;
/* Local port was created by this bind, so unbind must close it. */
212 irq_evtchn[irq].close = 1;
213 irq_evtchn[irq].mpsafe = (irqflags & INTR_MPSAFE) != 0;
215 evtchn_to_irq[bind_interdomain.local_port] = irq;
/* Enable delivery only after the handler is fully wired up. */
217 unmask_evtchn(bind_interdomain.local_port);
219 mtx_unlock(&irq_evtchn[irq].lock);
/*
 * Attach 'handler' to an event channel the caller already owns
 * ('caller_port').  No hypercall is made and close is left 0: unbind
 * will NOT close a port we did not create.
 * NOTE(review): error check after alloc_xen_irq(), the '*irqp' store,
 * and the return statements are elided from this excerpt.
 */
228 bind_caller_port_to_irqhandler(unsigned int caller_port,
229 const char *devname, driver_intr_t handler, void *arg,
230 unsigned long irqflags, unsigned int *irqp)
234 irq = alloc_xen_irq();
238 mtx_lock(&irq_evtchn[irq].lock);
240 irq_evtchn[irq].handler = handler;
241 irq_evtchn[irq].arg = arg;
242 irq_evtchn[irq].evtchn = caller_port;
/* Caller owns the port; do not close it on unbind. */
243 irq_evtchn[irq].close = 0;
244 irq_evtchn[irq].mpsafe = (irqflags & INTR_MPSAFE) != 0;
246 evtchn_to_irq[caller_port] = irq;
248 unmask_evtchn(caller_port);
250 mtx_unlock(&irq_evtchn[irq].lock);
/*
 * Tear down the binding established by one of the bind_* functions:
 * mask the port, drop the reverse map, close the channel if we created
 * it, clear the handler, then wait for any in-flight handler invocation
 * to drain (evtchn_interrupt() runs handlers with the per-IRQ lock
 * dropped, flagging in_handler instead) before freeing the IRQ slot.
 * NOTE(review): the mask_evtchn() call, the spin-wait body, and the
 * final free_xen_irq() are elided from this excerpt.
 */
258 unbind_from_irqhandler(unsigned int irq)
262 mtx_lock(&irq_evtchn[irq].lock);
264 evtchn = evtchn_from_irq(irq);
266 if (is_valid_evtchn(evtchn)) {
267 evtchn_to_irq[evtchn] = -1;
/* Only close channels this module allocated (close flag set at bind). */
269 if (irq_evtchn[irq].close) {
270 struct evtchn_close close = { .port = evtchn };
271 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
272 panic("EVTCHNOP_close failed");
276 irq_evtchn[irq].handler = NULL;
277 irq_evtchn[irq].evtchn = 0;
279 mtx_unlock(&irq_evtchn[irq].lock);
/* Drain: evtchn_interrupt() sets in_handler around the callback. */
281 while (irq_evtchn[irq].in_handler)
/*
 * Send an event to the remote end of the channel bound to 'irq'.
 * Silently does nothing if the IRQ has no bound port (port 0).
 */
287 void notify_remote_via_irq(int irq)
291 evtchn = evtchn_from_irq(irq);
292 if (is_valid_evtchn(evtchn))
293 notify_remote_via_evtchn(evtchn);
/*
 * Word 'idx' of the pending-and-unmasked event bitmap: a bit is active
 * iff it is pending and not masked.  ('cpu' is unused in the visible
 * body; the third parameter's declaration line is elided.)
 */
296 static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
299 return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
/*
 * Main event-channel upcall dispatcher, installed via BUS_SETUP_INTR.
 *
 * Pending events live in a two-level bitmap in the shared-info page:
 * the per-vcpu selector word (evtchn_pending_sel) has one bit per
 * word of the evtchn_pending[] array ("l1"), and each pending word
 * has one bit per port ("l2").  Both levels are scanned resuming just
 * past the last position processed on the previous upcall
 * (pc_last_processed_l1i/l2i), so a chatty low-numbered port cannot
 * starve higher-numbered ones.
 *
 * NOTE(review): this excerpt elides several lines (loop headers /
 * closers, 'continue'/'break' paths, selector-bit clearing at the end),
 * so control flow below is partially inferred -- confirm against the
 * full source.
 */
303 evtchn_interrupt(void *arg)
305 unsigned int l1i, l2i, port;
306 unsigned long masked_l1, masked_l2;
307 /* XXX: All events are bound to vcpu0 but irq may be redirected. */
308 int cpu = 0; /*smp_processor_id();*/
309 driver_intr_t *handler;
311 int irq, handler_mpsafe;
312 shared_info_t *s = HYPERVISOR_shared_info;
313 vcpu_info_t *v = &s->vcpu_info[cpu];
314 struct pcpu *pc = pcpu_find(cpu);
315 unsigned long l1, l2;
/* Acknowledge the upcall before scanning so new events re-raise it. */
317 v->evtchn_upcall_pending = 0;
320 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
321 /* Clear master flag /before/ clearing selector flag. */
/* Atomically take a snapshot of (and clear) the l1 selector word. */
326 l1 = atomic_readandclear_long(&v->evtchn_pending_sel);
328 l1i = pc->pc_last_processed_l1i;
329 l2i = pc->pc_last_processed_l2i;
/* Advance past the last l1 word handled; mask off already-done bits. */
333 l1i = (l1i + 1) % LONG_BIT;
334 masked_l1 = l1 & ((~0UL) << l1i);
336 if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */
341 l1i = __ffs(masked_l1);
/* Pending-and-unmasked ports within the selected l1 word. */
344 l2 = active_evtchns(cpu, s, l1i);
346 l2i = (l2i + 1) % LONG_BIT;
347 masked_l2 = l2 & ((~0UL) << l2i);
349 if (masked_l2 == 0) { /* if we masked out all events, move on */
353 l2i = __ffs(masked_l2);
/* Reconstruct the port number and consume its pending bit. */
356 port = (l1i * LONG_BIT) + l2i;
357 synch_clear_bit(port, &s->evtchn_pending[0]);
359 irq = evtchn_to_irq[port];
/* Snapshot handler state under the per-IRQ lock ... */
363 mtx_lock(&irq_evtchn[irq].lock);
364 handler = irq_evtchn[irq].handler;
365 handler_arg = irq_evtchn[irq].arg;
366 handler_mpsafe = irq_evtchn[irq].mpsafe;
367 if (unlikely(handler == NULL)) {
368 printf("Xen IRQ%d (port %d) has no handler!\n",
370 mtx_unlock(&irq_evtchn[irq].lock);
/*
 * ... then run the handler with the lock DROPPED, using in_handler
 * as the drain flag that unbind_from_irqhandler() spins on.
 */
373 irq_evtchn[irq].in_handler = 1;
374 mtx_unlock(&irq_evtchn[irq].lock);
376 //local_irq_enable();
379 handler(handler_arg);
382 //local_irq_disable();
384 mtx_lock(&irq_evtchn[irq].lock);
385 irq_evtchn[irq].in_handler = 0;
386 mtx_unlock(&irq_evtchn[irq].lock);
388 /* if this is the final port processed, we'll pick up here+1 next time */
389 pc->pc_last_processed_l1i = l1i;
390 pc->pc_last_processed_l2i = l2i;
392 } while (l2i != LONG_BIT - 1);
/* Re-check the word: only quiesce if no active ports remain in it. */
394 l2 = active_evtchns(cpu, s, l1i);
395 if (l2 == 0) /* we handled all ports, so we can clear the selector bit */
/*
 * Fragment of an interrupt-teardown routine (the enclosing function's
 * name and signature are elided from this excerpt): detach our upcall
 * handler (evtchn_interrupt) from the xenpci device's PCI interrupt.
 * Teardown failure is logged but not fatal; the cookie is cleared
 * either way so a later setup starts clean.
 */
403 struct xenpci_softc *scp = device_get_softc(xenpci_device);
406 * Take our interrupt handler out of the list of handlers
407 * that can handle this irq.
409 if (scp->intr_cookie != NULL) {
410 if (BUS_TEARDOWN_INTR(device_get_parent(xenpci_device),
411 xenpci_device, scp->res_irq, scp->intr_cookie) != 0)
412 printf("intr teardown failed.. continuing\n");
413 scp->intr_cookie = NULL;
/*
 * Fragment of a resume/re-initialization routine (the enclosing
 * function's name and signature are elided from this excerpt): after
 * suspend all event-channel bindings are stale, so reset the
 * port->irq map and per-IRQ port numbers, then re-install the upcall
 * handler.  NOTE(review): unlike xenpci_irq_init below, this
 * BUS_SETUP_INTR call omits INTR_MPSAFE and its return value is not
 * visibly checked -- confirm against the full source.
 */
420 struct xenpci_softc *scp = device_get_softc(xenpci_device);
423 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) {
425 evtchn_to_irq[evtchn] = -1;
428 for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
429 irq_evtchn[irq].evtchn = 0;
431 BUS_SETUP_INTR(device_get_parent(xenpci_device),
432 xenpci_device, scp->res_irq, INTR_TYPE_MISC,
433 NULL, evtchn_interrupt, NULL, &scp->intr_cookie);
/*
 * One-time initialization called from xenpci attach: create the
 * allocation lock and the per-IRQ locks, seed every CPU's resume
 * indices to LONG_BIT-1 so the first evtchn_interrupt() scan starts
 * at bit 0 (it pre-increments), install evtchn_interrupt as the
 * MP-safe filter-less handler for the device's PCI interrupt, and
 * remember the device for later teardown/resume.
 * NOTE(review): the error-return path after BUS_SETUP_INTR and the
 * final return are elided from this excerpt.
 */
437 xenpci_irq_init(device_t device, struct xenpci_softc *scp)
442 mtx_init(&irq_alloc_lock, "xen-irq-lock", NULL, MTX_DEF);
444 for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
445 mtx_init(&irq_evtchn[irq].lock, "irq-evtchn", NULL, MTX_DEF);
/* LONG_BIT-1 wraps to 0 on the scanner's first (l1i + 1) % LONG_BIT. */
447 for (cpu = 0; cpu < mp_ncpus; cpu++) {
448 pcpu_find(cpu)->pc_last_processed_l1i = LONG_BIT - 1;
449 pcpu_find(cpu)->pc_last_processed_l2i = LONG_BIT - 1;
452 error = BUS_SETUP_INTR(device_get_parent(device), device,
453 scp->res_irq, INTR_MPSAFE|INTR_TYPE_MISC, NULL, evtchn_interrupt,
454 NULL, &scp->intr_cookie);
458 xenpci_device = device;