1 /******************************************************************************
4 * A simplified event channel for para-drivers in unmodified linux
6 * Copyright (c) 2002-2005, K A Fraser
7 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
9 * This file may be distributed separately from the Linux kernel, or
10 * incorporated into other software packages, subject to the following license:
12 * Permission is hereby granted, free of charge, to any person obtaining a copy
13 * of this source file (the "Software"), to deal in the Software without
14 * restriction, including without limitation the rights to use, copy, modify,
15 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
16 * and to permit persons to whom the Software is furnished to do so, subject to
17 * the following conditions:
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/limits.h>
41 #include <sys/mutex.h>
42 #include <sys/interrupt.h>
45 #include <machine/xen/xen-os.h>
46 #include <machine/xen/xenvar.h>
47 #include <xen/hypervisor.h>
48 #include <xen/xen_intr.h>
49 #include <xen/evtchn.h>
52 #include <dev/xen/xenpci/xenpcivar.h>
/*
 * __ffs(word): index (0-based) of the least-significant set bit.
 * i386 builds wrap libkern ffs() (1-based) and subtract one; amd64 uses
 * an inline-asm helper.  Callers must not pass 0 (ffs(0)-1 == -1, and
 * the asm variant's result for 0 is undefined).
 * NOTE(review): the #if/#ifdef lines selecting the arch are elided in
 * this view; only the branch bodies are visible.
 */
#define __ffs(word) (ffs(word) - 1)
#elif defined(__amd64__)
static inline unsigned long __ffs(unsigned long word)
:"rm" (word)); /* XXXRW: why no "cc"? */
#error "evtchn: unsupported architecture"
/* Event channel 0 is never used, so 0 doubles as "unbound". */
#define is_valid_evtchn(x) ((x) != 0)
/*
 * Map an irq slot to its bound event-channel port.
 * BUG FIX: the macro previously expanded to "irq_evtchn[irq].evtchn",
 * silently capturing whatever variable named "irq" existed at the use
 * site rather than using the macro argument.  It only worked because
 * every caller happened to pass a variable literally named "irq".
 * Use the (parenthesized) parameter so the macro is hygienic; expansion
 * is unchanged at all existing call sites.
 */
#define evtchn_from_irq(x) (irq_evtchn[(x)].evtchn)
/* Per-irq driver handler, invoked from evtchn_interrupt(). */
driver_intr_t *handler;
/* Set when this code allocated the port and must EVTCHNOP_close it
 * on unbind; clear for caller-supplied ports. */
int close:1; /* close on unbind_from_irqhandler()? */
/*
 * Reverse map: event-channel port -> irq slot; -1 while unbound.
 */
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
[0 ... NR_EVENT_CHANNELS-1] = -1 };
/* Serializes allocation/free of slots in irq_evtchn[]. */
static struct mtx irq_alloc_lock;
/* The xenpci device; its parent bus delivers the Xen upcall interrupt. */
static device_t xenpci_device;
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
/*
 * alloc_xen_irq: claim the first free slot in irq_evtchn[] under
 * irq_alloc_lock and return its index.  Slot 0 is deliberately skipped
 * so that irq 0 can serve as an "invalid" value.
 */
mtx_lock(&irq_alloc_lock);
/* Scan starts at 1: slot 0 is reserved as the invalid irq. */
for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) {
if (irq_evtchn[irq].inuse)
irq_evtchn[irq].inuse = 1;
mtx_unlock(&irq_alloc_lock);
/* All slots taken: report and fall through to the failure return. */
printf("alloc_xen_irq: No available IRQ to bind to: "
"increase irq_evtchn[] size in evtchn.c.\n");
mtx_unlock(&irq_alloc_lock);
/*
 * free_xen_irq: release an irq slot previously claimed by
 * alloc_xen_irq(), making it available for reuse.  Only the allocation
 * bit is cleared here; the caller is responsible for having torn down
 * any handler/evtchn state first.
 */
free_xen_irq(int irq)
mtx_lock(&irq_alloc_lock);
irq_evtchn[irq].inuse = 0;
mtx_unlock(&irq_alloc_lock);
/*
 * irq_to_evtchn_port: return the event-channel port bound to this irq
 * slot (0 if unbound).  Unlocked read; callers tolerate a racy value.
 */
irq_to_evtchn_port(int irq)
return irq_evtchn[irq].evtchn;
/*
 * mask_evtchn: set the port's bit in the shared-info mask array so the
 * hypervisor stops delivering events for it.  Uses an atomic
 * (synch_) bit-set because the page is shared with Xen.
 */
mask_evtchn(int port)
shared_info_t *s = HYPERVISOR_shared_info;
synch_set_bit(port, &s->evtchn_mask[0]);
/*
 * unmask_evtchn: re-enable delivery for the port.  Done via the
 * EVTCHNOP_unmask hypercall (rather than clearing the mask bit
 * directly) so Xen can re-trigger any event that arrived while masked.
 * Return value of the hypercall is intentionally ignored.
 */
unmask_evtchn(int port)
evtchn_unmask_t op = { .port = port };
HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op);
/*
 * bind_listening_port_to_irqhandler: allocate a fresh unbound event
 * channel (EVTCHNOP_alloc_unbound, local end DOMID_SELF) that
 * remote_domain may later connect to, and attach `handler`/`arg` to it.
 * On success the chosen irq is returned through *irqp.
 * close == 1: we created the port, so unbind_from_irqhandler() must
 * close it.  NOTE(review): error-path lines are elided in this view.
 */
bind_listening_port_to_irqhandler(unsigned int remote_domain,
const char *devname, driver_intr_t handler, void *arg,
unsigned long irqflags, unsigned int *irqp)
struct evtchn_alloc_unbound alloc_unbound;
irq = alloc_xen_irq();
mtx_lock(&irq_evtchn[irq].lock);
alloc_unbound.dom = DOMID_SELF;
alloc_unbound.remote_dom = remote_domain;
error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
/* Hypercall failed: release the slot and propagate the error. */
mtx_unlock(&irq_evtchn[irq].lock);
irq_evtchn[irq].handler = handler;
irq_evtchn[irq].arg = arg;
irq_evtchn[irq].evtchn = alloc_unbound.port;
irq_evtchn[irq].close = 1;
irq_evtchn[irq].mpsafe = (irqflags & INTR_MPSAFE) != 0;
/* Publish the reverse mapping before unmasking delivery. */
evtchn_to_irq[alloc_unbound.port] = irq;
unmask_evtchn(alloc_unbound.port);
mtx_unlock(&irq_evtchn[irq].lock);
/*
 * bind_interdomain_evtchn_to_irqhandler: connect to a port that
 * remote_domain has already allocated for us
 * (EVTCHNOP_bind_interdomain) and attach `handler`/`arg` to the
 * resulting local port.  The chosen irq is returned through *irqp.
 * close == 1: the local port was created by this bind and must be
 * closed on unbind.  NOTE(review): error-path lines are elided here.
 */
bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
unsigned int remote_port, const char *devname, driver_intr_t handler,
void *arg, unsigned long irqflags, unsigned int *irqp)
struct evtchn_bind_interdomain bind_interdomain;
irq = alloc_xen_irq();
mtx_lock(&irq_evtchn[irq].lock);
bind_interdomain.remote_dom = remote_domain;
bind_interdomain.remote_port = remote_port;
error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
/* Hypercall failed: release the slot and propagate the error. */
mtx_unlock(&irq_evtchn[irq].lock);
irq_evtchn[irq].handler = handler;
irq_evtchn[irq].arg = arg;
irq_evtchn[irq].evtchn = bind_interdomain.local_port;
irq_evtchn[irq].close = 1;
irq_evtchn[irq].mpsafe = (irqflags & INTR_MPSAFE) != 0;
/* Publish the reverse mapping before unmasking delivery. */
evtchn_to_irq[bind_interdomain.local_port] = irq;
unmask_evtchn(bind_interdomain.local_port);
mtx_unlock(&irq_evtchn[irq].lock);
/*
 * bind_caller_port_to_irqhandler: attach `handler`/`arg` to an event
 * channel the caller already owns.  No hypercall is needed; we only
 * record the mapping and unmask the port.  close == 0: the port is not
 * ours, so unbind_from_irqhandler() must not close it.
 */
bind_caller_port_to_irqhandler(unsigned int caller_port,
const char *devname, driver_intr_t handler, void *arg,
unsigned long irqflags, unsigned int *irqp)
irq = alloc_xen_irq();
mtx_lock(&irq_evtchn[irq].lock);
irq_evtchn[irq].handler = handler;
irq_evtchn[irq].arg = arg;
irq_evtchn[irq].evtchn = caller_port;
irq_evtchn[irq].close = 0;
irq_evtchn[irq].mpsafe = (irqflags & INTR_MPSAFE) != 0;
/* Publish the reverse mapping before unmasking delivery. */
evtchn_to_irq[caller_port] = irq;
unmask_evtchn(caller_port);
mtx_unlock(&irq_evtchn[irq].lock);
/*
 * unbind_from_irqhandler: detach the handler from an irq slot.  Under
 * the per-irq lock: drop the port->irq reverse mapping, close the port
 * via EVTCHNOP_close if we created it (close flag), then clear the
 * handler and evtchn fields.  After releasing the lock, spin until any
 * handler invocation still in flight (in_handler) has drained, so the
 * caller may safely free handler state afterwards.
 */
unbind_from_irqhandler(unsigned int irq)
mtx_lock(&irq_evtchn[irq].lock);
evtchn = evtchn_from_irq(irq);
if (is_valid_evtchn(evtchn)) {
evtchn_to_irq[evtchn] = -1;
if (irq_evtchn[irq].close) {
struct evtchn_close close = { .port = evtchn };
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
panic("EVTCHNOP_close failed");
irq_evtchn[irq].handler = NULL;
irq_evtchn[irq].evtchn = 0;
mtx_unlock(&irq_evtchn[irq].lock);
/* Wait (busy-poll) for a concurrently running handler to finish. */
while (irq_evtchn[irq].in_handler)
/*
 * notify_remote_via_irq: send an event to the remote end of the channel
 * bound to this irq.  Silently does nothing if the irq has no bound
 * event channel.
 */
void notify_remote_via_irq(int irq)
evtchn = evtchn_from_irq(irq);
if (is_valid_evtchn(evtchn))
notify_remote_via_evtchn(evtchn);
/*
 * active_evtchns: word `idx` of "pending and not masked" event-channel
 * bits from the shared-info page.  `cpu` is unused in the visible body.
 * NOTE(review): the final parameter declaration line is elided in this
 * view; presumably `unsigned int idx` — confirm against the full file.
 */
static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
/*
 * evtchn_interrupt: upcall dispatcher.  Snapshots the two-level pending
 * bitmap from the shared-info page and invokes the registered handler
 * for each active port.  The scan resumes just past the last processed
 * (l1i, l2i) position saved in the per-cpu data, so a noisy low-numbered
 * port cannot starve higher-numbered ones (round-robin fairness).
 */
evtchn_interrupt(void *arg)
unsigned int l1i, l2i, port;
unsigned long masked_l1, masked_l2;
/* XXX: All events are bound to vcpu0 but irq may be redirected. */
int cpu = 0; /*smp_processor_id();*/
driver_intr_t *handler;
int irq, handler_mpsafe;
shared_info_t *s = HYPERVISOR_shared_info;
vcpu_info_t *v = &s->vcpu_info[cpu];
struct pcpu *pc = pcpu_find(cpu);
unsigned long l1, l2;
/* Acknowledge the upcall before reading the selector. */
v->evtchn_upcall_pending = 0;
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
/* Clear master flag /before/ clearing selector flag. */
/* Atomically grab-and-clear the level-1 selector word. */
l1 = atomic_readandclear_long(&v->evtchn_pending_sel);
/* Resume the scan after the last position we serviced. */
l1i = pc->pc_last_processed_l1i;
l2i = pc->pc_last_processed_l2i;
l1i = (l1i + 1) % LONG_BIT;
/* Mask off level-1 bits already processed this pass. */
masked_l1 = l1 & ((~0UL) << l1i);
if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */
l1i = __ffs(masked_l1);
/* Level-2: pending-and-unmasked ports within word l1i. */
l2 = active_evtchns(cpu, s, l1i);
l2i = (l2i + 1) % LONG_BIT;
masked_l2 = l2 & ((~0UL) << l2i);
if (masked_l2 == 0) { /* if we masked out all events, move on */
l2i = __ffs(masked_l2);
/* process port */
port = (l1i * LONG_BIT) + l2i;
synch_clear_bit(port, &s->evtchn_pending[0]);
irq = evtchn_to_irq[port];
/* Snapshot handler state under the per-irq lock. */
mtx_lock(&irq_evtchn[irq].lock);
handler = irq_evtchn[irq].handler;
handler_arg = irq_evtchn[irq].arg;
handler_mpsafe = irq_evtchn[irq].mpsafe;
if (unlikely(handler == NULL)) {
printf("Xen IRQ%d (port %d) has no handler!\n",
mtx_unlock(&irq_evtchn[irq].lock);
/* Mark in-flight so unbind_from_irqhandler() can drain us,
 * then drop the lock for the (possibly long) handler call. */
irq_evtchn[irq].in_handler = 1;
mtx_unlock(&irq_evtchn[irq].lock);
//local_irq_enable();
handler(handler_arg);
//local_irq_disable();
mtx_lock(&irq_evtchn[irq].lock);
irq_evtchn[irq].in_handler = 0;
mtx_unlock(&irq_evtchn[irq].lock);
/* if this is the final port processed, we'll pick up here+1 next time */
pc->pc_last_processed_l1i = l1i;
pc->pc_last_processed_l2i = l2i;
} while (l2i != LONG_BIT - 1);
/* Re-check the word we just finished. */
l2 = active_evtchns(cpu, s, l1i);
if (l2 == 0) /* we handled all ports, so we can clear the selector bit */
/*
 * Teardown path (suspend/detach — enclosing signature elided in this
 * view): unhook evtchn_interrupt from the parent bus irq so no upcalls
 * arrive while we are down.  Teardown failure is logged but not fatal.
 */
struct xenpci_softc *scp = device_get_softc(xenpci_device);
* Take our interrupt handler out of the list of handlers
* that can handle this irq.
if (scp->intr_cookie != NULL) {
if (BUS_TEARDOWN_INTR(device_get_parent(xenpci_device),
xenpci_device, scp->res_irq, scp->intr_cookie) != 0)
printf("intr teardown failed.. continuing\n");
scp->intr_cookie = NULL;
/*
 * Resume path (enclosing signature elided in this view): after
 * suspend/migration all event-channel bindings are stale, so wipe both
 * maps and re-hook the upcall handler on the parent bus irq.
 * NOTE(review): BUS_SETUP_INTR's return value is ignored here, and the
 * flags lack INTR_MPSAFE unlike xenpci_irq_init() — confirm against the
 * full file whether both are intentional.
 */
struct xenpci_softc *scp = device_get_softc(xenpci_device);
for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) {
evtchn_to_irq[evtchn] = -1;
for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
irq_evtchn[irq].evtchn = 0;
BUS_SETUP_INTR(device_get_parent(xenpci_device),
xenpci_device, scp->res_irq, INTR_TYPE_MISC,
NULL, evtchn_interrupt, NULL, &scp->intr_cookie);
/*
 * xenpci_irq_init: one-time setup.  Initializes the allocation lock and
 * every per-irq lock, seeds each cpu's scan position to LONG_BIT-1 so
 * the first evtchn_interrupt() pass starts at bit 0, then hooks
 * evtchn_interrupt (MPSAFE filter-less handler) onto the device's irq
 * resource and remembers the device for later teardown/resume.
 */
xenpci_irq_init(device_t device, struct xenpci_softc *scp)
mtx_init(&irq_alloc_lock, "xen-irq-lock", NULL, MTX_DEF);
for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
mtx_init(&irq_evtchn[irq].lock, "irq-evtchn", NULL, MTX_DEF);
/* (l1i+1)%LONG_BIT and (l2i+1)%LONG_BIT wrap these to 0 first pass. */
for (cpu = 0; cpu < mp_ncpus; cpu++) {
pcpu_find(cpu)->pc_last_processed_l1i = LONG_BIT - 1;
pcpu_find(cpu)->pc_last_processed_l2i = LONG_BIT - 1;
error = BUS_SETUP_INTR(device_get_parent(device), device,
scp->res_irq, INTR_MPSAFE|INTR_TYPE_MISC, NULL, evtchn_interrupt,
NULL, &scp->intr_cookie);
/* Stash the device for the teardown/resume paths above. */
xenpci_device = device;