2 * Copyright (c) 1991 The Regents of the University of California.
5 * This code is derived from software contributed to Berkeley by
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * Copyright (c) 2001 Jake Burkholder.
34 * All rights reserved.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * from: @(#)isa.c 7.2 (Berkeley) 5/13/91
58 * form: src/sys/i386/isa/intr_machdep.c,v 1.57 2001/07/20
61 #include <sys/cdefs.h>
62 __FBSDID("$FreeBSD$");
64 #include <sys/param.h>
65 #include <sys/systm.h>
67 #include <sys/errno.h>
68 #include <sys/interrupt.h>
69 #include <sys/kernel.h>
71 #include <sys/mutex.h>
77 #include <machine/frame.h>
78 #include <machine/intr_machdep.h>
80 #define MAX_STRAY_LOG 5	/* Log at most this many strays per vector (see intr_stray_vector()). */

/* Compile-time check: an intr_vector entry is exactly 1 << IV_SHIFT bytes,
 * so vector numbers can be converted to table offsets by shifting --
 * presumably by trap-time assembly; confirm against the locore code. */
82 CTASSERT((1 << IV_SHIFT) == sizeof(struct intr_vector));

/* Trap-time handler function for each processor interrupt level (PIL). */
84 ih_func_t *intr_handlers[PIL_MAX];
/* Per-PIL index into the intrcnt/intrnames statistics tables. */
85 uint16_t pil_countp[PIL_MAX];

/* The interrupt vector table, one entry per vector number. */
87 struct intr_vector intr_vectors[IV_MAX];
/* Per-vector index into the intrcnt/intrnames statistics tables. */
88 uint16_t intr_countp[IV_MAX];
/* How many stray interrupts have been seen on each vector. */
89 static u_long intr_stray_count[IV_MAX];
/*
 * Human-readable name for each PIL, used by intr_setup() to build the
 * "pil%d: %s" statistics-counter names.
 * NOTE(review): several initializer entries and the closing "};" appear
 * to be elided from this view of the file -- confirm against the
 * original before editing.
 */
91 static const char *const pil_names[] = {
94 	"ithrd",	/* PIL_ITHREAD */
95 	"rndzvs",	/* PIL_RENDEZVOUS */
97 	"stop",		/* PIL_STOP */
98 	"preempt",	/* PIL_PREEMPT */
99 	"stray", "stray", "stray", "stray", "stray",
100 	"filter",	/* PIL_FILTER */
101 	"fast",		/* PIL_FAST */
102 	"tick",		/* PIL_TICK */
105 /* protect the intr_vectors table */
106 static struct sx intr_table_lock;
107 /* protect intrcnt_index */
108 static struct mtx intrcnt_lock;

/* Non-zero once the APs are up and interrupts may be assigned to CPUs;
 * checked by intr_assign_cpu() to skip work during early boot. */
111 static int assign_cpu;

/* Forward declarations for the file-local helpers defined below. */
113 static void intr_assign_next_cpu(struct intr_vector *iv);
114 static void intr_shuffle_irqs(void *arg __unused);
117 static int intr_assign_cpu(void *arg, u_char cpu);
118 static void intr_execute_handlers(void *);
119 static void intr_stray_level(struct trapframe *);
120 static void intr_stray_vector(void *);
121 static int intrcnt_setname(const char *, int);
122 static void intrcnt_updatename(int, const char *, int);
/*
 * Update the statistics-table name for interrupt vector "vec"
 * (ispil == 0) or PIL "vec" (ispil != 0).  On the first call the name
 * table is seeded with catch-all "???", "stray" and "pil" entries that
 * every counter initially points at; a counter gets its own slot the
 * first time it is renamed.  Serialized by intrcnt_lock.
 * NOTE(review): several lines (braces, else branches, declarations)
 * appear to be elided from this view of the file.
 */
125 intrcnt_updatename(int vec, const char *name, int ispil)
127 	static int intrcnt_index, stray_pil_index, stray_vec_index;

130 	mtx_lock_spin(&intrcnt_lock);
131 	if (intrnames[0] == '\0') {
		/* First call: seed the shared placeholder entries. */
134 		printf("initializing intr_countp\n");	/* fixed typo: was "initalizing" */
135 		intrcnt_setname("???", intrcnt_index++);
137 		stray_vec_index = intrcnt_index++;
138 		intrcnt_setname("stray", stray_vec_index);
		/* Point every vector counter at the shared "stray" slot. */
139 		for (name_index = 0; name_index < IV_MAX; name_index++)
140 			intr_countp[name_index] = stray_vec_index;
142 		stray_pil_index = intrcnt_index++;
143 		intrcnt_setname("pil", stray_pil_index);
		/* Point every PIL counter at the shared "pil" slot. */
144 		for (name_index = 0; name_index < PIL_MAX; name_index++)
145 			pil_countp[name_index] = stray_pil_index;
	/* Reuse the slot already owned by this vector/PIL, else take a new one. */
151 	if (!ispil && intr_countp[vec] != stray_vec_index)
152 		name_index = intr_countp[vec];
153 	else if (ispil && pil_countp[vec] != stray_pil_index)
154 		name_index = pil_countp[vec];
156 		name_index = intrcnt_index++;
158 	if (intrcnt_setname(name, name_index))
162 		intr_countp[vec] = name_index;
164 		pil_countp[vec] = name_index;
165 	mtx_unlock_spin(&intrcnt_lock);
/*
 * Store "name" into slot "index" of the intrnames table, left-justified
 * and padded to MAXCOMLEN characters.  The bounds check guards against
 * running past eintrnames; the return statements are elided from this
 * view, but the non-zero return is treated as failure by the caller.
 */
169 intrcnt_setname(const char *name, int index)
172 	if (intrnames + (MAXCOMLEN + 1) * index >= eintrnames)
174 	snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s",
/*
 * Wire up vector "vec": record the vector-level handler (ivf/iva) and
 * priority "pri" in intr_vectors[], install the trap-time handler "ihf"
 * for that PIL, and (re)name the PIL's statistics counter.
 * NOTE(review): locking/braces around these statements are elided here;
 * confirm the caller holds the appropriate lock.
 */
180 intr_setup(int pri, ih_func_t *ihf, int vec, iv_func_t *ivf, void *iva)
182 	char pilname[MAXCOMLEN + 1];

187 	intr_vectors[vec].iv_func = ivf;
188 	intr_vectors[vec].iv_arg = iva;
189 	intr_vectors[vec].iv_pri = pri;
190 	intr_vectors[vec].iv_vec = vec;
192 	intr_handlers[pri] = ihf;
	/* Name the PIL counter e.g. "pil13: fast". */
194 	snprintf(pilname, MAXCOMLEN + 1, "pil%d: %s", pri, pil_names[pri]);
195 	intrcnt_updatename(pri, pilname, 1);
/* Default PIL handler: log an interrupt that arrived at an unused level. */
199 intr_stray_level(struct trapframe *tf)
202 	printf("stray level interrupt %ld\n", tf->tf_level);
/*
 * Default vector handler: log an interrupt on a vector that has no
 * registered handler, giving up after MAX_STRAY_LOG occurrences so a
 * wedged interrupt line cannot flood the console.
 */
206 intr_stray_vector(void *cookie)
208 	struct intr_vector *iv;

	/* NOTE(review): the "iv = cookie;" assignment appears elided here. */
211 	if (intr_stray_count[iv->iv_vec] < MAX_STRAY_LOG) {
212 		printf("stray vector interrupt %d\n", iv->iv_vec);
213 		intr_stray_count[iv->iv_vec]++;
214 		if (intr_stray_count[iv->iv_vec] >= MAX_STRAY_LOG)
215 			printf("got %d stray interrupt %d's: not logging "
216 			    "anymore\n", MAX_STRAY_LOG, iv->iv_vec);
/*
 * Boot-time initialization: point every PIL and every vector at the
 * stray handlers so anything that fires before a driver registers is
 * caught and logged.  NOTE(review): the enclosing function headers are
 * elided from this view -- presumably two init stages, with the lock
 * setup at the bottom belonging to the later stage; confirm against
 * the original file.
 */
225 	/* Mark all interrupts as being stray. */
226 	for (i = 0; i < PIL_MAX; i++)
227 		intr_handlers[i] = intr_stray_level;
228 	for (i = 0; i < IV_MAX; i++) {
229 		intr_vectors[i].iv_func = intr_stray_vector;
230 		intr_vectors[i].iv_arg = &intr_vectors[i];
231 		intr_vectors[i].iv_pri = PIL_LOW;
232 		intr_vectors[i].iv_vec = i;
233 		intr_vectors[i].iv_refcnt = 0;
	/* PIL_LOW gets the real dispatcher so registered vectors can fire. */
235 	intr_handlers[PIL_LOW] = intr_fast;

	/* Set up the locks guarding the tables above. */
242 	sx_init(&intr_table_lock, "intr sources");
243 	mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
/*
 * intr_event CPU-assignment callback: reprogram the interrupt
 * controller so the vector backing "arg" is delivered to "cpu".
 * Does nothing until assign_cpu is set (APs started) and a real CPU
 * is requested.  NOTE(review): the declaration/lookup of "pc" and the
 * return statements are elided from this view.
 */
247 intr_assign_cpu(void *arg, u_char cpu)
251 	struct intr_vector *iv;

254 	 * Don't do anything during early boot.  We will pick up the
255 	 * assignment once the APs are started.
257 	if (assign_cpu && cpu != NOCPU) {

262 		sx_xlock(&intr_table_lock);
263 		iv->iv_mid = pc->pc_mid;
264 		iv->iv_ic->ic_assign(iv);
265 		sx_xunlock(&intr_table_lock);
/*
 * Vector-level dispatch for vectors with registered handlers: hand the
 * interrupt to the intr_event framework; if no handler claims it,
 * fall back to the stray-vector path so it gets logged.
 */
274 intr_execute_handlers(void *cookie)
276 	struct intr_vector *iv;

279 	if (__predict_false(intr_event_handle(iv->iv_event, NULL) != 0))
280 		intr_stray_vector(iv);
/*
 * Register interrupt controller "ic" for vector "vec" and create the
 * intr_event that handlers for this vector attach to.  Fails for
 * out-of-range vectors and when the vector is already claimed (the
 * return statements on the early-unlock paths are elided here).
 */
284 intr_controller_register(int vec, const struct intr_controller *ic,
287 	struct intr_event *ie;
288 	struct intr_vector *iv;

291 	if (vec < 0 || vec >= IV_MAX)
293 	sx_xlock(&intr_table_lock);
294 	iv = &intr_vectors[vec];
296 	sx_xunlock(&intr_table_lock);
	/* The controller's clear hook doubles as the pre/post-ithread hook. */
299 	error = intr_event_create(&ie, iv, 0, vec, NULL, ic->ic_clear,
300 	    ic->ic_clear, intr_assign_cpu, "vec%d:", vec);
303 	sx_xlock(&intr_table_lock);
	/* Raced with another registration for this vector: back out. */
304 	if (iv->iv_event != NULL) {
305 		sx_xunlock(&intr_table_lock);
306 		intr_event_destroy(ie);
310 	iv->iv_icarg = icarg;
	/* Initially target the registering CPU's module ID. */
312 	iv->iv_mid = PCPU_GET(mid);
313 	sx_xunlock(&intr_table_lock);
/*
 * Attach a driver filter and/or ithread handler to vector "vec".
 * Chooses the PIL for the vector from the handler type (INTR_FAST ->
 * PIL_FAST, filter -> PIL_FILTER, ithread only -> PIL_ITHREAD) and
 * upgrades an already-shared vector to PIL_FILTER when a filter is
 * added alongside existing handlers.  NOTE(review): numerous lines
 * (returns, braces, the controller/event lookups) are elided from this
 * view of the file.
 */
318 inthand_add(const char *name, int vec, driver_filter_t *filt,
319     driver_intr_t *handler, void *arg, int flags, void **cookiep)
321 	const struct intr_controller *ic;
322 	struct intr_event *ie;
323 	struct intr_handler *ih;
324 	struct intr_vector *iv;

327 	if (vec < 0 || vec >= IV_MAX)
330 	 * INTR_FAST filters/handlers are special purpose only, allowing
331 	 * them to be shared just would complicate things unnecessarily.
333 	if ((flags & INTR_FAST) != 0 && (flags & INTR_EXCL) == 0)
335 	sx_xlock(&intr_table_lock);
336 	iv = &intr_vectors[vec];
339 	sx_xunlock(&intr_table_lock);
340 	if (ic == NULL || ie == NULL)
342 	error = intr_event_add_handler(ie, name, filt, handler, arg,
343 	    intr_priority(flags), flags, cookiep);
346 	sx_xlock(&intr_table_lock);
347 	/* Disable the interrupt while we fiddle with it. */
	/* First handler on this vector: pick the PIL from the handler type. */
350 	if (iv->iv_refcnt == 1)
351 		intr_setup((flags & INTR_FAST) != 0 ? PIL_FAST :
352 		    filt != NULL ? PIL_FILTER : PIL_ITHREAD, intr_fast,
353 		    vec, intr_execute_handlers, iv);
354 	else if (filt != NULL) {
356 		 * Check if we need to upgrade from PIL_ITHREAD to PIL_FILTER.
357 		 * Given that apart from the on-board SCCs and UARTs shared
358 		 * interrupts are rather uncommon on sparc64 this should be
359 		 * pretty rare in practice.
362 		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
363 			if (ih->ih_filter != NULL && ih->ih_filter != filt) {
369 			intr_setup(PIL_FILTER, intr_fast, vec,
370 			    intr_execute_handlers, iv);
	/* A real handler exists now; restart stray accounting for the vector. */
372 	intr_stray_count[vec] = 0;
373 	intrcnt_updatename(vec, ie->ie_fullname, 0);
376 		intr_assign_next_cpu(iv);
379 	/* Ensure the interrupt is cleared, it might have triggered before. */
380 	if (ic->ic_clear != NULL)
382 	sx_xunlock(&intr_table_lock);
/*
 * Detach the handler identified by "cookie" from vector "vec".  When
 * the last handler goes away the vector is pointed back at the
 * stray-vector handler at PIL_LOW so future interrupts are logged.
 * NOTE(review): returns, braces and the refcount decrement are elided
 * from this view of the file.
 */
387 inthand_remove(int vec, void *cookie)
389 	struct intr_vector *iv;

392 	if (vec < 0 || vec >= IV_MAX)
394 	error = intr_event_remove_handler(cookie);
397 	 * XXX: maybe this should be done regardless of whether
398 	 * intr_event_remove_handler() succeeded?
400 	sx_xlock(&intr_table_lock);
401 	iv = &intr_vectors[vec];
403 	if (iv->iv_refcnt == 0) {
405 		 * Don't disable the interrupt for now, so that
406 		 * stray interrupts get detected...
408 		intr_setup(PIL_LOW, intr_fast, vec,
409 		    intr_stray_vector, iv);
411 	sx_xunlock(&intr_table_lock);
416 /* Add a description to an active interrupt handler. */
418 intr_describe(int vec, void *ih, const char *descr)
420 	struct intr_vector *iv;

423 	if (vec < 0 || vec >= IV_MAX)
425 	sx_xlock(&intr_table_lock);
426 	iv = &intr_vectors[vec];
	/* NOTE(review): the condition guarding this early unlock is elided. */
428 		sx_xunlock(&intr_table_lock);
431 	error = intr_event_describe_handler(iv->iv_event, ih, descr);
433 		sx_xunlock(&intr_table_lock);
	/* Propagate the event's updated full name to the statistics tables. */
436 	intrcnt_updatename(vec, iv->iv_event->ie_fullname, 0);
437 	sx_xunlock(&intr_table_lock);
443  * Support for balancing interrupt sources across CPUs. For now we just
444  * allocate CPUs round-robin.
447 /* The BSP is always a valid target. */
448 static cpumask_t intr_cpus = (1 << 0);
/* Next CPU ID to consider in the round-robin scan (see intr_assign_next_cpu()). */
449 static int current_cpu;
/*
 * Assign "iv" to the next valid CPU in round-robin order and program
 * its controller accordingly.  Caller must hold intr_table_lock
 * exclusively (asserted below).  NOTE(review): the do-loop head and
 * the current_cpu increment/wrap lines are elided from this view.
 */
452 intr_assign_next_cpu(struct intr_vector *iv)
456 	sx_assert(&intr_table_lock, SA_XLOCKED);
459 	 * Assign this source to a CPU in a round-robin fashion.
461 	pc = pcpu_find(current_cpu);
464 	iv->iv_mid = pc->pc_mid;
465 	iv->iv_ic->ic_assign(iv);
	/* Advance, wrapping at mp_maxid, until a CPU in intr_cpus is found. */
468 		if (current_cpu > mp_maxid)
470 	} while (!(intr_cpus & (1 << current_cpu)));
473 /* Attempt to bind the specified IRQ to the specified CPU. */
475 intr_bind(int vec, u_char cpu)
477 	struct intr_vector *iv;

479 	if (vec < 0 || vec >= IV_MAX)
482 	sx_xlock(&intr_table_lock);
483 	iv = &intr_vectors[vec];
	/* NOTE(review): the condition guarding this early unlock is elided. */
485 		sx_xunlock(&intr_table_lock);
	/* Delegate the actual binding to the intr_event framework. */
488 	error = intr_event_bind(iv->iv_event, cpu);
489 	sx_xunlock(&intr_table_lock);
494  * Add a CPU to our mask of valid CPUs that can be destinations of
498 intr_add_cpu(u_int cpu)
	/* NOTE(review): the range check guarding this panic is elided here. */
502 		panic("%s: Invalid CPU ID", __func__);
504 		printf("INTR: Adding CPU %d as a target\n", cpu);
506 	intr_cpus |= (1 << cpu);
510  * Distribute all the interrupt sources among the available CPUs once the
511  * APs have been launched.
514 intr_shuffle_irqs(void *arg __unused)
517 	struct intr_vector *iv;

520 	/* Don't bother on UP. */
524 	sx_xlock(&intr_table_lock);
	/* Walk every vector that has at least one handler attached. */
526 	for (i = 0; i < IV_MAX; i++) {
527 		iv = &intr_vectors[i];
		/* NOTE(review): iv is the address of an array element and can
		 * never be NULL; the iv != NULL test is redundant. */
528 		if (iv != NULL && iv->iv_refcnt > 0) {
530 			 * If this event is already bound to a CPU,
531 			 * then assign the source to that CPU instead
532 			 * of picking one via round-robin.
534 			if (iv->iv_event->ie_cpu != NOCPU &&
535 			    (pc = pcpu_find(iv->iv_event->ie_cpu)) != NULL) {
536 				iv->iv_mid = pc->pc_mid;
537 				iv->iv_ic->ic_assign(iv);
			/* Otherwise pick the next CPU round-robin. */
539 				intr_assign_next_cpu(iv);
542 	sx_xunlock(&intr_table_lock);
/* Run once the APs are up (SI_SUB_SMP). */
544 SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,