2 * Copyright (c) 1991 The Regents of the University of California.
5 * This code is derived from software contributed to Berkeley by
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * Copyright (c) 2001 Jake Burkholder.
34 * All rights reserved.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * from: @(#)isa.c 7.2 (Berkeley) 5/13/91
58 * form: src/sys/i386/isa/intr_machdep.c,v 1.57 2001/07/20
61 #include <sys/cdefs.h>
62 __FBSDID("$FreeBSD$");
64 #include <sys/param.h>
65 #include <sys/systm.h>
67 #include <sys/errno.h>
68 #include <sys/interrupt.h>
69 #include <sys/kernel.h>
71 #include <sys/mutex.h>
76 #include <sys/vmmeter.h>
78 #include <machine/frame.h>
79 #include <machine/intr_machdep.h>
/*
 * Global interrupt dispatch state.  NOTE(review): the listing is elided;
 * blank/comment lines from the original file are missing here.
 */
/* Cap on per-level/per-vector stray-interrupt log messages (see below). */
81 #define MAX_STRAY_LOG 5
/* iv_vec-to-vector translation in assembly relies on this size. */
83 CTASSERT((1 << IV_SHIFT) == sizeof(struct intr_vector));
/* One handler and one counter-name slot per processor interrupt level. */
85 ih_func_t *intr_handlers[PIL_MAX];
86 uint16_t pil_countp[PIL_MAX];
87 static uint16_t pil_stray_count[PIL_MAX];
/* One vector descriptor and counter-name slot per interrupt vector. */
89 struct intr_vector intr_vectors[IV_MAX];
90 uint16_t intr_countp[IV_MAX];
91 static uint16_t intr_stray_count[IV_MAX];
/*
 * Human-readable names for the processor interrupt levels, indexed by PIL.
 * NOTE(review): leading entries and the closing brace are elided from this
 * listing; "stray" fills PIL slots with no dedicated purpose.
 */
93 static const char *const pil_names[] = {
96 "preempt", /* PIL_PREEMPT */
97 "ithrd", /* PIL_ITHREAD */
98 "rndzvs", /* PIL_RENDEZVOUS */
100 "hardclock", /* PIL_HARDCLOCK */
101 "stray", "stray", "stray", "stray",
102 "filter", /* PIL_FILTER */
103 "bridge", /* PIL_BRIDGE */
104 "stop", /* PIL_STOP */
105 "tick", /* PIL_TICK */
73 /* protect the intr_vectors table */
109 static struct sx intr_table_lock;
110 /* protect intrcnt_index */
111 static struct mtx intrcnt_lock;
/* Nonzero once APs are up and vectors may be assigned to CPUs. */
114 static int assign_cpu;
/* Forward declarations for the static helpers defined below. */
116 static void intr_assign_next_cpu(struct intr_vector *iv);
117 static void intr_shuffle_irqs(void *arg __unused);
120 static int intr_assign_cpu(void *arg, int cpu);
121 static void intr_execute_handlers(void *);
122 static void intr_stray_level(struct trapframe *);
123 static void intr_stray_vector(void *);
124 static int intrcnt_setname(const char *, int);
125 static void intrcnt_updatename(int, const char *, int);
/* Called from assembly (see comment at its definition). */
126 void counter_intr_inc(void);
/*
 * Update the systat/vmstat counter name for interrupt vector "vec" (or,
 * with ispil != 0, for PIL "vec").  On first use the shared name tables
 * are lazily initialized and every slot is pointed at the catch-all
 * "stray"/"pil" entries.  Serialized by intrcnt_lock.
 * NOTE(review): the listing is elided; the return type, braces, and some
 * statements of the original are missing here.  Only the misspelled
 * diagnostic string ("initalizing" -> "initializing") was changed.
 */
129 intrcnt_updatename(int vec, const char *name, int ispil)
131 static int intrcnt_index, stray_pil_index, stray_vec_index;
134 mtx_lock_spin(&intrcnt_lock);
135 if (intrnames[0] == '\0') {
/* One-time setup: slot 0 is "???", then the shared stray slots. */
138 printf("initializing intr_countp\n");
139 intrcnt_setname("???", intrcnt_index++);
141 stray_vec_index = intrcnt_index++;
142 intrcnt_setname("stray", stray_vec_index);
143 for (name_index = 0; name_index < IV_MAX; name_index++)
144 intr_countp[name_index] = stray_vec_index;
146 stray_pil_index = intrcnt_index++;
147 intrcnt_setname("pil", stray_pil_index);
148 for (name_index = 0; name_index < PIL_MAX; name_index++)
149 pil_countp[name_index] = stray_pil_index;
/* Reuse the slot already assigned to this vec/pil, else take a new one. */
155 if (!ispil && intr_countp[vec] != stray_vec_index)
156 name_index = intr_countp[vec];
157 else if (ispil && pil_countp[vec] != stray_pil_index)
158 name_index = pil_countp[vec];
160 name_index = intrcnt_index++;
162 if (intrcnt_setname(name, name_index))
166 intr_countp[vec] = name_index;
168 pil_countp[vec] = name_index;
169 mtx_unlock_spin(&intrcnt_lock);
/*
 * Copy "name" into slot "index" of the fixed-width intrnames table,
 * left-padded to MAXCOMLEN.  Fails (non-zero, presumably — the elided
 * return statements are not visible here) when the slot would overflow
 * sintrnames.
 */
173 intrcnt_setname(const char *name, int index)
/* Bounds check against the total size of the name table. */
176 if ((MAXCOMLEN + 1) * index >= sintrnames)
178 snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s",
/*
 * Wire interrupt vector "vec" to handler "ivf"/"iva" at priority "pri",
 * install the level handler "ihf", and refresh the PIL counter name.
 * NOTE(review): locking/assertion lines appear elided from this listing.
 */
184 intr_setup(int pri, ih_func_t *ihf, int vec, iv_func_t *ivf, void *iva)
186 char pilname[MAXCOMLEN + 1];
191 intr_vectors[vec].iv_func = ivf;
192 intr_vectors[vec].iv_arg = iva;
193 intr_vectors[vec].iv_pri = pri;
194 intr_vectors[vec].iv_vec = vec;
196 intr_handlers[pri] = ihf;
/* Counter name is "pil<N>: <name>", e.g. "pil13: filter". */
198 snprintf(pilname, MAXCOMLEN + 1, "pil%d: %s", pri, pil_names[pri]);
199 intrcnt_updatename(pri, pilname, 1);
/*
 * Default handler for a PIL with no registered handler: log the stray
 * level interrupt, but stop after MAX_STRAY_LOG occurrences per level.
 */
203 intr_stray_level(struct trapframe *tf)
207 level = tf->tf_level;
208 if (pil_stray_count[level] < MAX_STRAY_LOG) {
209 printf("stray level interrupt %ld\n", level);
210 pil_stray_count[level]++;
/* Announce once that further strays on this level go unlogged. */
211 if (pil_stray_count[level] >= MAX_STRAY_LOG)
212 printf("got %d stray level interrupt %ld's: not "
213 "logging anymore\n", MAX_STRAY_LOG, level);
/*
 * Default handler for a vector with no registered handler: log the stray
 * vector interrupt, rate-limited to MAX_STRAY_LOG messages per vector.
 * "cookie" is the struct intr_vector itself (set up in intr_init below);
 * the elided lines presumably extract "vec" from it.
 */
218 intr_stray_vector(void *cookie)
220 struct intr_vector *iv;
225 if (intr_stray_count[vec] < MAX_STRAY_LOG) {
226 printf("stray vector interrupt %d\n", vec);
227 intr_stray_count[vec]++;
228 if (intr_stray_count[vec] >= MAX_STRAY_LOG)
229 printf("got %d stray vector interrupt %d's: not "
230 "logging anymore\n", MAX_STRAY_LOG, vec);
/*
 * Early-boot initialization fragments (the enclosing function headers are
 * elided from this listing): point every PIL and every vector at the
 * stray handlers so unexpected interrupts are caught and logged, then set
 * up the locks protecting the tables.
 */
138 /* Mark all interrupts as being stray. */
240 for (i = 0; i < PIL_MAX; i++)
241 intr_handlers[i] = intr_stray_level;
242 for (i = 0; i < IV_MAX; i++) {
/* Each vector's stray cookie is its own descriptor. */
243 intr_vectors[i].iv_func = intr_stray_vector;
244 intr_vectors[i].iv_arg = &intr_vectors[i];
245 intr_vectors[i].iv_pri = PIL_LOW;
246 intr_vectors[i].iv_vec = i;
247 intr_vectors[i].iv_refcnt = 0;
249 intr_handlers[PIL_LOW] = intr_fast;
/* Lock initialization (likely a separate, later SYSINIT stage). */
256 sx_init(&intr_table_lock, "intr sources");
257 mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
/*
 * intr_event CPU-binding callback: retarget the vector at the given CPU's
 * interrupt controller.  No-op during early boot (assign_cpu == 0); the
 * shuffle at SI_SUB_SMP picks the binding up later.
 */
261 intr_assign_cpu(void *arg, int cpu)
265 struct intr_vector *iv;
268 * Don't do anything during early boot. We will pick up the
269 * assignment once the APs are started.
271 if (assign_cpu && cpu != NOCPU) {
276 sx_xlock(&intr_table_lock);
/* Program the controller with the target CPU's module ID. */
277 iv->iv_mid = pc->pc_mid;
278 iv->iv_ic->ic_assign(iv);
279 sx_xunlock(&intr_table_lock);
/*
 * Per-vector dispatch: hand the interrupt to the generic intr_event
 * framework; if no handler claims it, fall back to stray accounting.
 */
288 intr_execute_handlers(void *cookie)
290 struct intr_vector *iv;
293 if (__predict_false(intr_event_handle(iv->iv_event, NULL) != 0))
294 intr_stray_vector(iv);
/*
 * Register interrupt controller "ic" (with its private "icarg") for
 * vector "vec" and create the backing intr_event.  Fails for an
 * out-of-range vector or (in the elided branch) a vector already claimed.
 */
298 intr_controller_register(int vec, const struct intr_controller *ic,
301 struct intr_event *ie;
302 struct intr_vector *iv;
305 if (vec < 0 || vec >= IV_MAX)
307 sx_xlock(&intr_table_lock);
308 iv = &intr_vectors[vec];
310 sx_xunlock(&intr_table_lock);
/* Event created without the table lock held; raced below. */
313 error = intr_event_create(&ie, iv, 0, vec, NULL, ic->ic_clear,
314 ic->ic_clear, intr_assign_cpu, "vec%d:", vec);
317 sx_xlock(&intr_table_lock);
/* Lost the race: someone registered this vector meanwhile. */
318 if (iv->iv_event != NULL) {
319 sx_xunlock(&intr_table_lock);
320 intr_event_destroy(ie);
324 iv->iv_icarg = icarg;
/* Initially target the boot CPU. */
326 iv->iv_mid = PCPU_GET(mid);
327 sx_xunlock(&intr_table_lock);
/*
 * Add a filter and/or threaded handler to vector "vec" via the generic
 * intr_event framework, then (re)program the vector's PIL:
 * PIL_BRIDGE for INTR_BRIDGE, PIL_FILTER when a filter is present,
 * PIL_ITHREAD otherwise.  NOTE(review): several lines (returns, refcount
 * updates, enable/disable calls) are elided from this listing.
 */
332 inthand_add(const char *name, int vec, driver_filter_t *filt,
333 driver_intr_t *handler, void *arg, int flags, void **cookiep)
335 const struct intr_controller *ic;
336 struct intr_event *ie;
337 struct intr_handler *ih;
338 struct intr_vector *iv;
341 if (vec < 0 || vec >= IV_MAX)
344 * INTR_BRIDGE filters/handlers are special purpose only, allowing
345 * them to be shared just would complicate things unnecessarily.
347 if ((flags & INTR_BRIDGE) != 0 && (flags & INTR_EXCL) == 0)
349 sx_xlock(&intr_table_lock);
350 iv = &intr_vectors[vec];
353 sx_xunlock(&intr_table_lock);
/* Vector must have a controller and an event registered. */
354 if (ic == NULL || ie == NULL)
356 error = intr_event_add_handler(ie, name, filt, handler, arg,
357 intr_priority(flags), flags, cookiep);
360 sx_xlock(&intr_table_lock);
361 /* Disable the interrupt while we fiddle with it. */
/* First handler on this vector: set up its dispatch PIL. */
364 if (iv->iv_refcnt == 1)
365 intr_setup((flags & INTR_BRIDGE) != 0 ? PIL_BRIDGE :
366 filt != NULL ? PIL_FILTER : PIL_ITHREAD, intr_fast,
367 vec, intr_execute_handlers, iv);
368 else if (filt != NULL) {
370 * Check if we need to upgrade from PIL_ITHREAD to PIL_FILTER.
371 * Given that apart from the on-board SCCs and UARTs shared
372 * interrupts are rather uncommon on sparc64 this should be
373 * pretty rare in practice.
376 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
377 if (ih->ih_filter != NULL && ih->ih_filter != filt) {
383 intr_setup(PIL_FILTER, intr_fast, vec,
384 intr_execute_handlers, iv);
/* Reset stray accounting and publish the new counter name. */
386 intr_stray_count[vec] = 0;
387 intrcnt_updatename(vec, ie->ie_fullname, 0);
390 intr_assign_next_cpu(iv);
393 /* Ensure the interrupt is cleared, it might have triggered before. */
394 if (ic->ic_clear != NULL)
396 sx_xunlock(&intr_table_lock);
/*
 * Remove the handler identified by "cookie" from vector "vec".  When the
 * last handler is gone the vector is pointed back at the stray handler
 * (but deliberately left enabled so strays are still detected).
 */
401 inthand_remove(int vec, void *cookie)
403 struct intr_vector *iv;
406 if (vec < 0 || vec >= IV_MAX)
408 error = intr_event_remove_handler(cookie);
411 * XXX: maybe this should be done regardless of whether
412 * intr_event_remove_handler() succeeded?
414 sx_xlock(&intr_table_lock);
415 iv = &intr_vectors[vec];
417 if (iv->iv_refcnt == 0) {
419 * Don't disable the interrupt for now, so that
420 * stray interrupts get detected...
422 intr_setup(PIL_LOW, intr_fast, vec,
423 intr_stray_vector, iv);
425 sx_xunlock(&intr_table_lock);
230 /* Add a description to an active interrupt handler. */
/*
 * Appends "descr" to handler "ih" of vector "vec" via the intr_event
 * framework and refreshes the counter name to match.
 */
432 intr_describe(int vec, void *ih, const char *descr)
434 struct intr_vector *iv;
437 if (vec < 0 || vec >= IV_MAX)
439 sx_xlock(&intr_table_lock);
440 iv = &intr_vectors[vec];
442 sx_xunlock(&intr_table_lock);
445 error = intr_event_describe_handler(iv->iv_event, ih, descr);
447 sx_xunlock(&intr_table_lock);
450 intrcnt_updatename(vec, iv->iv_event->ie_fullname, 0);
451 sx_xunlock(&intr_table_lock);
241 * Do VM_CNT_INC(intr), being in the interrupt context already. This is
457 * called from assembly.
458 * To avoid counter_enter() and appropriate assertion, unwrap VM_CNT_INC()
459 * and hardcode the actual increment.
462 counter_intr_inc(void)
/* Direct per-CPU increment; safe because interrupts are already blocked. */
465 *(uint64_t *)zpcpu_get(vm_cnt.v_intr) += 1;
247 * Support for balancing interrupt sources across CPUs. For now we just
471 * allocate CPUs round-robin.
/* Valid target CPUs; starts with only CPU 0 (the BSP). */
474 static cpuset_t intr_cpus = CPUSET_T_INITIALIZER(0x1);
/* Next CPU to consider in the round-robin walk. */
475 static int current_cpu;
/*
 * Bind "iv" to the next CPU in round-robin order, then advance
 * current_cpu to the next CPU present in intr_cpus (wrapping at
 * mp_maxid; the wrap-to-zero line is elided from this listing).
 * Caller must hold intr_table_lock exclusively.
 */
478 intr_assign_next_cpu(struct intr_vector *iv)
482 sx_assert(&intr_table_lock, SA_XLOCKED);
485 * Assign this source to a CPU in a round-robin fashion.
487 pc = pcpu_find(current_cpu);
490 iv->iv_mid = pc->pc_mid;
491 iv->iv_ic->ic_assign(iv);
494 if (current_cpu > mp_maxid)
496 } while (!CPU_ISSET(current_cpu, &intr_cpus));
259 /* Attempt to bind the specified IRQ to the specified CPU. */
/*
 * Thin wrapper around intr_event_bind() with vector validation; the
 * actual retargeting happens in the intr_assign_cpu() callback.
 */
501 intr_bind(int vec, u_char cpu)
503 struct intr_vector *iv;
506 if (vec < 0 || vec >= IV_MAX)
508 sx_xlock(&intr_table_lock);
509 iv = &intr_vectors[vec];
511 sx_xunlock(&intr_table_lock);
514 error = intr_event_bind(iv->iv_event, cpu);
515 sx_xunlock(&intr_table_lock);
268 * Add a CPU to our mask of valid CPUs that can be destinations of
/*
 * Called as each AP comes up; panics on an out-of-range CPU ID (the
 * bounds test itself is elided from this listing).
 */
524 intr_add_cpu(u_int cpu)
528 panic("%s: Invalid CPU ID", __func__);
530 printf("INTR: Adding CPU %d as a target\n", cpu);
532 CPU_SET(cpu, &intr_cpus);
273 * Distribute all the interrupt sources among the available CPUs once the
537 * APs have been launched.
540 intr_shuffle_irqs(void *arg __unused)
543 struct intr_vector *iv;
546 /* Don't bother on UP. */
550 sx_xlock(&intr_table_lock)
552 for (i = 0; i < IV_MAX; i++) {
553 iv = &intr_vectors[i];
/* Only vectors with at least one registered handler. */
554 if (iv != NULL && iv->iv_refcnt > 0) {
556 * If this event is already bound to a CPU,
557 * then assign the source to that CPU instead
558 * of picking one via round-robin.
560 if (iv->iv_event->ie_cpu != NOCPU &&
561 (pc = pcpu_find(iv->iv_event->ie_cpu)) != NULL) {
562 iv->iv_mid = pc->pc_mid;
563 iv->iv_ic->ic_assign(iv);
565 intr_assign_next_cpu(iv);
568 sx_xunlock(&intr_table_lock);
/* Runs after the APs are started (SI_SUB_SMP). */
570 SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,