2 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * Machine dependent interrupt code for amd64. For amd64, we have to
34 * deal with different PICs. Thus, we use the passed in vector to lookup
35 * an interrupt source associated with that vector. The interrupt source
36 * describes which PIC the source belongs to and includes methods to handle
40 #include "opt_atpic.h"
43 #include <sys/param.h>
45 #include <sys/interrupt.h>
47 #include <sys/kernel.h>
49 #include <sys/mutex.h>
52 #include <sys/syslog.h>
53 #include <sys/systm.h>
55 #include <machine/clock.h>
56 #include <machine/intr_machdep.h>
57 #include <machine/smp.h>
63 #include <machine/segments.h>
64 #include <machine/frame.h>
65 #include <dev/ic/i8259.h>
66 #include <amd64/isa/icu.h>
67 #include <amd64/isa/isa.h>
/*
 * NOTE(review): this chunk appears to be a lossy extraction -- interior
 * lines (braces, locals, #ifdef markers) are missing throughout and each
 * line carries a stray numeric prefix from the original file's numbering.
 * Code is left byte-identical; only comments are added.
 */
/* Log each stray interrupt at most this many times (see intr_event_stray()). */
70 #define MAX_STRAY_LOG 5
/* Cast target for PIC mask/unmask methods handed to intr_event_create(). */
72 typedef void (*mask_fn)(void *);
/* Next free slot in the intrcnt[]/intrnames[] statistics arrays. */
74 static int intrcnt_index;
/* Registered interrupt sources, indexed by IRQ vector. */
75 static struct intsrc *interrupt_sources[NUM_IO_INTS];
/* Serializes updates to interrupt_sources[] and the registered-PIC list. */
76 static struct sx intr_table_lock;
/* Spin mutex guarding the intrcnt slot bookkeeping (see intrcnt_register()). */
77 static struct mtx intrcnt_lock;
/* All registered interrupt controllers (see intr_register_pic()). */
78 static STAILQ_HEAD(, pic) pics;
81 static void intr_eoi_src(void *arg);
82 static void intr_disab_eoi_src(void *arg);
83 static void intr_event_stray(void *cookie);
/* Nonzero once CPU assignment is allowed, i.e. after the APs are started. */
87 static int assign_cpu;
89 static void intr_assign_next_cpu(struct intsrc *isrc);
92 static int intr_assign_cpu(void *arg, u_char cpu);
93 static void intr_init(void *__dummy);
94 static int intr_pic_registered(struct pic *pic);
95 static void intrcnt_setname(const char *name, int index);
96 static void intrcnt_updatename(struct intsrc *is);
97 static void intrcnt_register(struct intsrc *is);
/*
 * Walk the global list of registered PICs; used by intr_register_pic()
 * to avoid duplicate registration.  (Fragment: the comparison/return
 * inside the loop is missing from this chunk -- presumably returns
 * nonzero when "pic" is found; confirm against the unabridged file.)
 */
100 intr_pic_registered(struct pic *pic)
104 STAILQ_FOREACH(p, &pics, pics) {
112 * Register a new interrupt controller (PIC). This is to support suspend
113 * and resume where we suspend/resume controllers rather than individual
114 * sources. This also allows controllers with no active sources (such as
115 * 8259As in a system using the APICs) to participate in suspend and resume.
118 intr_register_pic(struct pic *pic)
/* Hold the table lock so the duplicate check and the insert are atomic. */
122 sx_xlock(&intr_table_lock);
/* Skip the insert if this controller is already on the list. */
123 if (intr_pic_registered(pic))
126 STAILQ_INSERT_TAIL(&pics, pic, pics);
129 sx_xunlock(&intr_table_lock);
134 * Register a new interrupt source with the global interrupt system.
135 * The global interrupts need to be disabled when this function is
139 intr_register_source(struct intsrc *isrc)
/* The source's owning PIC must already be on the registered-PIC list. */
143 KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC"));
144 vector = isrc->is_pic->pic_vector(isrc);
/* Cheap unlocked duplicate check; re-checked under the lock below. */
145 if (interrupt_sources[vector] != NULL)
/*
 * NOTE(review): two intr_event_create() call variants follow --
 * presumably alternate branches of a missing #ifdef (the first passes
 * EOI/disable callbacks, the second does not); confirm against the
 * unabridged file.
 */
148 error = intr_event_create(&isrc->is_event, isrc, 0,
149 (mask_fn)isrc->is_pic->pic_enable_source,
150 intr_eoi_src, intr_disab_eoi_src, intr_assign_cpu, "irq%d:",
153 error = intr_event_create(&isrc->is_event, isrc, 0,
154 (mask_fn)isrc->is_pic->pic_enable_source, intr_assign_cpu, "irq%d:",
159 sx_xlock(&intr_table_lock);
/* Lost a race: another source claimed this vector; destroy our event. */
160 if (interrupt_sources[vector] != NULL) {
161 sx_xunlock(&intr_table_lock);
162 intr_event_destroy(isrc->is_event);
165 intrcnt_register(isrc);
166 interrupt_sources[vector] = isrc;
/* A freshly registered source starts with no handlers attached. */
167 isrc->is_handlers = 0;
168 sx_xunlock(&intr_table_lock);
/* Map an IRQ vector to its registered source (NULL if none registered). */
173 intr_lookup_source(int vector)
176 return (interrupt_sources[vector]);
/* Attach a filter and/or threaded handler to the event for "vector". */
180 intr_add_handler(const char *name, int vector, driver_filter_t filter,
181 driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep)
186 isrc = intr_lookup_source(vector);
189 error = intr_event_add_handler(isrc->is_event, name, filter, handler,
190 arg, intr_priority(flags), flags, cookiep);
192 sx_xlock(&intr_table_lock);
/* Refresh the intrnames entry to include the new handler's name. */
193 intrcnt_updatename(isrc);
/* First handler on this source: pick a CPU and bring the IRQ online. */
195 if (isrc->is_handlers == 1) {
198 intr_assign_next_cpu(isrc);
200 isrc->is_pic->pic_enable_intr(isrc);
201 isrc->is_pic->pic_enable_source(isrc);
203 sx_xunlock(&intr_table_lock);
/* Detach a handler previously added via intr_add_handler(). */
209 intr_remove_handler(void *cookie)
/* Resolve the owning source before the handler is torn down. */
214 isrc = intr_handler_source(cookie);
215 error = intr_event_remove_handler(cookie);
217 sx_xlock(&intr_table_lock);
/* Last handler gone: mask the source (no EOI) and disable it at the PIC. */
219 if (isrc->is_handlers == 0) {
220 isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI);
221 isrc->is_pic->pic_disable_intr(isrc);
223 intrcnt_updatename(isrc);
224 sx_xunlock(&intr_table_lock);
/* Forward trigger-mode/polarity configuration to the vector's owning PIC. */
230 intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol)
234 isrc = intr_lookup_source(vector);
237 return (isrc->is_pic->pic_config_intr(isrc, trig, pol));
/*
 * Dispatch a hardware interrupt to its event's handlers.  NOTE(review):
 * a second intr_execute_handlers() variant appears later in this chunk --
 * presumably the two sides of a missing #ifdef; confirm against the
 * unabridged file.
 */
242 intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
245 struct intr_event *ie;
251 * We count software interrupts when we process them. The
252 * code here follows previous practice, but there's an
253 * argument for counting hardware interrupts when they're
257 PCPU_INC(cnt.v_intr);
262 * XXX: We assume that IRQ 0 is only used for the ISA timer
265 vector = isrc->is_pic->pic_vector(isrc);
/* Nonzero return means no handler claimed the interrupt: treat as stray. */
269 if (intr_event_handle(ie, frame) != 0)
270 intr_event_stray(isrc);
/* Handle an interrupt for which no handler claimed responsibility. */
274 intr_event_stray(void *cookie)
280 * For stray interrupts, mask and EOI the source, bump the
281 * stray count, and log the condition.
283 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
284 (*isrc->is_straycount)++;
/* Rate-limit: log each stray up to MAX_STRAY_LOG, then one final notice. */
285 if (*isrc->is_straycount < MAX_STRAY_LOG)
286 log(LOG_ERR, "stray irq%d\n", isrc->is_pic->pic_vector(isrc));
287 else if (*isrc->is_straycount == MAX_STRAY_LOG)
289 "too many stray irq %d's: not logging anymore\n",
290 isrc->is_pic->pic_vector(isrc));
/* intr_event callback: send an EOI for the source via its PIC. */
294 intr_eoi_src(void *arg)
299 isrc->is_pic->pic_eoi_source(isrc);
/* intr_event callback: mask the source and send its EOI in one step. */
303 intr_disab_eoi_src(void *arg)
308 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
/*
 * Dispatch a hardware interrupt, running filter handlers inline and
 * scheduling an ithread for threaded handlers.  NOTE(review): second
 * variant of intr_execute_handlers() in this chunk -- presumably the
 * other side of a missing #ifdef; confirm against the unabridged file.
 */
312 intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
315 struct intr_event *ie;
316 struct intr_handler *ih;
317 int error, vector, thread, ret;
322 * We count software interrupts when we process them. The
323 * code here follows previous practice, but there's an
324 * argument for counting hardware interrupts when they're
328 PCPU_INC(cnt.v_intr);
333 * XXX: We assume that IRQ 0 is only used for the ISA timer
336 vector = isrc->is_pic->pic_vector(isrc);
341 * For stray interrupts, mask and EOI the source, bump the
342 * stray count, and log the condition.
/* No event or no handlers attached: this interrupt is stray. */
344 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) {
345 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
346 (*isrc->is_straycount)++;
347 if (*isrc->is_straycount < MAX_STRAY_LOG)
348 log(LOG_ERR, "stray irq%d\n", vector);
349 else if (*isrc->is_straycount == MAX_STRAY_LOG)
351 "too many stray irq %d's: not logging anymore\n",
357 * Execute fast interrupt handlers directly.
358 * To support clock handlers, if a handler registers
359 * with a NULL argument, then we pass it a pointer to
360 * a trapframe as its argument.
362 td->td_intr_nesting_level++;
/* Walk every handler; filters run here, threaded handlers are deferred. */
366 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
367 if (ih->ih_filter == NULL) {
371 CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
372 ih->ih_filter, ih->ih_argument == NULL ? frame :
373 ih->ih_argument, ih->ih_name);
/* NULL argument is the clock-handler convention: pass the trapframe. */
374 if (ih->ih_argument == NULL)
375 ret = ih->ih_filter(frame);
377 ret = ih->ih_filter(ih->ih_argument);
379 * Wrapper handler special case: see
380 * i386/intr_machdep.c::intr_execute_handlers()
/* A filter may also ask for its ithread to run. */
383 if (ret == FILTER_SCHEDULE_THREAD)
389 * If there are any threaded handlers that need to run,
390 * mask the source as well as sending it an EOI. Otherwise,
391 * just send it an EOI but leave it unmasked.
394 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
396 isrc->is_pic->pic_eoi_source(isrc);
398 /* Schedule the ithread if needed. */
400 error = intr_event_schedule_thread(ie);
401 KASSERT(error == 0, ("bad stray interrupt"));
404 td->td_intr_nesting_level--;
/*
 * Body fragment: invoke the resume method of every registered PIC.
 * (The enclosing function's signature -- presumably intr_resume() -- is
 * missing from this chunk; confirm against the unabridged file.)
 */
416 sx_xlock(&intr_table_lock);
417 STAILQ_FOREACH(pic, &pics, pics) {
/* A controller may leave its resume hook unset. */
418 if (pic->pic_resume != NULL)
419 pic->pic_resume(pic);
421 sx_xunlock(&intr_table_lock);
/*
 * Body fragment: invoke the suspend method of every registered PIC.
 * (The enclosing function's signature -- presumably intr_suspend() -- is
 * missing from this chunk; confirm against the unabridged file.)
 */
429 sx_xlock(&intr_table_lock);
430 STAILQ_FOREACH(pic, &pics, pics) {
/* A controller may leave its suspend hook unset. */
431 if (pic->pic_suspend != NULL)
432 pic->pic_suspend(pic);
434 sx_xunlock(&intr_table_lock);
/* intr_event bind callback: steer an interrupt source to a given CPU. */
438 intr_assign_cpu(void *arg, u_char cpu)
444 * Don't do anything during early boot. We will pick up the
445 * assignment once the APs are started.
/* NOCPU means "unbind" -- nothing to forward to the PIC in that case. */
447 if (assign_cpu && cpu != NOCPU) {
449 sx_xlock(&intr_table_lock);
/* Translate the logical CPU id to its local APIC id for the PIC. */
450 isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
451 sx_xunlock(&intr_table_lock);
/* Write "name" into the fixed-width intrnames[] slot at "index". */
460 intrcnt_setname(const char *name, int index)
463 snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s",
/* Refresh a source's intrnames[] entry from its event's full name. */
468 intrcnt_updatename(struct intsrc *is)
471 intrcnt_setname(is->is_event->ie_fullname, is->is_index);
/* Allocate statistics slots for a newly registered interrupt source. */
475 intrcnt_register(struct intsrc *is)
477 char straystr[MAXCOMLEN + 1];
479 KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__));
480 mtx_lock_spin(&intrcnt_lock);
/* Claim two consecutive intrcnt[] slots: one normal, one for strays. */
481 is->is_index = intrcnt_index;
483 snprintf(straystr, MAXCOMLEN + 1, "stray irq%d",
484 is->is_pic->pic_vector(is));
485 intrcnt_updatename(is);
486 is->is_count = &intrcnt[is->is_index];
/* The stray counter lives in the slot right after the normal counter. */
487 intrcnt_setname(straystr, is->is_index + 1);
488 is->is_straycount = &intrcnt[is->is_index + 1];
489 mtx_unlock_spin(&intrcnt_lock);
/* Hand out a generic intrcnt[] slot for a counter not tied to a source. */
493 intrcnt_add(const char *name, u_long **countp)
496 mtx_lock_spin(&intrcnt_lock);
497 *countp = &intrcnt[intrcnt_index];
498 intrcnt_setname(name, intrcnt_index);
500 mtx_unlock_spin(&intrcnt_lock);
/* One-time setup of the interrupt bookkeeping locks and name table. */
504 intr_init(void *dummy __unused)
/* Slot 0 is a placeholder until real sources register. */
507 intrcnt_setname("???", 0);
510 sx_init(&intr_table_lock, "intr sources");
511 mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
/* Run first in the interrupt-subsystem SYSINIT ordering. */
513 SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL);
516 /* Initialize the two 8259A's to a known-good shutdown state. */
/*
 * Body fragment: the enclosing function's signature is missing from this
 * chunk (presumably the no-ATPIC reset helper; confirm against the
 * unabridged file).  The writes below look like the standard ICW1..ICW4
 * initialization sequence followed by masking all lines.
 */
521 outb(IO_ICU1, ICW1_RESET | ICW1_IC4);
522 outb(IO_ICU1 + ICU_IMR_OFFSET, IDT_IO_INTS);
/* ICW3 for the master: slave attached on IR2 (bit 2 set). */
523 outb(IO_ICU1 + ICU_IMR_OFFSET, 1 << 2);
524 outb(IO_ICU1 + ICU_IMR_OFFSET, ICW4_8086);
/* Mask every interrupt line on the master. */
525 outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff);
526 outb(IO_ICU1, OCW3_SEL | OCW3_RR);
/* Same sequence for the slave 8259A. */
528 outb(IO_ICU2, ICW1_RESET | ICW1_IC4);
529 outb(IO_ICU2 + ICU_IMR_OFFSET, IDT_IO_INTS + 8);
/* ICW3 for the slave: cascade identity 2. */
530 outb(IO_ICU2 + ICU_IMR_OFFSET, 2);
531 outb(IO_ICU2 + ICU_IMR_OFFSET, ICW4_8086);
532 outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff);
533 outb(IO_ICU2, OCW3_SEL | OCW3_RR);
539 * Dump data about interrupt handlers
541 DB_SHOW_COMMAND(irqs, db_show_irqs)
543 struct intsrc **isrc;
/* The "v" modifier requests verbose per-event output. */
546 if (strcmp(modif, "v") == 0)
550 isrc = interrupt_sources;
/* Walk every vector slot; stop early if the ddb pager was quit. */
551 for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++)
553 db_dump_intr_event((*isrc)->is_event, verbose);
559 * Support for balancing interrupt sources across CPUs. For now we just
560 * allocate CPUs round-robin.
563 /* The BSP is always a valid target. */
564 static cpumask_t intr_cpus = (1 << 0);
/* Next CPU to consider in the round-robin scan. */
565 static int current_cpu;
568 intr_assign_next_cpu(struct intsrc *isrc)
572 * Assign this source to a local APIC in a round-robin fashion.
574 isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[current_cpu]);
/* Advance, wrapping past mp_maxid, until a CPU in intr_cpus is found. */
577 if (current_cpu > mp_maxid)
579 } while (!(intr_cpus & (1 << current_cpu)));
582 /* Attempt to bind the specified IRQ to the specified CPU. */
584 intr_bind(u_int vector, u_char cpu)
588 isrc = intr_lookup_source(vector);
/* Delegate to the event layer; it calls back into intr_assign_cpu(). */
591 return (intr_event_bind(isrc->is_event, cpu));
595 * Add a CPU to our mask of valid CPUs that can be destinations of
599 intr_add_cpu(u_int cpu)
/* Reject out-of-range CPU ids outright. */
603 panic("%s: Invalid CPU ID", __func__);
605 printf("INTR: Adding local APIC %d as a target\n",
/* Mark the CPU as an allowed round-robin target. */
608 intr_cpus |= (1 << cpu);
612 * Distribute all the interrupt sources among the available CPUs once the
613 * AP's have been launched.
616 intr_shuffle_irqs(void *arg __unused)
621 /* Don't bother on UP. */
625 /* Round-robin assign a CPU to each enabled source. */
626 sx_xlock(&intr_table_lock);
628 for (i = 0; i < NUM_IO_INTS; i++) {
629 isrc = interrupt_sources[i];
/* Only sources with at least one handler attached get a CPU. */
630 if (isrc != NULL && isrc->is_handlers > 0) {
632 * If this event is already bound to a CPU,
633 * then assign the source to that CPU instead
634 * of picking one via round-robin.
636 if (isrc->is_event->ie_cpu != NOCPU)
637 isrc->is_pic->pic_assign_cpu(isrc,
638 cpu_apic_ids[isrc->is_event->ie_cpu]);
640 intr_assign_next_cpu(isrc);
643 sx_xunlock(&intr_table_lock);
/* Runs at SI_SUB_SMP, i.e. after the APs have been launched. */
645 SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,