2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * Machine dependent interrupt code for x86. For x86, we have to
33 * deal with different PICs. Thus, we use the passed in vector to lookup
34 * an interrupt source associated with that vector. The interrupt source
35 * describes which PIC the source belongs to and includes methods to handle
39 #include "opt_atpic.h"
43 #include <sys/param.h>
45 #include <sys/interrupt.h>
47 #include <sys/kernel.h>
49 #include <sys/malloc.h>
50 #include <sys/mutex.h>
52 #include <sys/queue.h>
56 #include <sys/sysctl.h>
57 #include <sys/syslog.h>
58 #include <sys/systm.h>
59 #include <sys/taskqueue.h>
60 #include <sys/vmmeter.h>
61 #include <machine/clock.h>
62 #include <machine/intr_machdep.h>
63 #include <machine/smp.h>
69 #include <machine/segments.h>
70 #include <machine/frame.h>
71 #include <dev/ic/i8259.h>
72 #include <x86/isa/icu.h>
73 #include <isa/isareg.h>
/* Stop logging a stray IRQ after this many occurrences (see intr_execute_handlers). */
78 #define MAX_STRAY_LOG 5
/* Cast helper for PIC enable/EOI methods passed to intr_event_create(). */
80 typedef void (*mask_fn)(void *);
/* Next free slot in the intrcnt[]/intrnames[] counter arrays. */
82 static int intrcnt_index;
/* Per-vector interrupt sources, indexed by IRQ vector; sized to num_io_irqs. */
83 static struct intsrc **interrupt_sources;
/* Scratch copy of interrupt_sources, sorted by count for load balancing. */
85 static struct intsrc **interrupt_sorted;
/* hw.intrbalance: auto-balance interval in seconds; 0 disables balancing. */
86 static int intrbalance;
87 SYSCTL_INT(_hw, OID_AUTO, intrbalance, CTLFLAG_RW, &intrbalance, 0,
88 "Interrupt auto-balance interval (seconds). Zero disables.");
/* Periodic taskqueue task that runs intr_balance(). */
89 static struct timeout_task intrbalance_task;
/* Protects interrupt_sources[] and per-source handler state. */
91 static struct sx intrsrc_lock;
/* Protects the pics list of registered interrupt controllers. */
92 static struct mtx intrpic_lock;
/* Spin lock protecting intrcnt_index and the counter name table. */
93 static struct mtx intrcnt_lock;
/* All registered PICs, used for suspend/resume and source registration. */
94 static TAILQ_HEAD(pics_head, pic) pics;
/*
 * Without EARLY_AP_STARTUP, CPU assignment is deferred until the APs are
 * up; assign_cpu flags when it is safe to bind sources to CPUs.
 * (NOTE(review): matching #endif is outside this excerpt.)
 */
97 #if defined(SMP) && !defined(EARLY_AP_STARTUP)
98 static int assign_cpu;
/* Exported sizes of the intrcnt/intrnames arrays; updated once the real
 * arrays are allocated in intr_init_sources(). */
103 size_t sintrcnt = sizeof(intrcnt);
104 size_t sintrnames = sizeof(intrnames);
107 static MALLOC_DEFINE(M_INTR, "intr", "Interrupt Sources");
/* Forward declarations for the static helpers defined below. */
109 static int intr_assign_cpu(void *arg, int cpu);
110 static void intr_disable_src(void *arg);
111 static void intr_init(void *__dummy);
112 static int intr_pic_registered(struct pic *pic);
113 static void intrcnt_setname(const char *name, int index);
114 static void intrcnt_updatename(struct intsrc *is);
115 static void intrcnt_register(struct intsrc *is);
118 * SYSINIT levels for SI_SUB_INTR:
120 * SI_ORDER_FIRST: Initialize locks and pics TAILQ, xen_hvm_cpu_init
121 * SI_ORDER_SECOND: Xen PICs
122 * SI_ORDER_THIRD: Add I/O APIC PICs, alloc MSI and Xen IRQ ranges
123 * SI_ORDER_FOURTH: Add 8259A PICs
124 * SI_ORDER_FOURTH + 1: Finalize interrupt count and add interrupt sources
125 * SI_ORDER_MIDDLE: SMP interrupt counters
126 * SI_ORDER_ANY: Enable interrupts on BSP
/*
 * Return whether 'pic' is already on the global pics list by walking the
 * list and comparing entries.  Caller is expected to hold intrpic_lock.
 * (Body incomplete in this excerpt.)
 */
130 intr_pic_registered(struct pic *pic)
134 TAILQ_FOREACH(p, &pics, pics) {
142 * Register a new interrupt controller (PIC). This is to support suspend
143 * and resume where we suspend/resume controllers rather than individual
144 * sources. This also allows controllers with no active sources (such as
145 * 8259As in a system using the APICs) to participate in suspend and resume.
/*
 * Register an interrupt controller on the global pics list under
 * intrpic_lock, refusing duplicates (intr_pic_registered check).
 * Return value handling for the duplicate case is not visible here.
 */
148 intr_register_pic(struct pic *pic)
152 mtx_lock(&intrpic_lock);
153 if (intr_pic_registered(pic))
156 TAILQ_INSERT_TAIL(&pics, pic, pics);
159 mtx_unlock(&intrpic_lock);
164 * Allocate interrupt source arrays and register interrupt sources
165 * once the number of interrupts is known.
/*
 * Once num_io_irqs is final, allocate the per-vector source arrays and the
 * intrcnt/intrnames counter tables, then ask each registered PIC to
 * register its sources.  Runs at SI_SUB_INTR/SI_ORDER_FOURTH + 1.
 */
168 intr_init_sources(void *arg)
172 MPASS(num_io_irqs > 0);
174 interrupt_sources = mallocarray(num_io_irqs, sizeof(*interrupt_sources),
175 M_INTR, M_WAITOK | M_ZERO);
176 interrupt_sorted = mallocarray(num_io_irqs, sizeof(*interrupt_sorted),
177 M_INTR, M_WAITOK | M_ZERO);
/* Counter budget (nintrcnt), per the itemization below: */
180 * - 1 ??? dummy counter.
181 * - 2 counters for each I/O interrupt.
182 * - 1 counter for each CPU for lapic timer.
183 * - 1 counter for each CPU for the Hyper-V vmbus driver.
184 * - 8 counters for each CPU for IPI counters for SMP.
186 nintrcnt = 1 + num_io_irqs * 2 + mp_ncpus * 2;
/* The 8-per-CPU IPI counters are presumably SMP-conditional (guard not
 * visible in this excerpt). */
189 nintrcnt += 8 * mp_ncpus;
191 intrcnt = mallocarray(nintrcnt, sizeof(u_long), M_INTR, M_WAITOK |
193 intrnames = mallocarray(nintrcnt, MAXCOMLEN + 1, M_INTR, M_WAITOK |
/* Publish the real table sizes now that they are dynamically allocated. */
195 sintrcnt = nintrcnt * sizeof(u_long);
196 sintrnames = nintrcnt * (MAXCOMLEN + 1);
/* Slot 0 is the dummy counter noted above. */
198 intrcnt_setname("???", 0);
202 * NB: intrpic_lock is not held here to avoid LORs due to
203 * malloc() in intr_register_source(). However, we are still
204 * single-threaded at this point in startup so the list of
205 * PICs shouldn't change.
207 TAILQ_FOREACH(pic, &pics, pics) {
208 if (pic->pic_register_sources != NULL)
209 pic->pic_register_sources(pic);
212 SYSINIT(intr_init_sources, SI_SUB_INTR, SI_ORDER_FOURTH + 1, intr_init_sources,
216 * Register a new interrupt source with the global interrupt system.
217 * The global interrupts need to be disabled when this function is
/*
 * Register one interrupt source under its PIC-assigned vector: create the
 * intr_event (with the PIC's disable/enable/EOI methods as callbacks),
 * then publish it in interrupt_sources[] under intrsrc_lock.  Loses the
 * race gracefully: if another source claimed the vector between the
 * unlocked pre-check and the locked re-check, the new event is destroyed.
 * Error returns for the duplicate cases are not visible in this excerpt.
 */
221 intr_register_source(struct intsrc *isrc)
225 KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC"));
226 vector = isrc->is_pic->pic_vector(isrc);
227 KASSERT(vector < num_io_irqs, ("IRQ %d too large (%u irqs)", vector,
/* Unlocked fast-path check; re-checked under the lock below. */
229 if (interrupt_sources[vector] != NULL)
231 error = intr_event_create(&isrc->is_event, isrc, 0, vector,
232 intr_disable_src, (mask_fn)isrc->is_pic->pic_enable_source,
233 (mask_fn)isrc->is_pic->pic_eoi_source, intr_assign_cpu, "irq%d:",
237 sx_xlock(&intrsrc_lock);
238 if (interrupt_sources[vector] != NULL) {
239 sx_xunlock(&intrsrc_lock);
240 intr_event_destroy(isrc->is_event);
243 intrcnt_register(isrc);
244 interrupt_sources[vector] = isrc;
245 isrc->is_handlers = 0;
246 sx_xunlock(&intrsrc_lock);
/*
 * Look up the interrupt source for 'vector'; out-of-range vectors take the
 * early-out branch (presumably returning NULL — that line is not visible
 * in this excerpt).
 */
251 intr_lookup_source(int vector)
254 if (vector < 0 || vector >= num_io_irqs)
256 return (interrupt_sources[vector]);
/*
 * Add a filter/handler to the event of the source for 'vector'.  On the
 * first handler (is_handlers == 1) the source is enabled at the PIC and
 * its NUMA domain is recorded.  The counter name is refreshed to include
 * the new handler's name.
 */
260 intr_add_handler(const char *name, int vector, driver_filter_t filter,
261 driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep,
267 isrc = intr_lookup_source(vector);
270 error = intr_event_add_handler(isrc->is_event, name, filter, handler,
271 arg, intr_priority(flags), flags, cookiep);
273 sx_xlock(&intrsrc_lock);
274 intrcnt_updatename(isrc);
/* First handler for this source: unmask it at its PIC. */
276 if (isrc->is_handlers == 1) {
277 isrc->is_domain = domain;
278 isrc->is_pic->pic_enable_intr(isrc);
279 isrc->is_pic->pic_enable_source(isrc);
281 sx_xunlock(&intrsrc_lock);
/*
 * Remove a previously-added handler (by its cookie).  When the last
 * handler goes away (is_handlers == 0) the source is masked and disabled
 * at its PIC; PIC_NO_EOI because no interrupt is being serviced here.
 */
287 intr_remove_handler(void *cookie)
292 isrc = intr_handler_source(cookie);
293 error = intr_event_remove_handler(cookie);
295 sx_xlock(&intrsrc_lock);
297 if (isrc->is_handlers == 0) {
298 isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI);
299 isrc->is_pic->pic_disable_intr(isrc);
301 intrcnt_updatename(isrc);
302 sx_xunlock(&intrsrc_lock);
/*
 * Configure trigger mode and polarity for 'vector' by delegating to the
 * owning PIC's pic_config_intr method.
 */
308 intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol)
312 isrc = intr_lookup_source(vector);
315 return (isrc->is_pic->pic_config_intr(isrc, trig, pol));
/*
 * intr_event pre-ithread callback: mask the source and EOI it so the
 * ithread can run with the line quiesced.
 */
319 intr_disable_src(void *arg)
324 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
/*
 * Dispatch a hardware interrupt for 'isrc' to its intr_event.  If no
 * handler claims it (intr_event_handle() != 0), treat it as stray: mask
 * and EOI the source, bump the per-source stray counter, and rate-limit
 * the log messages to MAX_STRAY_LOG per source.
 */
328 intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
330 struct intr_event *ie;
334 * We count software interrupts when we process them. The
335 * code here follows previous practice, but there's an
336 * argument for counting hardware interrupts when they're
345 * XXX: We assume that IRQ 0 is only used for the ISA timer
348 vector = isrc->is_pic->pic_vector(isrc);
353 * For stray interrupts, mask and EOI the source, bump the
354 * stray count, and log the condition.
356 if (intr_event_handle(ie, frame) != 0) {
357 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
358 (*isrc->is_straycount)++;
359 if (*isrc->is_straycount < MAX_STRAY_LOG)
360 log(LOG_ERR, "stray irq%d\n", vector);
361 else if (*isrc->is_straycount == MAX_STRAY_LOG)
363 "too many stray irq %d's: not logging anymore\n",
/*
 * Resume all registered PICs in registration order, forwarding
 * suspend_cancelled so a controller can skip reprogramming when the
 * suspend never actually happened.
 */
369 intr_resume(bool suspend_cancelled)
376 mtx_lock(&intrpic_lock);
377 TAILQ_FOREACH(pic, &pics, pics) {
378 if (pic->pic_resume != NULL)
379 pic->pic_resume(pic, suspend_cancelled);
381 mtx_unlock(&intrpic_lock);
/*
 * NOTE(review): function header is not visible in this excerpt; this is
 * presumably intr_suspend().  Suspend all registered PICs in reverse
 * registration order (the mirror of intr_resume() above).
 */
389 mtx_lock(&intrpic_lock);
390 TAILQ_FOREACH_REVERSE(pic, &pics, pics_head, pics) {
391 if (pic->pic_suspend != NULL)
392 pic->pic_suspend(pic);
394 mtx_unlock(&intrpic_lock);
/*
 * intr_event callback to bind a source to a CPU.  With EARLY_AP_STARTUP
 * binding happens as soon as SMP is up; otherwise requests made before
 * assign_cpu is set are deferred and picked up when the APs start.  The
 * actual move is the PIC's pic_assign_cpu with the target's local APIC ID.
 */
398 intr_assign_cpu(void *arg, int cpu)
404 #ifdef EARLY_AP_STARTUP
405 MPASS(mp_ncpus == 1 || smp_started)
407 /* Nothing to do if there is only a single CPU. */
408 if (mp_ncpus > 1 && cpu != NOCPU) {
411 * Don't do anything during early boot. We will pick up the
412 * assignment once the APs are started.
414 if (assign_cpu && cpu != NOCPU) {
417 sx_xlock(&intrsrc_lock);
418 error = isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
421 sx_xunlock(&intrsrc_lock);
/*
 * Write 'name' into slot 'index' of the intrnames table; each slot is a
 * fixed MAXCOMLEN + 1 bytes and snprintf guarantees NUL termination.
 */
431 intrcnt_setname(const char *name, int index)
434 snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s",
/* Refresh a source's counter name from its event's full name (which
 * includes the attached handler names). */
439 intrcnt_updatename(struct intsrc *is)
442 intrcnt_setname(is->is_event->ie_fullname, is->is_index);
/*
 * Claim two adjacent intrcnt slots for a source under the spin lock:
 * one for delivered interrupts (is_count) and one for strays
 * (is_straycount), named "stray irqN".
 */
446 intrcnt_register(struct intsrc *is)
448 char straystr[MAXCOMLEN + 1];
450 KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__));
451 mtx_lock_spin(&intrcnt_lock);
/* Both slots must fit within the budget computed in intr_init_sources(). */
452 MPASS(intrcnt_index + 2 <= nintrcnt);
453 is->is_index = intrcnt_index;
455 snprintf(straystr, MAXCOMLEN + 1, "stray irq%d",
456 is->is_pic->pic_vector(is));
457 intrcnt_updatename(is);
458 is->is_count = &intrcnt[is->is_index];
459 intrcnt_setname(straystr, is->is_index + 1);
460 is->is_straycount = &intrcnt[is->is_index + 1];
461 mtx_unlock_spin(&intrcnt_lock);
/*
 * Allocate one named intrcnt slot for a caller outside the intsrc
 * machinery (e.g. lapic timer), returning a pointer to the counter via
 * *countp.  The index increment is not visible in this excerpt.
 */
465 intrcnt_add(const char *name, u_long **countp)
468 mtx_lock_spin(&intrcnt_lock);
469 MPASS(intrcnt_index < nintrcnt);
470 *countp = &intrcnt[intrcnt_index];
471 intrcnt_setname(name, intrcnt_index);
473 mtx_unlock_spin(&intrcnt_lock);
/*
 * Earliest interrupt SYSINIT (SI_ORDER_FIRST): initialize the three
 * locks used throughout this file.  The pics TAILQ init is not visible
 * in this excerpt but is documented to happen at this order.
 */
477 intr_init(void *dummy __unused)
481 mtx_init(&intrpic_lock, "intrpic", NULL, MTX_DEF);
482 sx_init(&intrsrc_lock, "intrsrc");
483 mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
485 SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL);
/*
 * Last interrupt SYSINIT (SI_ORDER_ANY): enable interrupts on the BSP.
 * The enable instruction itself is not visible in this excerpt.
 */
488 intr_init_final(void *dummy __unused)
492 * Enable interrupts on the BSP after all of the interrupt
493 * controllers are initialized. Device interrupts are still
494 * disabled in the interrupt controllers until interrupt
495 * handlers are registered. Interrupts are enabled on each AP
496 * after their first context switch.
500 SYSINIT(intr_init_final, SI_SUB_INTR, SI_ORDER_ANY, intr_init_final, NULL);
503 /* Initialize the two 8259A's to a known-good shutdown state. */
/*
 * NOTE(review): function header not visible here; presumably atpic_reset().
 * Standard ICW1-ICW4 init sequence for master (IO_ICU1) then slave
 * (IO_ICU2): base vector IDT_IO_INTS (+8 for the slave), cascade wiring
 * via ICU_SLAVEID, mode word, then mask all eight lines (0xff) and select
 * IRR reads via OCW3.
 */
508 outb(IO_ICU1, ICW1_RESET | ICW1_IC4);
509 outb(IO_ICU1 + ICU_IMR_OFFSET, IDT_IO_INTS);
510 outb(IO_ICU1 + ICU_IMR_OFFSET, IRQ_MASK(ICU_SLAVEID));
511 outb(IO_ICU1 + ICU_IMR_OFFSET, MASTER_MODE);
/* Mask every line on the master, then select IRR for status reads. */
512 outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff);
513 outb(IO_ICU1, OCW3_SEL | OCW3_RR);
515 outb(IO_ICU2, ICW1_RESET | ICW1_IC4);
516 outb(IO_ICU2 + ICU_IMR_OFFSET, IDT_IO_INTS + 8);
517 outb(IO_ICU2 + ICU_IMR_OFFSET, ICU_SLAVEID);
518 outb(IO_ICU2 + ICU_IMR_OFFSET, SLAVE_MODE);
/* Mask every line on the slave as well. */
519 outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff);
520 outb(IO_ICU2, OCW3_SEL | OCW3_RR);
524 /* Add a description to an active interrupt handler. */
/*
 * Attach 'descr' to handler 'ih' on the event for 'vector', then refresh
 * the counter name so the description shows up in vmstat -i output.
 */
526 intr_describe(u_int vector, void *ih, const char *descr)
531 isrc = intr_lookup_source(vector);
534 error = intr_event_describe_handler(isrc->is_event, ih, descr);
537 intrcnt_updatename(isrc);
/*
 * NOTE(review): function header not visible in this excerpt.  Walks every
 * registered source under intrsrc_lock and asks each PIC that supports it
 * to reprogram the source's pin (pic_reprogram_pin).
 */
547 sx_xlock(&intrsrc_lock);
548 for (v = 0; v < num_io_irqs; v++) {
549 is = interrupt_sources[v];
552 if (is->is_pic->pic_reprogram_pin != NULL)
553 is->is_pic->pic_reprogram_pin(is);
555 sx_xunlock(&intrsrc_lock);
560 * Dump data about interrupt handlers
/*
 * DDB "show irqs" command: iterate all sources and dump each event;
 * the "v" modifier enables verbose output.  Honors db_pager_quit so the
 * user can abort paging.
 */
562 DB_SHOW_COMMAND(irqs, db_show_irqs)
564 struct intsrc **isrc;
568 if (strcmp(modif, "v") == 0)
572 isrc = interrupt_sources;
573 for (i = 0; i < num_io_irqs && !db_pager_quit; i++, isrc++)
575 db_dump_intr_event((*isrc)->is_event, verbose);
581 * Support for balancing interrupt sources across CPUs. For now we just
582 * allocate CPUs round-robin.
/* CPUs eligible as interrupt targets; starts with just CPU 0 (the BSP). */
585 cpuset_t intr_cpus = CPUSET_T_INITIALIZER(0x1);
/* Per-NUMA-domain round-robin cursor into intr_cpus. */
586 static int current_cpu[MAXMEMDOM];
/*
 * NOTE(review): enclosing function header not visible; this loop advances
 * each domain's cursor off any CPU that is not an eligible interrupt
 * target within that domain.
 */
593 for (i = 0; i < vm_ndomains; i++) {
595 if (!CPU_ISSET(current_cpu[i], &intr_cpus) ||
596 !CPU_ISSET(current_cpu[i], &cpuset_domain[i]))
602 * Return the CPU that the next interrupt source should use. For now
603 * this just returns the next local APIC according to round-robin.
606 intr_next_cpu(int domain)
610 #ifdef EARLY_AP_STARTUP
611 MPASS(mp_ncpus == 1 || smp_started)
/* Single CPU: everything targets the BSP's local APIC. */
613 return (PCPU_GET(apic_id));
615 /* Leave all interrupts on the BSP during boot. */
617 return (PCPU_GET(apic_id));
/*
 * Take the APIC ID at the current cursor, then advance the cursor
 * (wrapping at mp_maxid) until it lands on a CPU that is both an
 * eligible interrupt target and in the requested domain.
 */
620 mtx_lock_spin(&icu_lock);
621 apic_id = cpu_apic_ids[current_cpu[domain]];
623 current_cpu[domain]++;
624 if (current_cpu[domain] > mp_maxid)
625 current_cpu[domain] = 0;
626 } while (!CPU_ISSET(current_cpu[domain], &intr_cpus) ||
627 !CPU_ISSET(current_cpu[domain], &cpuset_domain[domain]));
628 mtx_unlock_spin(&icu_lock);
632 /* Attempt to bind the specified IRQ to the specified CPU. */
/* Delegates to intr_event_bind(), which in turn invokes the
 * intr_assign_cpu callback registered at event creation. */
634 intr_bind(u_int vector, u_char cpu)
638 isrc = intr_lookup_source(vector);
641 return (intr_event_bind(isrc->is_event, cpu));
645 * Add a CPU to our mask of valid CPUs that can be destinations of
/*
 * Mark 'cpu' as an eligible interrupt target by setting it in intr_cpus;
 * panics on an invalid CPU ID (validity check not visible here).
 */
649 intr_add_cpu(u_int cpu)
653 panic("%s: Invalid CPU ID", __func__);
/* The printf is presumably gated on a verbosity flag not shown here. */
655 printf("INTR: Adding local APIC %d as a target\n",
658 CPU_SET(cpu, &intr_cpus);
661 #ifdef EARLY_AP_STARTUP
/*
 * SI_SUB_SMP hook in the EARLY_AP_STARTUP configuration; body not
 * visible in this excerpt.
 */
663 intr_smp_startup(void *arg __unused)
669 SYSINIT(intr_smp_startup, SI_SUB_SMP, SI_ORDER_SECOND, intr_smp_startup,
674 * Distribute all the interrupt sources among the available CPUs once the
675 * AP's have been launched.
/*
 * Round-robin every active source (is_handlers > 0) across the eligible
 * CPUs in its domain, honoring any explicit per-event binding already in
 * place.  Runs at SI_SUB_SMP in the non-EARLY_AP_STARTUP configuration.
 */
678 intr_shuffle_irqs(void *arg __unused)
684 /* Don't bother on UP. */
688 /* Round-robin assign a CPU to each enabled source. */
689 sx_xlock(&intrsrc_lock);
691 for (i = 0; i < num_io_irqs; i++) {
692 isrc = interrupt_sources[i];
693 if (isrc != NULL && isrc->is_handlers > 0) {
695 * If this event is already bound to a CPU,
696 * then assign the source to that CPU instead
697 * of picking one via round-robin. Note that
698 * this is careful to only advance the
699 * round-robin if the CPU assignment succeeds.
701 cpu = isrc->is_event->ie_cpu;
703 cpu = current_cpu[isrc->is_domain];
704 if (isrc->is_pic->pic_assign_cpu(isrc,
705 cpu_apic_ids[cpu]) == 0) {
/* Only consume a round-robin slot for unbound events. */
707 if (isrc->is_event->ie_cpu == NOCPU)
708 intr_next_cpu(isrc->is_domain);
712 sx_xunlock(&intrsrc_lock);
714 SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,
719 * TODO: Export this information in a non-MD fashion, integrate with vmstat -i.
/*
 * hw.intrs sysctl handler: emit one "name:irq @cpuN(domainM): count" line
 * per registered source into an sbuf while holding intrsrc_lock shared.
 */
722 sysctl_hw_intrs(SYSCTL_HANDLER_ARGS)
/* Wire the user buffer up front so we never fault with the lock held. */
729 error = sysctl_wire_old_buffer(req, 0);
733 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
734 sx_slock(&intrsrc_lock);
735 for (i = 0; i < num_io_irqs; i++) {
736 isrc = interrupt_sources[i];
739 sbuf_printf(&sbuf, "%s:%d @cpu%d(domain%d): %ld\n",
740 isrc->is_event->ie_fullname,
747 sx_sunlock(&intrsrc_lock);
748 error = sbuf_finish(&sbuf);
752 SYSCTL_PROC(_hw, OID_AUTO, intrs, CTLTYPE_STRING | CTLFLAG_RW,
753 0, 0, sysctl_hw_intrs, "A", "interrupt:number @cpu: count");
756 * Compare two, possibly NULL, entries in the interrupt source array
760 intrcmp(const void *one, const void *two)
762 const struct intsrc *i1, *i2;
764 i1 = *(const struct intsrc * const *)one;
765 i2 = *(const struct intsrc * const *)two;
766 if (i1 != NULL && i2 != NULL)
767 return (*i1->is_count - *i2->is_count);
776 * Balance IRQs across available CPUs according to load.
/*
 * Periodic taskqueue handler: snapshot the sources, sort them by counter
 * value (intrcmp), and reassign the unbound ones round-robin from most to
 * least loaded.  Re-arms itself; when intrbalance is 0 the interval-
 * disabled path is not visible in this excerpt, but the re-enqueue below
 * falls back to a 60-second poll.
 */
779 intr_balance(void *dummy __unused, int pending __unused)
786 interval = intrbalance;
791 * Sort interrupts according to count.
793 sx_xlock(&intrsrc_lock);
794 memcpy(interrupt_sorted, interrupt_sources, num_io_irqs *
795 sizeof(interrupt_sorted[0]));
796 qsort(interrupt_sorted, num_io_irqs, sizeof(interrupt_sorted[0]),
800 * Restart the scan from the same location to avoid moving in the
806 * Assign round-robin from most loaded to least.
808 for (i = num_io_irqs - 1; i >= 0; i--) {
809 isrc = interrupt_sorted[i];
/* Skip empty slots and events explicitly bound by the user. */
810 if (isrc == NULL || isrc->is_event->ie_cpu != NOCPU)
812 cpu = current_cpu[isrc->is_domain];
813 intr_next_cpu(isrc->is_domain);
814 if (isrc->is_cpu != cpu &&
815 isrc->is_pic->pic_assign_cpu(isrc,
816 cpu_apic_ids[cpu]) == 0)
819 sx_xunlock(&intrsrc_lock);
821 taskqueue_enqueue_timeout(taskqueue_thread, &intrbalance_task,
822 interval ? hz * interval : hz * 60);
/*
 * SI_SUB_SMP/SI_ORDER_ANY init: set up the intrbalance timeout task and
 * schedule its first run one second after boot.
 */
827 intr_balance_init(void *dummy __unused)
830 TIMEOUT_TASK_INIT(taskqueue_thread, &intrbalance_task, 0, intr_balance,
832 taskqueue_enqueue_timeout(taskqueue_thread, &intrbalance_task, hz);
834 SYSINIT(intr_balance_init, SI_SUB_SMP, SI_ORDER_ANY, intr_balance_init, NULL);
838 * Always route interrupts to the current processor in the UP case.
/* UP (non-SMP) variant of intr_next_cpu(): always the current CPU's APIC. */
841 intr_next_cpu(int domain)
844 return (PCPU_GET(apic_id));