2 /* $NetBSD: interrupt.c,v 1.23 1998/02/24 07:38:01 thorpej Exp $ */
5 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
8 * Authors: Keith Bostic, Chris G. Demetriou
10 * Permission to use, copy, modify and distribute this software and
11 * its documentation is hereby granted, provided that both the copyright
12 * notice and this permission notice appear in all copies of the
13 * software, derivative works or modified versions, and any portions
14 * thereof, and that both notices appear in supporting documentation.
16 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
17 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
18 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
20 * Carnegie Mellon requests users of this software to return to
22 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
23 * School of Computer Science
24 * Carnegie Mellon University
25 * Pittsburgh PA 15213-3890
27 * any improvements or extensions that they make and grant Carnegie the
28 * rights to redistribute these changes.
31 * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center.
32 * Redistribute and modify at will, leaving only this additional copyright
38 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
44 #include <sys/vmmeter.h>
46 #include <sys/malloc.h>
49 #include <sys/mutex.h>
51 #include <sys/sysctl.h>
53 #include <machine/clock.h>
54 #include <machine/cpu.h>
55 #include <machine/fpu.h>
56 #include <machine/frame.h>
57 #include <machine/intr.h>
58 #include <machine/md_var.h>
59 #include <machine/pcb.h>
60 #include <machine/reg.h>
61 #include <machine/sapicvar.h>
62 #include <machine/smp.h>
/* NetBSD-style event counter, bumped under #ifdef EVCNT_COUNTERS in interrupt(). */
65 struct evcnt clock_intr_evcnt;	/* event counter for clock intrs. */
67 #include <sys/interrupt.h>
68 #include <machine/intrcnt.h>
76 extern int mp_ipi_test;
/* Machine-check handshake flags -- NOTE(review): their users are not visible in this excerpt. */
79 volatile int mc_expected, mc_received;
/*
 * Default handler for performance-monitor interrupts: just logs the event.
 * Replaced at runtime through the perf_irq function pointer below.
 * NOTE(review): excerpt is line-sampled -- the return type line and the
 * function braces are elided here.
 */
82 dummy_perf(unsigned long vector, struct trapframe *tf)
84 	printf("performance interrupt!\n");
/* Hook allowing profiling/pmc code to override the perf interrupt handler. */
87 void (*perf_irq)(unsigned long, struct trapframe *) = dummy_perf;
/* Per-CPU count of hardware device interrupts (bumped in interrupt()). */
89 static unsigned int ints[MAXCPU];
90 SYSCTL_OPAQUE(_debug, OID_AUTO, ints, CTLFLAG_RW, &ints, sizeof(ints), "IU",
/* Per-CPU count of clock interrupts. */
93 static unsigned int clks[MAXCPU];
95 SYSCTL_OPAQUE(_debug, OID_AUTO, clks, CTLFLAG_RW, &clks, sizeof(clks), "IU",
/*
 * NOTE(review): a second registration of the "clks" OID follows.  In the
 * full file these two are presumably the branches of an #ifdef SMP/#else
 * whose preprocessor lines were dropped by the sampling -- confirm, since
 * registering the same sysctl name twice unconditionally would be a bug.
 */
98 SYSCTL_INT(_debug, OID_AUTO, clks, CTLFLAG_RW, clks, 0, "");
/* Per-CPU count of IPI_AST interrupts. */
102 static unsigned int asts[MAXCPU];
103 SYSCTL_OPAQUE(_debug, OID_AUTO, asts, CTLFLAG_RW, &asts, sizeof(asts), "IU",
/* Per-CPU count of IPI_RENDEZVOUS interrupts. */
106 static unsigned int rdvs[MAXCPU];
107 SYSCTL_OPAQUE(_debug, OID_AUTO, rdvs, CTLFLAG_RW, &rdvs, sizeof(rdvs), "IU",
/* debug.clock subtree: ITC/ITM drift-adjustment statistics, updated in interrupt(). */
111 SYSCTL_NODE(_debug, OID_AUTO, clock, CTLFLAG_RW, 0, "clock statistics");
113 static int adjust_edges = 0;
114 SYSCTL_INT(_debug_clock, OID_AUTO, adjust_edges, CTLFLAG_RD,
115     &adjust_edges, 0, "Number of times ITC got more than 12.5% behind");
117 static int adjust_excess = 0;
118 SYSCTL_INT(_debug_clock, OID_AUTO, adjust_excess, CTLFLAG_RD,
119     &adjust_excess, 0, "Total number of ignored ITC interrupts");
121 static int adjust_lost = 0;
122 SYSCTL_INT(_debug_clock, OID_AUTO, adjust_lost, CTLFLAG_RD,
123     &adjust_lost, 0, "Total number of lost ITC interrupts");
125 static int adjust_ticks = 0;
126 SYSCTL_INT(_debug_clock, OID_AUTO, adjust_ticks, CTLFLAG_RD,
127     &adjust_ticks, 0, "Total number of ITC interrupts with adjustment");
/*
 * interrupt() -- C entry point for external interrupts on ia64.
 * Dispatches the delivered vector: the clock tick, SMP IPIs, and
 * finally hardware device interrupts via ia64_dispatch_intr().
 * Returns TRAPF_USERMODE(tf) so the caller knows whether the trap
 * came from user mode.
 * NOTE(review): this excerpt is line-sampled -- the return type line,
 * declarations of td/delta/count/intr, several #else/#endif lines and
 * closing braces are not visible here.
 */
130 interrupt(u_int64_t vector, struct trapframe *tf)
133 	volatile struct ia64_interrupt_block *ib = IA64_INTERRUPT_BLOCK;
134 	uint64_t adj, clk, itc;
/* Reset the FP status register to the kernel default on entry. */
138 	ia64_set_fpsr(IA64_FPSR_DEFAULT);
/* Track interrupt nesting on the current thread. */
141 	atomic_add_int(&td->td_intr_nesting_level, 1);
144 	 * Handle ExtINT interrupts by generating an INTA cycle to
/* Reading ib_inta performs the INTA cycle that fetches the real vector. */
148 		vector = ib->ib_inta;
149 		printf("ExtINT interrupt: vector=%ld\n", vector);
154 	if (vector == CLOCK_VECTOR) {/* clock interrupt */
155 		/* CTR0(KTR_INTR, "clock interrupt"); */
157 		PCPU_LAZY_INC(cnt.v_intr);
158 #ifdef EVCNT_COUNTERS
159 		clock_intr_evcnt.ev_count++;
/* presumably the #else branch of EVCNT_COUNTERS -- preprocessor lines elided */
161 		intrcnt[INTRCNT_CLOCK]++;
163 		clks[PCPU_GET(cpuid)]++;
/*
 * Re-arm the interval timer: program the next match (ITM) one reload
 * period past the current ITC, minus the accumulated adjustment.
 */
167 		adj = PCPU_GET(clockadj);
168 		itc = ia64_get_itc();
169 		ia64_set_itm(itc + ia64_clock_reload - adj);
170 		clk = PCPU_GET(clock);
/* Replay one tick per elapsed reload period, catching up lost ticks. */
173 		while (delta >= ia64_clock_reload) {
174 			/* Only the BSP runs the real clock */
175 			if (PCPU_GET(cpuid) == 0)
176 				hardclock((struct clockframe *)tf);
/* APs run only the per-CPU portion -- the else line is elided here. */
178 				hardclock_process((struct clockframe *)tf);
180 				profclock((struct clockframe *)tf);
181 				statclock((struct clockframe *)tf);
182 			delta -= ia64_clock_reload;
183 			clk += ia64_clock_reload;
/* Bookkeeping: ticks replayed beyond the first count as "lost". */
189 			adjust_lost += count - 1;
190 		if (delta > (ia64_clock_reload >> 3)) {
/* More than 12.5% behind: nudge the next ITM by 1/16 of a period. */
193 			adj = ia64_clock_reload >> 4;
200 		PCPU_SET(clock, clk);
201 		PCPU_SET(clockadj, adj);
/* --- SMP inter-processor interrupts --- */
206 	} else if (vector == ipi_vector[IPI_AST]) {
207 		asts[PCPU_GET(cpuid)]++;
208 		CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
209 	} else if (vector == ipi_vector[IPI_HIGH_FP]) {
/* Another CPU wants our high-FP register state saved and released. */
210 		struct thread *thr = PCPU_GET(fpcurthread);
212 			mtx_lock_spin(&thr->td_md.md_highfp_mtx);
213 			save_high_fp(&thr->td_pcb->pcb_high_fp);
214 			thr->td_pcb->pcb_fpcpu = NULL;
215 			PCPU_SET(fpcurthread, NULL);
216 			mtx_unlock_spin(&thr->td_md.md_highfp_mtx);
218 	} else if (vector == ipi_vector[IPI_RENDEZVOUS]) {
219 		rdvs[PCPU_GET(cpuid)]++;
220 		CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
221 		smp_rendezvous_action();
222 	} else if (vector == ipi_vector[IPI_STOP]) {
/* Park this CPU until the master sets our bit in started_cpus. */
224 		cpumask_t mybit = PCPU_GET(cpumask);
226 		intr = intr_disable();
227 		savectx(PCPU_GET(pcb));
228 		atomic_set_int(&stopped_cpus, mybit);
229 		while ((started_cpus & mybit) == 0)
231 		atomic_clear_int(&started_cpus, mybit);
232 		atomic_clear_int(&stopped_cpus, mybit);
234 	} else if (vector == ipi_vector[IPI_TEST]) {
235 		CTR1(KTR_SMP, "IPI_TEST, cpuid=%d", PCPU_GET(cpuid));
/* Anything else is a hardware device interrupt. */
239 		ints[PCPU_GET(cpuid)]++;
240 		ia64_dispatch_intr(tf, vector);
244 	atomic_subtract_int(&td->td_intr_nesting_level, 1);
/* Nonzero when the trap came from user mode. */
245 	return (TRAPF_USERMODE(tf));
249  * Hardware irqs have vectors starting at this offset.
251 #define IA64_HARDWARE_IRQ_BASE	0x20
/* Per-vector bookkeeping -- NOTE(review): the "struct ia64_intr {" header line is elided. */
254 	struct intr_event *event;	/* interrupt event */
255 	volatile long *cntp;		/* interrupt counter */
/* Spin lock protecting the ia64_intrs[] vector table below (see ithds_init). */
258 static struct mtx ia64_intrs_lock;
259 static struct ia64_intr *ia64_intrs[256];
/* SAPIC descriptors discovered at boot; defined in the sapic code. */
261 extern struct sapic *ia64_sapics[];
262 extern int ia64_sapic_count;
/*
 * One-time initialization of the interrupt-table spin lock, run via
 * the SYSINIT below at SI_SUB_INTR / SI_ORDER_SECOND.
 * NOTE(review): return type line and braces are elided in this excerpt.
 */
265 ithds_init(void *dummy)
268 	mtx_init(&ia64_intrs_lock, "intr table", NULL, MTX_SPIN);
270 SYSINIT(ithds_init, SI_SUB_INTR, SI_ORDER_SECOND, ithds_init, NULL);
/*
 * Send an end-of-interrupt for `vector' to the SAPIC that services the
 * corresponding hardware IRQ (vector - IA64_HARDWARE_IRQ_BASE).
 * NOTE(review): return type, local declarations and braces are elided.
 */
273 ia64_send_eoi(uintptr_t vector)
277 	irq = vector - IA64_HARDWARE_IRQ_BASE;
278 	for (i = 0; i < ia64_sapic_count; i++) {
279 		struct sapic *sa = ia64_sapics[i];
/* sa_base..sa_limit is the IRQ range this SAPIC serves. */
280 		if (irq >= sa->sa_base && irq <= sa->sa_limit)
281 			sapic_eoi(sa, vector);
/*
 * Install `handler' for hardware IRQ `irq': look up (or create) the
 * per-vector ia64_intr record and its intr_event, register the handler
 * via intr_event_add_handler(), and finally unmask the IRQ at the SAPIC.
 * NOTE(review): excerpt is line-sampled -- the return type, the NULL
 * check after the table lookup, error paths and several braces are not
 * visible here.
 */
286 ia64_setup_intr(const char *name, int irq, driver_intr_t handler, void *arg,
287     enum intr_type flags, void **cookiep, volatile long *cntp)
291 	intptr_t vector = irq + IA64_HARDWARE_IRQ_BASE;
295 	 * XXX - Can we have more than one device on a vector?  If so, we have
296 	 * a race condition here that needs to be worked around similar to
297 	 * the fashion done in the i386 inthand_add() function.
300 	/* First, check for an existing hash table entry for this vector. */
301 	mtx_lock_spin(&ia64_intrs_lock);
302 	i = ia64_intrs[vector];
303 	mtx_unlock_spin(&ia64_intrs_lock);
306 		/* None was found, so create an entry. */
307 		i = malloc(sizeof(struct ia64_intr), M_DEVBUF, M_NOWAIT);
/* Point the event's counter at the intrcnt[] slot for this IRQ. */
311 			i->cntp = intrcnt + irq + INTRCNT_ISA_IRQ;
314 		if (name != NULL && *name != '\0') {
315 			/* XXX needs abstraction. Too error phrone. */
316 			intrname = intrnames + (irq + INTRCNT_ISA_IRQ) *
/* Blank-pad the name slot, then copy the name in (no NUL terminator kept). */
318 			memset(intrname, ' ', INTRNAME_LEN - 1);
319 			bcopy(name, intrname, strlen(name));
/* ia64_send_eoi doubles as the event's post-handler EOI hook. */
321 		errcode = intr_event_create(&i->event, (void *)vector, 0,
322 		    (void (*)(void *))ia64_send_eoi, "intr:");
/* Publish the new entry under the table lock. */
328 		mtx_lock_spin(&ia64_intrs_lock);
329 		ia64_intrs[vector] = i;
330 		mtx_unlock_spin(&ia64_intrs_lock);
333 	/* Second, add this handler. */
334 	errcode = intr_event_add_handler(i->event, name, handler, arg,
335 	    intr_priority(flags), flags, cookiep);
/* Finally, unmask the interrupt at the SAPIC. */
339 	return (sapic_enable(irq, vector));
/*
 * Detach a handler previously installed by ia64_setup_intr(); `cookie'
 * is the value returned through cookiep there.
 * NOTE(review): return type line and braces are elided in this excerpt.
 */
343 ia64_teardown_intr(void *cookie)
346 	return (intr_event_remove_handler(cookie));
/*
 * Dispatch a hardware device interrupt: bump its counter, run all
 * IH_FAST handlers inline, schedule the interrupt thread for the rest,
 * and send the EOI.
 * NOTE(review): excerpt is line-sampled -- the return type, the
 * ia64_intr NULL check, the thread/fast bookkeeping and several braces
 * are not visible here.
 */
350 ia64_dispatch_intr(void *frame, unsigned long vector)
353 	struct intr_event *ie;			/* our interrupt event */
354 	struct intr_handler *ih;
358 	 * Find the interrupt thread for this vector.
360 	i = ia64_intrs[vector];
362 		return;			/* no event for this vector */
/* Count this interrupt in the intrcnt[] slot chosen at setup time. */
365 	atomic_add_long(i->cntp, 1);
368 	KASSERT(ie != NULL, ("interrupt vector without an event"));
371 	 * As an optimization, if an event has no handlers, don't
372 	 * schedule it to run.
374 	if (TAILQ_EMPTY(&ie->ie_handlers))
378 	 * Execute all fast interrupt handlers directly without Giant.  Note
379 	 * that this means that any fast interrupt handler must be MP safe.
383 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
/* Non-fast handlers are deferred to the interrupt thread below. */
384 		if (!(ih->ih_flags & IH_FAST)) {
388 		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
389 		    ih->ih_handler, ih->ih_argument, ih->ih_name);
390 		ih->ih_handler(ih->ih_argument);
/* Hand the remaining (threaded) handlers to the interrupt thread. */
394 		error = intr_event_schedule_thread(ie);
395 		KASSERT(error == 0, ("got an impossible stray interrupt"));
397 		ia64_send_eoi(vector);
/*
 * DDB helper: print the SAPIC redirection entry that services the
 * hardware IRQ behind `vector'.
 * NOTE(review): return type, local declarations and braces are elided.
 */
404 db_show_vector(int vector)
408 	irq = vector - IA64_HARDWARE_IRQ_BASE;
409 	for (i = 0; i < ia64_sapic_count; i++) {
410 		struct sapic *sa = ia64_sapics[i];
411 		if (irq >= sa->sa_base && irq <= sa->sa_limit)
412 			sapic_print(sa, irq - sa->sa_base);
/*
 * DDB "show irq" command.  With an argument, decode it and show that
 * single vector; with no argument, dump all 64 hardware vectors.
 * NOTE(review): the have-address branch and braces are elided here.
 */
416 DB_SHOW_COMMAND(irq, db_show_irq)
/* Reinterpret the hex digits of `addr' as decimal digits: 0x22 -> 22. */
421 		vector = ((addr >> 4) % 16) * 10 + (addr % 16);
422 		db_show_vector(vector);
/* No argument: walk every hardware interrupt vector. */
424 		for (vector = IA64_HARDWARE_IRQ_BASE;
425 		    vector < IA64_HARDWARE_IRQ_BASE + 64; vector++)
426 			db_show_vector(vector);