2 /* $NetBSD: interrupt.c,v 1.23 1998/02/24 07:38:01 thorpej Exp $ */
5 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
8 * Authors: Keith Bostic, Chris G. Demetriou
10 * Permission to use, copy, modify and distribute this software and
11 * its documentation is hereby granted, provided that both the copyright
12 * notice and this permission notice appear in all copies of the
13 * software, derivative works or modified versions, and any portions
14 * thereof, and that both notices appear in supporting documentation.
16 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
17 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
18 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
20 * Carnegie Mellon requests users of this software to return to
22 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
23 * School of Computer Science
24 * Carnegie Mellon University
25 * Pittsburgh PA 15213-3890
27 * any improvements or extensions that they make and grant Carnegie the
28 * rights to redistribute these changes.
31 * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center.
32 * Redistribute and modify at will, leaving only this additional copyright
38 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
44 #include <sys/vmmeter.h>
46 #include <sys/malloc.h>
49 #include <sys/mutex.h>
51 #include <sys/sysctl.h>
52 #include <sys/syslog.h>
54 #include <machine/clock.h>
55 #include <machine/cpu.h>
56 #include <machine/fpu.h>
57 #include <machine/frame.h>
58 #include <machine/intr.h>
59 #include <machine/md_var.h>
60 #include <machine/pcb.h>
61 #include <machine/reg.h>
62 #include <machine/sapicvar.h>
63 #include <machine/smp.h>
/* NetBSD-style event counter for clock interrupts; bumped under
 * EVCNT_COUNTERS in interrupt() below. */
66 struct evcnt clock_intr_evcnt;	/* event counter for clock intrs. */
68 #include <sys/interrupt.h>
69 #include <machine/intrcnt.h>
77 extern int mp_ipi_test;
/* Forward declaration: routes a hardware interrupt vector to its event. */
80 static void ia64_dispatch_intr(void *, u_int);
/*
 * Default performance-counter interrupt handler: merely logs that one
 * arrived.  (Fragment: return type and braces are not visible in this
 * view.)
 */
83 dummy_perf(unsigned long vector, struct trapframe *tf)
85 printf("performance interrupt!\n");
/* Hook for a real perf-interrupt handler; defaults to the dummy above. */
88 void (*perf_irq)(unsigned long, struct trapframe *) = dummy_perf;
/*
 * Per-CPU debug counters, exported read/write under the debug.* sysctl
 * tree: hardware interrupts (ints), clock ticks (clks), AST IPIs
 * (asts) and rendezvous IPIs (rdvs).
 */
90 static unsigned int ints[MAXCPU];
91 SYSCTL_OPAQUE(_debug, OID_AUTO, ints, CTLFLAG_RW, &ints, sizeof(ints), "IU",
94 static unsigned int clks[MAXCPU];
96 SYSCTL_OPAQUE(_debug, OID_AUTO, clks, CTLFLAG_RW, &clks, sizeof(clks), "IU",
/*
 * NOTE(review): "clks" appears to be registered twice under _debug
 * (SYSCTL_OPAQUE above and SYSCTL_INT here).  In this gapped view the
 * two may well be #ifdef SMP alternatives -- confirm against the full
 * file before assuming a duplicate-OID bug.
 */
99 SYSCTL_INT(_debug, OID_AUTO, clks, CTLFLAG_RW, clks, 0, "");
103 static unsigned int asts[MAXCPU];
104 SYSCTL_OPAQUE(_debug, OID_AUTO, asts, CTLFLAG_RW, &asts, sizeof(asts), "IU",
107 static unsigned int rdvs[MAXCPU];
108 SYSCTL_OPAQUE(_debug, OID_AUTO, rdvs, CTLFLAG_RW, &rdvs, sizeof(rdvs), "IU",
/* Read-only statistics on ITC (interval time counter) adjustment. */
112 SYSCTL_NODE(_debug, OID_AUTO, clock, CTLFLAG_RW, 0, "clock statistics");
114 static int adjust_edges = 0;
115 SYSCTL_INT(_debug_clock, OID_AUTO, adjust_edges, CTLFLAG_RD,
116 &adjust_edges, 0, "Number of times ITC got more than 12.5% behind");
118 static int adjust_excess = 0;
119 SYSCTL_INT(_debug_clock, OID_AUTO, adjust_excess, CTLFLAG_RD,
120 &adjust_excess, 0, "Total number of ignored ITC interrupts");
122 static int adjust_lost = 0;
123 SYSCTL_INT(_debug_clock, OID_AUTO, adjust_lost, CTLFLAG_RD,
124 &adjust_lost, 0, "Total number of lost ITC interrupts");
126 static int adjust_ticks = 0;
127 SYSCTL_INT(_debug_clock, OID_AUTO, adjust_ticks, CTLFLAG_RD,
128 &adjust_ticks, 0, "Total number of ITC interrupts with adjustment");
/*
 * Machine-level external interrupt entry point, invoked with the
 * trapframe of the interrupted context.  Handles the clock vector,
 * ExtINT and SMP IPIs inline, and hands hardware vectors to
 * ia64_dispatch_intr().
 *
 * NOTE(review): this routine is fragmentary in this view (several
 * declarations -- e.g. td, delta, count, inta -- plus conditions and
 * closing braces are missing); comments below state only what the
 * visible lines establish.
 */
131 interrupt(struct trapframe *tf)
134 volatile struct ia64_interrupt_block *ib = IA64_INTERRUPT_BLOCK;
135 uint64_t adj, clk, itc;
/* Restore the default floating-point status register for the kernel. */
140 ia64_set_fpsr(IA64_FPSR_DEFAULT);
/* Track interrupt nesting depth on the current thread. */
143 atomic_add_int(&td->td_intr_nesting_level, 1);
/* The interrupt vector was stashed in the trapframe's ifa field. */
145 vector = tf->tf_special.ifa;
149 * Handle ExtINT interrupts by generating an INTA cycle to
154 printf("ExtINT interrupt: vector=%u\n", (int)inta);
/* End-of-interrupt, then serialize so the write takes effect. */
156 __asm __volatile("mov cr.eoi = r0;; srlz.d");
160 } else if (vector == 15)
163 if (vector == CLOCK_VECTOR) {/* clock interrupt */
164 /* CTR0(KTR_INTR, "clock interrupt"); */
/* Snapshot the interval time counter to compute the next reload. */
166 itc = ia64_get_itc();
168 PCPU_INC(cnt.v_intr);
169 #ifdef EVCNT_COUNTERS
170 clock_intr_evcnt.ev_count++;
172 intrcnt[INTRCNT_CLOCK]++;
/* Per-CPU clock-tick counter (exported via the debug.clks sysctl). */
174 clks[PCPU_GET(cpuid)]++;
178 adj = PCPU_GET(clockadj);
179 clk = PCPU_GET(clock);
/* Catch up on every full clock period that has elapsed. */
182 while (delta >= ia64_clock_reload) {
183 /* Only the BSP runs the real clock */
184 if (PCPU_GET(cpuid) == 0)
185 hardclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
187 hardclock_cpu(TRAPF_USERMODE(tf));
189 profclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
190 statclock(TRAPF_USERMODE(tf));
191 delta -= ia64_clock_reload;
192 clk += ia64_clock_reload;
/* Re-arm the interval timer match register for the next tick. */
197 ia64_set_itm(itc + ia64_clock_reload - adj);
199 adjust_lost += count - 1;
/* More than 12.5% behind: shorten the next period by 1/16th. */
200 if (delta > (ia64_clock_reload >> 3)) {
203 adj = ia64_clock_reload >> 4;
210 PCPU_SET(clock, clk);
211 PCPU_SET(clockadj, adj);
/* SMP IPIs: AST, high-FP handoff, rendezvous, stop and test. */
216 } else if (vector == ipi_vector[IPI_AST]) {
217 asts[PCPU_GET(cpuid)]++;
218 CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
219 } else if (vector == ipi_vector[IPI_HIGH_FP]) {
/* Another CPU needs our high FP register state: save and release it. */
220 struct thread *thr = PCPU_GET(fpcurthread);
222 mtx_lock_spin(&thr->td_md.md_highfp_mtx);
223 save_high_fp(&thr->td_pcb->pcb_high_fp);
224 thr->td_pcb->pcb_fpcpu = NULL;
225 PCPU_SET(fpcurthread, NULL);
226 mtx_unlock_spin(&thr->td_md.md_highfp_mtx);
228 } else if (vector == ipi_vector[IPI_RENDEZVOUS]) {
229 rdvs[PCPU_GET(cpuid)]++;
230 CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
231 smp_rendezvous_action();
232 } else if (vector == ipi_vector[IPI_STOP]) {
/* Park this CPU until our bit shows up in started_cpus. */
233 cpumask_t mybit = PCPU_GET(cpumask);
235 savectx(PCPU_PTR(pcb));
236 atomic_set_int(&stopped_cpus, mybit);
237 while ((started_cpus & mybit) == 0)
239 atomic_clear_int(&started_cpus, mybit);
240 atomic_clear_int(&stopped_cpus, mybit);
241 } else if (vector == ipi_vector[IPI_TEST]) {
242 CTR1(KTR_SMP, "IPI_TEST, cpuid=%d", PCPU_GET(cpuid));
/* Anything else is a hardware interrupt: count and dispatch it. */
246 ints[PCPU_GET(cpuid)]++;
247 ia64_dispatch_intr(tf, vector);
/* EOI the current vector and read the next pending one from cr.ivr. */
250 __asm __volatile("mov cr.eoi = r0;; srlz.d");
251 vector = ia64_get_ivr();
256 atomic_subtract_int(&td->td_intr_nesting_level, 1);
/* Returning to user mode: Giant must not be held. */
258 if (TRAPF_USERMODE(tf)) {
261 mtx_assert(&Giant, MA_NOTOWNED);
267 * Hardware irqs have vectors starting at this offset.
269 #define IA64_HARDWARE_IRQ_BASE 0x20
/*
 * Per-vector interrupt bookkeeping (struct ia64_intr; declaration is
 * fragmentary here -- sapic and irq members are referenced elsewhere
 * in this file but not visible).
 */
272 struct intr_event *event; /* interrupt event */
273 volatile long *cntp; /* interrupt counter */
/* One slot per possible interrupt vector (0..255); NULL = unassigned. */
278 static struct ia64_intr *ia64_intrs[256];
/*
 * Post-handler (EOI) callback passed to intr_event_create(): signal
 * end-of-interrupt for this vector on its I/O SAPIC.  The vector is
 * smuggled through the void * argument.  (Fragment: return type,
 * braces and any NULL check on i are not visible here.)
 */
281 ia64_intr_eoi(void *arg)
283 u_int vector = (uintptr_t)arg;
286 i = ia64_intrs[vector];
288 sapic_eoi(i->sapic, vector);
/*
 * Pre-ithread (disable) callback: mask the source IRQ on the I/O
 * SAPIC, then EOI so no further deliveries occur while the threaded
 * handler runs.  (Fragment: return type and braces are not visible.)
 */
292 ia64_intr_mask(void *arg)
294 u_int vector = (uintptr_t)arg;
297 i = ia64_intrs[vector];
/* Mask first, then EOI -- order matters so the line stays quiesced. */
299 sapic_mask(i->sapic, i->irq);
300 sapic_eoi(i->sapic, vector);
/*
 * Post-ithread (enable) callback: re-enable the source IRQ on the I/O
 * SAPIC after the threaded handler has finished.  (Fragment: return
 * type and braces are not visible.)
 */
305 ia64_intr_unmask(void *arg)
307 u_int vector = (uintptr_t)arg;
310 i = ia64_intrs[vector];
312 sapic_unmask(i->sapic, i->irq);
/*
 * Register an interrupt handler (filter and/or threaded) for an IRQ.
 * Maps the IRQ to a vector, lazily creates the per-vector bookkeeping
 * and intr_event, programs the I/O SAPIC, and finally adds the handler
 * to the event.  Returns via *cookiep the handle used by
 * ia64_teardown_intr().
 *
 * NOTE(review): fragmentary -- error-return paths, the free() on the
 * losing side of the CAS race, and several declarations are missing
 * from this view.
 */
316 ia64_setup_intr(const char *name, int irq, driver_filter_t filter,
317 driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep)
325 /* Get the I/O SAPIC that corresponds to the IRQ. */
326 sa = sapic_lookup(irq);
331 * XXX - There's a priority implied by the choice of vector.
332 * We should therefore relate the vector to the interrupt type.
/* Fixed IRQ->vector mapping: hardware IRQs start at 0x20. */
334 vector = irq + IA64_HARDWARE_IRQ_BASE;
336 i = ia64_intrs[vector];
/* First handler on this vector: allocate bookkeeping and an event. */
338 i = malloc(sizeof(struct ia64_intr), M_DEVBUF, M_NOWAIT);
342 error = intr_event_create(&i->event, (void *)(uintptr_t)vector,
345 ia64_intr_eoi, ia64_intr_mask,
347 NULL, "irq%u:", irq);
/* Publish atomically; if we lose the race, use the winner's entry. */
353 if (!atomic_cmpset_ptr(&ia64_intrs[vector], NULL, i)) {
354 intr_event_destroy(i->event);
356 i = ia64_intrs[vector];
/* Hook up the statistics counter and (optionally) its display name. */
361 i->cntp = intrcnt + irq + INTRCNT_ISA_IRQ;
362 if (name != NULL && *name != '\0') {
363 /* XXX needs abstraction. Too error prone. */
364 intrname = intrnames +
365 (irq + INTRCNT_ISA_IRQ) * INTRNAME_LEN;
366 memset(intrname, ' ', INTRNAME_LEN - 1);
367 bcopy(name, intrname, strlen(name));
/* Route and enable the IRQ on its I/O SAPIC at the chosen vector. */
370 sapic_enable(i->sapic, irq, vector);
374 error = intr_event_add_handler(i->event, name, filter, handler, arg,
375 intr_priority(flags), flags, cookiep);
/*
 * Remove a handler previously installed by ia64_setup_intr(); the
 * cookie is the handle produced there.  Simply forwards to the
 * intr_event framework and returns its error code.
 */
380 ia64_teardown_intr(void *cookie)
383 return (intr_event_remove_handler(cookie));
/*
 * Dispatch a hardware interrupt vector: run the registered filter
 * handlers directly, schedule the ithread if any handler asks for it
 * (masking the source until the thread runs), and otherwise EOI.
 * Stray vectors (no event, or unhandled) are masked and logged.
 *
 * NOTE(review): fragmentary -- counter increment, stray-handling
 * details and some control flow are missing from this view.
 */
387 ia64_dispatch_intr(void *frame, u_int vector)
390 struct intr_event *ie; /* our interrupt event */
392 struct intr_handler *ih;
393 int error, thread, ret;
397 * Find the interrupt thread for this vector.
399 i = ia64_intrs[vector];
400 KASSERT(i != NULL, ("%s: unassigned vector", __func__));
405 KASSERT(ie != NULL, ("%s: interrupt without event", __func__));
/* Let the framework try first; non-zero means nobody claimed it. */
408 if (intr_event_handle(ie, frame) != 0) {
/* Stray interrupt: mask it off so it cannot storm, and log once. */
409 ia64_intr_mask((void *)(uintptr_t)vector);
410 log(LOG_ERR, "stray irq%u\n", i->irq);
414 * As an optimization, if an event has no handlers, don't
415 * schedule it to run.
417 if (TAILQ_EMPTY(&ie->ie_handlers))
421 * Execute all fast interrupt handlers directly without Giant. Note
422 * that this means that any fast interrupt handler must be MP safe.
427 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
/* Handlers without a filter need the ithread. */
428 if (ih->ih_filter == NULL) {
432 CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
433 ih->ih_filter, ih->ih_argument, ih->ih_name);
434 ret = ih->ih_filter(ih->ih_argument);
436 * Wrapper handler special case: see
437 * i386/intr_machdep.c::intr_execute_handlers()
440 if (ret == FILTER_SCHEDULE_THREAD)
/* Mask until the ithread has run, then schedule it. */
446 ia64_intr_mask((void *)(uintptr_t)vector);
447 error = intr_event_schedule_thread(ie);
448 KASSERT(error == 0, ("%s: impossible stray", __func__));
/* No thread needed: just EOI the vector. */
450 ia64_intr_eoi((void *)(uintptr_t)vector);
/*
 * DDB helper: print the assignment of one interrupt vector.  When the
 * vector is unassigned, print a line only if "always" is set
 * (presumably -- the guarding condition is not visible in this view).
 */
458 db_print_vector(u_int vector, int always)
462 i = ia64_intrs[vector];
464 db_printf("vector %u (%p): ", vector, i);
465 sapic_print(i->sapic, i->irq);
467 db_printf("vector %u: unassigned\n", vector);
/*
 * DDB "show vector" command: with an argument, decode it as a decimal
 * number typed in hex digits and print that one vector; without an
 * argument, print every assigned vector (0..255).
 */
470 DB_SHOW_COMMAND(vector, db_show_vector)
/* Re-interpret the hex-parsed address as the decimal the user typed. */
475 vector = ((addr >> 4) % 16) * 10 + (addr % 16);
477 db_printf("error: vector %u not in range [0..255]\n",
480 db_print_vector(vector, 1);
/* No argument: walk the whole vector table, skipping unassigned ones. */
482 for (vector = 0; vector < 256; vector++)
483 db_print_vector(vector, 0);