1 /* $NetBSD: interrupt.c,v 1.23 1998/02/24 07:38:01 thorpej Exp $ */
3 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
6 * Authors: Keith Bostic, Chris G. Demetriou
8 * Permission to use, copy, modify and distribute this software and
9 * its documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
14 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
18 * Carnegie Mellon requests users of this software to return to
20 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
21 * School of Computer Science
22 * Carnegie Mellon University
23 * Pittsburgh PA 15213-3890
25 * any improvements or extensions that they make and grant Carnegie the
26 * rights to redistribute these changes.
29 * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center.
30 * Redistribute and modify at will, leaving only this additional copyright
34 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
35 /* __KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.23 1998/02/24 07:38:01 thorpej Exp $");*/
36 __FBSDID("$FreeBSD$");
38 #include <sys/param.h>
39 #include <sys/systm.h>
41 #include <sys/interrupt.h>
43 #include <sys/kernel.h>
46 #include <sys/malloc.h>
47 #include <sys/mutex.h>
49 #include <sys/sched.h>
51 #include <sys/unistd.h>
52 #include <sys/vmmeter.h>
54 #include <machine/bwx.h>
55 #include <machine/cpuconf.h>
56 #include <machine/frame.h>
57 #include <machine/intr.h>
58 #include <machine/md_var.h>
59 #include <machine/reg.h>
60 #include <machine/rpb.h>
61 #include <machine/smp.h>
64 struct evcnt clock_intr_evcnt;	/* event counter for clock intrs. */
66 #include <machine/intrcnt.h>
/*
 * Handshake flags between badaddr_read() and machine_check():
 * mc_expected is set while a probe read is in flight, mc_received is
 * set by machine_check() when the expected check actually arrives.
 */
69 volatile int mc_expected, mc_received;
/*
 * Default performance-counter interrupt handler: just report that one
 * arrived.  Installed as the initial value of perf_irq (below); a real
 * driver replaces the pointer.
 */
72 dummy_perf(unsigned long vector, struct trapframe *framep)
74 	printf("performance interrupt!\n");
/* Hook for the performance-counter interrupt; starts as the dummy above. */
77 void (*perf_irq)(unsigned long, struct trapframe *) = dummy_perf;
/* schedclk2 divides the clock interrupt rate down for stat/profclock. */
80 static u_int schedclk2;
81 static void alpha_clock_interrupt(struct trapframe *framep);
/*
 * interrupt() -- top-level interrupt dispatcher, entered from the
 * trap/PALcode glue.  a0 is the interrupt type (ALPHA_INTR_*); a1/a2 are
 * type-specific arguments (a1 is the vector for device interrupts);
 * framep is the trap frame pushed on entry.  Dispatches to the IPI,
 * clock, machine-check, device, performance-counter, or passive-release
 * handler, tracking interrupt nesting in the current thread.
 */
84 interrupt(a0, a1, a2, framep)
85 	unsigned long a0, a1, a2;
86 	struct trapframe *framep;
94 	 * Find our per-cpu globals.
/* PAL VAL register holds this CPU's pcpu pointer. */
99 	pcpup = (struct pcpu *) alpha_pal_rdval();
102 	td->td_md.md_kernnest++;
/* Track nesting so other code can tell it runs in interrupt context. */
105 	atomic_add_int(&td->td_intr_nesting_level, 1);
/* Without guard pages, do a cheap software check for stack overflow. */
107 #if KSTACK_GUARD_PAGES == 0
110 	if ((caddr_t) framep < (caddr_t) td->td_pcb + 1024) {
111 		panic("possible stack overflow\n");
/*
 * Stash the raw arguments in the frame; presumably for debugger/ddb
 * visibility -- TODO confirm against the trap-frame consumers.
 */
117 	framep->tf_regs[FRAME_TRAPARG_A0] = a0;
118 	framep->tf_regs[FRAME_TRAPARG_A1] = a1;
119 	framep->tf_regs[FRAME_TRAPARG_A2] = a2;
122 	case ALPHA_INTR_XPROC:	/* interprocessor interrupt */
123 		CTR0(KTR_INTR|KTR_SMP, "interprocessor interrupt");
124 		smp_handle_ipi(framep); /* note: lock not taken */
128 	case ALPHA_INTR_CLOCK:	/* clock interrupt */
129 		CTR0(KTR_INTR, "clock interrupt");
130 		alpha_clock_interrupt(framep);
133 	case ALPHA_INTR_ERROR:	/* Machine Check or Correctable Error */
/* Re-read MCES from PAL; platform handler gets first crack if present. */
134 		a0 = alpha_pal_rdmces();
135 		if (platform.mcheck_handler)
136 			(*platform.mcheck_handler)(a0, framep, a1, a2);
138 			machine_check(a0, framep, a1, a2);
141 	case ALPHA_INTR_DEVICE:	/* I/O device interrupt */
142 		PCPU_LAZY_INC(cnt.v_intr);
144 		(*platform.iointr)(framep, a1);
147 	case ALPHA_INTR_PERF:	/* performance counter interrupt */
148 		perf_irq(a1, framep);
151 	case ALPHA_INTR_PASSIVE:
153 		printf("passive release interrupt vec 0x%lx (ignoring)\n", a1);
158 		panic("unexpected interrupt: type 0x%lx vec 0x%lx a2 0x%lx\n",
162 	atomic_subtract_int(&td->td_intr_nesting_level, 1);
/*
 * Install the platform I/O-interrupt dispatch routine.  May only be
 * called once: a second installation attempt panics rather than
 * silently replacing the existing handler.
 */
167 	void (*niointr)(void *, unsigned long);
170 		panic("set iointr twice");
171 	platform.iointr = niointr;
/*
 * machine_check() -- decode and report a machine check or correctable
 * error.  mces is the value read from the PAL MCES register; vector and
 * param further identify the event; framep is the interrupted context.
 * Correctable errors are logged and acknowledged; an unexpected or
 * unknown machine check is reported in detail and ends in panic().
 */
176 machine_check(mces, framep, vector, param)
178 	struct trapframe *framep;
179 	unsigned long vector, param;
183 	/* Make sure it's an error we know about. */
184 	if ((mces & (ALPHA_MCES_MIP|ALPHA_MCES_SCE|ALPHA_MCES_PCE)) == 0) {
185 		type = "fatal machine check or error (unknown type)";
189 	/* Machine checks. */
190 	if (mces & ALPHA_MCES_MIP) {
191 		/* If we weren't expecting it, then we punt. */
193 			type = "unexpected machine check";
201 	/* System correctable errors. */
202 	if (mces & ALPHA_MCES_SCE)
203 		printf("Warning: received system correctable error.\n");
205 	/* Processor correctable errors. */
206 	if (mces & ALPHA_MCES_PCE)
207 		printf("Warning: received processor correctable error.\n");
209 	/* Clear pending machine checks and correctable errors */
210 	alpha_pal_wrmces(mces);
214 	/* Clear pending machine checks and correctable errors */
215 	alpha_pal_wrmces(mces);
/* Fatal path: dump everything useful before panicking. */
218 	printf("%s:\n", type);
220 	printf("    mces    = 0x%lx\n", mces);
221 	printf("    vector  = 0x%lx\n", vector);
222 	printf("    param   = 0x%lx\n", param);
223 	printf("    pc      = 0x%lx\n", framep->tf_regs[FRAME_PC]);
224 	printf("    ra      = 0x%lx\n", framep->tf_regs[FRAME_RA]);
225 	printf("    curproc = %p\n", curproc);
227 		printf("        pid = %d, comm = %s\n", curproc->p_pid,
/* Give the kernel debugger a chance before giving up. */
231 	kdb_trap(ALPHA_KENTRY_MM, mces, framep);
233 	panic("machine check");
/* badaddr(): probe only -- delegate to badaddr_read() discarding the value. */
241 	return(badaddr_read(addr, size, NULL));
/*
 * badaddr_read() -- probe a physical/bus address for validity by reading
 * it and trapping the resulting machine check, if any.  size selects the
 * access width (1/2/4/8 bytes); if rptr is non-NULL and the probe
 * succeeded, the value read is stored through it at the same width.
 * Returns non-zero (mc_received) if the address is bad.  Coordinates
 * with machine_check() via the mc_expected/mc_received flags.
 */
245 badaddr_read(addr, size, rptr)
252 	/* Get rid of any stale machine checks that have been waiting. */
255 	/* Tell the trap code to expect a machine check. */
259 	/* Read from the test address, and make sure the read happens. */
262 	case sizeof (u_int8_t):
/*
 * Pre-BWX EV5+ CPUs lack byte/word loads; use the ldbu/ldwu helpers
 * there, a plain volatile dereference otherwise.
 */
263 		if (alpha_implver() >= ALPHA_IMPLVER_EV5
264 		    && alpha_amask(ALPHA_AMASK_BWX) == 0)
265 			rcpt = ldbu((vm_offset_t)addr);
267 			rcpt = *(volatile u_int8_t *)addr;
270 	case sizeof (u_int16_t):
271 		if (alpha_implver() >= ALPHA_IMPLVER_EV5
272 		    && alpha_amask(ALPHA_AMASK_BWX) == 0)
273 			rcpt = ldwu((vm_offset_t)addr);
275 			rcpt = *(volatile u_int16_t *)addr;
278 	case sizeof (u_int32_t):
279 		rcpt = *(volatile u_int32_t *)addr;
282 	case sizeof (u_int64_t):
283 		rcpt = *(volatile u_int64_t *)addr;
287 		panic("badaddr: invalid size (%ld)\n", size);
290 	alpha_mb();		/* magic for ev5 2100A & maybe more */
292 	/* Make sure we took the machine check, if we caused one. */
295 	/* disallow further machine checks */
/* Probe succeeded and the caller wants the data: copy it out. */
298 	if (rptr && mc_received == 0) {
300 		case sizeof (u_int8_t):
301 			*(volatile u_int8_t *)rptr = rcpt;
304 		case sizeof (u_int16_t):
305 			*(volatile u_int16_t *)rptr = rcpt;
308 		case sizeof (u_int32_t):
309 			*(volatile u_int32_t *)rptr = rcpt;
312 		case sizeof (u_int64_t):
313 			*(volatile u_int64_t *)rptr = rcpt;
317 	/* Return non-zero (i.e. true) if it's a bad address. */
318 	return (mc_received);
/*
 * Per-vector interrupt bookkeeping: struct alpha_intr entries live in a
 * 31-bucket hash table keyed by HASHVEC(vector) and protected by the
 * alpha_intr_hash_lock spin mutex.
 */
321 #define HASHVEC(vector)	((vector) % 31)
323 LIST_HEAD(alpha_intr_list, alpha_intr);
326 	LIST_ENTRY(alpha_intr) list; /* chain handlers in this hash bucket */
327 	uintptr_t	vector;	/* vector to match */
328 	struct intr_event *ie;	/* interrupt event structure */
329 	volatile long	*cntp;	/* interrupt counter */
330 	void		(*disable)(uintptr_t); /* masks the source while its ithread runs */
333 static struct mtx alpha_intr_hash_lock;
334 static struct alpha_intr_list alpha_intr_hash[31];
336 static void	ithds_init(void *dummy);
/*
 * One-time SYSINIT hook: initialize the spin mutex guarding the
 * interrupt hash table before any handlers are registered.
 */
339 ithds_init(void *dummy)
342 	mtx_init(&alpha_intr_hash_lock, "intr table", NULL, MTX_SPIN);
344 SYSINIT(ithds_init, SI_SUB_INTR, SI_ORDER_SECOND, ithds_init, NULL);
/*
 * alpha_setup_intr() -- register an interrupt handler for a vector.
 * Looks up (or creates) the hash-table entry and its intr_event, then
 * adds the handler to the event.  disable/enable are source mask and
 * unmask routines; cntp is the statistics counter to bump on dispatch;
 * *cookiep receives the handle used by alpha_teardown_intr().
 * Returns 0 or an errno from the intr_event_* calls.
 */
347 alpha_setup_intr(const char *name, uintptr_t vector, driver_intr_t handler, void *arg,
348 		 enum intr_type flags, void **cookiep, volatile long *cntp,
349 		 void (*disable)(uintptr_t), void (*enable)(uintptr_t))
351 	int h = HASHVEC(vector);
352 	struct alpha_intr *i;
356 	 * XXX - Can we have more than one device on a vector?  If so, we have
357 	 * a race condition here that needs to be worked around similar to
358 	 * the fashion done in the i386 inthand_add() function.
361 	/* First, check for an existing hash table entry for this vector. */
362 	mtx_lock_spin(&alpha_intr_hash_lock);
363 	for (i = LIST_FIRST(&alpha_intr_hash[h]); i && i->vector != vector;
364 	     i = LIST_NEXT(i, list))
366 	mtx_unlock_spin(&alpha_intr_hash_lock);
369 		/* None was found, so create an entry. */
370 		i = malloc(sizeof(struct alpha_intr), M_DEVBUF, M_NOWAIT);
375 		i->disable = disable;
/* enable doubles as the event's unmask routine when the ithread finishes. */
376 		errcode = intr_event_create(&i->ie, (void *)vector, 0,
377 		    (void (*)(void *))enable, "intr:");
383 		mtx_lock_spin(&alpha_intr_hash_lock);
384 		LIST_INSERT_HEAD(&alpha_intr_hash[h], i, list);
385 		mtx_unlock_spin(&alpha_intr_hash_lock);
388 	/* Second, add this handler. */
389 	return (intr_event_add_handler(i->ie, name, handler, arg,
390 	    intr_priority(flags), flags, cookiep));
/*
 * alpha_teardown_intr() -- remove a handler previously registered with
 * alpha_setup_intr(); cookie is the value returned through cookiep.
 * Note the hash-table entry itself is not reclaimed here.
 */
394 alpha_teardown_intr(void *cookie)
397 	return (intr_event_remove_handler(cookie));
401  * XXX: Alpha doesn't count stray interrupts like some of the other archs.
/*
 * alpha_dispatch_intr() -- deliver a device interrupt for a vector.
 * Finds the vector's alpha_intr entry, runs any IH_FAST handlers
 * directly in interrupt context, and for threaded handlers disables the
 * source and schedules the event's ithread.
 */
404 alpha_dispatch_intr(void *frame, unsigned long vector)
406 	int h = HASHVEC(vector);
407 	struct alpha_intr *i;
408 	struct intr_event *ie;
409 	struct intr_handler *ih;
413 	 * Walk the hash bucket for this vector looking for this vector's
414 	 * interrupt structure.
416 	for (i = LIST_FIRST(&alpha_intr_hash[h]); i && i->vector != vector;
417 	     i = LIST_NEXT(i, list))
420 		/* No interrupt structure for this vector. */
425 	KASSERT(ie != NULL, ("interrupt structure without an event"));
428 	 * As an optimization, if an event has no handlers, don't
429 	 * schedule it to run.
431 	if (TAILQ_EMPTY(&ie->ie_handlers))
/* Bump the per-vector statistics counter supplied at setup time. */
434 	atomic_add_long(i->cntp, 1);
437 	 * It seems that we need to return from an interrupt back to PAL
438 	 * on the same CPU that received the interrupt, so pin the interrupted
439 	 * thread to the current CPU until we return from the interrupt.
443 	/* Execute all fast interrupt handlers directly. */
446 	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
447 		if (!(ih->ih_flags & IH_FAST)) {
451 		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
452 		    ih->ih_handler, ih->ih_argument, ih->ih_name);
453 		ih->ih_handler(ih->ih_argument);
457 	 * If the ithread needs to run, disable the source and schedule the
/* Mask the source; the event's enable routine unmasks it afterwards. */
463 		    "alpha_dispatch_intr: disabling vector 0x%x",
465 		i->disable(i->vector);
467 		error = intr_event_schedule_thread(ie);
468 		KASSERT(error == 0, ("got an impossible stray interrupt"));
/*
 * alpha_clock_interrupt() -- handle the periodic clock interrupt.
 * Counts the interrupt, then: on CPU 0 the platform clock routine runs
 * (which drives hardclock); other CPUs call hardclock_process() on the
 * frame.  Every 8th tick, schedclk2 gates calls to profclock/statclock
 * (hz 1024 / 8 = stathz 128, per the comment below).
 */
475 alpha_clock_interrupt(struct trapframe *framep)
478 	PCPU_LAZY_INC(cnt.v_intr);
/* Two counting schemes: NetBSD-style evcnt or the intrcnt[] array. */
479 #ifdef EVCNT_COUNTERS
480 	clock_intr_evcnt.ev_count++;
482 	intrcnt[INTRCNT_CLOCK]++;
484 	if (platform.clockintr) {
488 		 * Only one processor drives the actual timer.
490 		if (PCPU_GET(cpuid) == 0) {
492 			(*platform.clockintr)(framep);
493 			/* divide hz (1024) by 8 to get stathz (128) */
494 			if ((++schedclk2 & 0x7) == 0) {
496 					profclock((struct clockframe *)framep);
497 				statclock((struct clockframe *)framep);
501 			hardclock_process((struct clockframe *)framep);
/* Non-boot CPUs reuse the counter CPU 0 advances -- TODO confirm intent. */
502 			if ((schedclk2 & 0x7) == 0) {
504 					profclock((struct clockframe *)framep);
505 				statclock((struct clockframe *)framep);