/*-
 * Copyright (c) 2001-2003
 *      Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 *      All rights reserved.
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ForeHE driver.
 *
 * Interrupt handler.
 */

#include "opt_inet.h"
#include "opt_natm.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_atm.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/if_atm.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/utopia/utopia.h>
#include <dev/hatm/if_hatmconf.h>
#include <dev/hatm/if_hatmreg.h>
#include <dev/hatm/if_hatmvar.h>

CTASSERT(sizeof(struct mbuf_page) == MBUF_ALLOC_SIZE);
CTASSERT(sizeof(struct mbuf0_chunk) == MBUF0_CHUNK);
CTASSERT(sizeof(struct mbuf1_chunk) == MBUF1_CHUNK);
CTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE);
CTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE);
CTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE);

CTASSERT(MBUF0_PER_PAGE <= 256);
CTASSERT(MBUF1_PER_PAGE <= 256);

static void hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group);

/*
 * Free an external mbuf to a list. We use atomic functions so that
 * we don't need a mutex for the list.
 *
 * Note that in general this algorithm is not safe when multiple readers
 * and writers are present. To quote from a mail from David Schultz
 * <das@freebsd.org>:
 *
 *      It looks like this is subject to the ABA problem.  For instance,
 *      suppose X, Y, and Z are the top things on the freelist and a
 *      thread attempts to make an allocation.  You set buf to X and load
 *      buf->link (Y) into a register.  Then the thread get preempted, and
 *      another thread allocates both X and Y, then frees X.  When the
 *      original thread gets the CPU again, X is still on top of the
 *      freelist, so the atomic operation succeeds.  However, the atomic
 *      op places Y on top of the freelist, even though Y is no longer
 *      free.
 *
 * We are, however, sure that we have only one thread that ever allocates
 * buffers, because the only place we're called from is the interrupt
 * handler. Under these circumstances the code looks safe.
 */
void
hatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf)
{
        for (;;) {
                buf->link = *list;
                if (atomic_cmpset_ptr((uintptr_t *)list, (uintptr_t)buf->link,
                    (uintptr_t)buf))
                        break;
        }
}

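/*
 * Allocate a buffer from the free list of the given group using the
 * same lock-free pop. If the list turns out to be empty, try to map a
 * new page of chunks and pop again; may still return NULL when no
 * memory is available. Only ever called from the interrupt handler,
 * so there is a single allocating thread (see the ABA note above).
 */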
static __inline struct mbufx_free *
hatm_ext_alloc(struct hatm_softc *sc, u_int g)
{
        struct mbufx_free *buf;

        for (;;) {
                if ((buf = sc->mbuf_list[g]) == NULL)
                        break;
                if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
                    (uintptr_t)buf, (uintptr_t)buf->link))
                        break;
        }
        if (buf == NULL) {
                hatm_mbuf_page_alloc(sc, g);
                for (;;) {
                        if ((buf = sc->mbuf_list[g]) == NULL)
                                break;
                        if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
                            (uintptr_t)buf, (uintptr_t)buf->link))
                                break;
                }
        }
        return (buf);
}

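/*
 * Queue naming used below, as suggested by the handler comments:
 * TBRQ - transmit buffer return queue, RBRQ - receive buffer return
 * queue, RBP - receive buffer pool.
 */
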
/*
 * Either the queue threshold was crossed or a TPD with the INTR bit set
 * was transmitted.
 */
static void
he_intr_tbrq(struct hatm_softc *sc, struct hetbrq *q, u_int group)
{
        uint32_t *tailp = &sc->hsp->group[group].tbrq_tail;
        u_int no;

        while (q->head != (*tailp >> 2)) {
                no = (q->tbrq[q->head].addr & HE_REGM_TBRQ_ADDR) >>
                    HE_REGS_TPD_ADDR;
                hatm_tx_complete(sc, TPD_ADDR(sc, no),
                    (q->tbrq[q->head].addr & HE_REGM_TBRQ_FLAGS));

                if (++q->head == q->size)
                        q->head = 0;
        }
        WRITE4(sc, HE_REGO_TBRQ_H(group), q->head << 2);
}

/*
 * DMA loader function for external mbuf page.
 */
static void
hatm_extbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error)
{
        if (error) {
                printf("%s: mapping error %d\n", __func__, error);
                return;
        }
        KASSERT(nsegs == 1,
            ("too many segments for DMA: %d", nsegs));
        KASSERT(segs[0].ds_addr <= 0xffffffffLU,
            ("phys addr too large %lx", (u_long)segs[0].ds_addr));

        *(uint32_t *)arg = segs[0].ds_addr;
}

/*
 * Allocate a page of external mbuf storage for the small pools.
 * Create a DMA map and load it. Put all the chunks onto the right
 * free list.
 */
static void
hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group)
{
        struct mbuf_page *pg;
        int err;
        u_int i;

        if (sc->mbuf_npages == sc->mbuf_max_pages)
                return;
        if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)
                return;

        err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map);
        if (err != 0) {
                if_printf(sc->ifp, "%s -- bus_dmamap_create: %d\n",
                    __func__, err);
                free(pg, M_DEVBUF);
                return;
        }
        err = bus_dmamap_load(sc->mbuf_tag, pg->hdr.map, pg, MBUF_ALLOC_SIZE,
            hatm_extbuf_helper, &pg->hdr.phys, BUS_DMA_NOWAIT);
        if (err != 0) {
                if_printf(sc->ifp, "%s -- mbuf mapping failed %d\n",
                    __func__, err);
                bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
                free(pg, M_DEVBUF);
                return;
        }

        sc->mbuf_pages[sc->mbuf_npages] = pg;

        if (group == 0) {
                struct mbuf0_chunk *c;

                pg->hdr.pool = 0;
                pg->hdr.nchunks = MBUF0_PER_PAGE;
                pg->hdr.chunksize = MBUF0_CHUNK;
                pg->hdr.hdroff = sizeof(c->storage);
                c = (struct mbuf0_chunk *)pg;
                for (i = 0; i < MBUF0_PER_PAGE; i++, c++) {
                        c->hdr.pageno = sc->mbuf_npages;
                        c->hdr.chunkno = i;
                        c->hdr.flags = 0;
                        hatm_ext_free(&sc->mbuf_list[0],
                            (struct mbufx_free *)c);
                }
        } else {
                struct mbuf1_chunk *c;

                pg->hdr.pool = 1;
                pg->hdr.nchunks = MBUF1_PER_PAGE;
                pg->hdr.chunksize = MBUF1_CHUNK;
                pg->hdr.hdroff = sizeof(c->storage);
                c = (struct mbuf1_chunk *)pg;
                for (i = 0; i < MBUF1_PER_PAGE; i++, c++) {
                        c->hdr.pageno = sc->mbuf_npages;
                        c->hdr.chunkno = i;
                        c->hdr.flags = 0;
                        hatm_ext_free(&sc->mbuf_list[1],
                            (struct mbufx_free *)c);
                }
        }
        sc->mbuf_npages++;
}

/*
 * Free callbacks for the external mbuf storage: mark the chunk as no
 * longer used and push it back onto its group's free list.
 */
static void
hatm_mbuf0_free(struct mbuf *m, void *buf, void *args)
{
        struct hatm_softc *sc = args;
        struct mbuf0_chunk *c = buf;

        KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
            ("freeing unused mbuf %x", c->hdr.flags));
        c->hdr.flags &= ~MBUF_USED;
        hatm_ext_free(&sc->mbuf_list[0], (struct mbufx_free *)c);
}

static void
hatm_mbuf1_free(struct mbuf *m, void *buf, void *args)
{
        struct hatm_softc *sc = args;
        struct mbuf1_chunk *c = buf;

        KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
            ("freeing unused mbuf %x", c->hdr.flags));
        c->hdr.flags &= ~MBUF_USED;
        hatm_ext_free(&sc->mbuf_list[1], (struct mbufx_free *)c);
}

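/*
 * DMA loader callback for a single receive mbuf: store the bus
 * address of the only segment into the caller-supplied word.
 * nsegs == 0 indicates a mapping error.
 */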
static void
hatm_mbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        uint32_t *ptr = (uint32_t *)arg;

        if (nsegs == 0) {
                printf("%s: error=%d\n", __func__, error);
                return;
        }
        KASSERT(nsegs == 1, ("too many segments for mbuf: %d", nsegs));
        KASSERT(segs[0].ds_addr <= 0xffffffffLU,
            ("phys addr too large %lx", (u_long)segs[0].ds_addr));

        *ptr = segs[0].ds_addr;
}

/*
 * Receive buffer pool interrupt. This means the number of entries in the
 * queue has dropped below the threshold. Try to supply new buffers.
 */
static void
he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large,
    u_int group)
{
        u_int ntail;
        struct mbuf *m;
        int error;
        struct mbufx_free *cf;
        struct mbuf_page *pg;
        struct mbuf0_chunk *buf0;
        struct mbuf1_chunk *buf1;

        DBG(sc, INTR, ("%s buffer supply threshold crossed for group %u",
           large ? "large" : "small", group));

        rbp->head = (READ4(sc, HE_REGO_RBP_S(large, group)) >> HE_REGS_RBP_HEAD)
            & (rbp->size - 1);

        for (;;) {
                if ((ntail = rbp->tail + 1) == rbp->size)
                        ntail = 0;
                if (ntail == rbp->head)
                        break;
                m = NULL;

                if (large) {
                        /* allocate the mbuf */
                        if ((m = m_getcl(M_NOWAIT, MT_DATA,
                            M_PKTHDR)) == NULL) {
                                if_printf(sc->ifp,
                                    "no mbuf clusters\n");
                                break;
                        }
                        m->m_data += MBUFL_OFFSET;

                        if (sc->lbufs[sc->lbufs_next] != NULL)
                                panic("hatm: lbufs full %u", sc->lbufs_next);
                        sc->lbufs[sc->lbufs_next] = m;

                        if ((error = bus_dmamap_load(sc->mbuf_tag,
                            sc->rmaps[sc->lbufs_next],
                            m->m_data, rbp->bsize, hatm_mbuf_helper,
                            &rbp->rbp[rbp->tail].phys, BUS_DMA_NOWAIT)) != 0)
                                panic("hatm: mbuf mapping failed %d", error);

                        bus_dmamap_sync(sc->mbuf_tag,
                            sc->rmaps[sc->lbufs_next],
                            BUS_DMASYNC_PREREAD);

                        rbp->rbp[rbp->tail].handle =
                            MBUF_MAKE_LHANDLE(sc->lbufs_next);

                        if (++sc->lbufs_next == sc->lbufs_size)
                                sc->lbufs_next = 0;

                } else if (group == 0) {
                        /*
                         * Allocate small buffer in group 0
                         */
                        if ((cf = hatm_ext_alloc(sc, 0)) == NULL)
                                break;
                        buf0 = (struct mbuf0_chunk *)cf;
                        pg = sc->mbuf_pages[buf0->hdr.pageno];
                        buf0->hdr.flags |= MBUF_CARD;
                        rbp->rbp[rbp->tail].phys = pg->hdr.phys +
                            buf0->hdr.chunkno * MBUF0_CHUNK + MBUF0_OFFSET;
                        rbp->rbp[rbp->tail].handle =
                            MBUF_MAKE_HANDLE(buf0->hdr.pageno,
                            buf0->hdr.chunkno);

                        bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
                            BUS_DMASYNC_PREREAD);

                } else if (group == 1) {
                        /*
                         * Allocate small buffer in group 1
                         */
                        if ((cf = hatm_ext_alloc(sc, 1)) == NULL)
                                break;
                        buf1 = (struct mbuf1_chunk *)cf;
                        pg = sc->mbuf_pages[buf1->hdr.pageno];
                        buf1->hdr.flags |= MBUF_CARD;
                        rbp->rbp[rbp->tail].phys = pg->hdr.phys +
                            buf1->hdr.chunkno * MBUF1_CHUNK + MBUF1_OFFSET;
                        rbp->rbp[rbp->tail].handle =
                            MBUF_MAKE_HANDLE(buf1->hdr.pageno,
                            buf1->hdr.chunkno);

                        bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
                            BUS_DMASYNC_PREREAD);

                } else
                        /* oops - unknown group */
                        break;

                DBG(sc, DMA, ("MBUF loaded: handle=%x m=%p phys=%x",
                    rbp->rbp[rbp->tail].handle, m, rbp->rbp[rbp->tail].phys));

                rbp->tail = ntail;
        }
        WRITE4(sc, HE_REGO_RBP_T(large, group),
            (rbp->tail << HE_REGS_RBP_TAIL));
}

/*
 * Extract the buffer and hand it to the receive routine.
 */
static struct mbuf *
hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle)
{
        u_int pageno;
        u_int chunkno;
        struct mbuf *m;

        if (handle & MBUF_LARGE_FLAG) {
                /* large buffer - sync and unload */
                MBUF_PARSE_LHANDLE(handle, handle);
                DBG(sc, RX, ("RX large handle=%x", handle));

                bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[handle],
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[handle]);

                m = sc->lbufs[handle];
                sc->lbufs[handle] = NULL;

                return (m);
        }

        MBUF_PARSE_HANDLE(handle, pageno, chunkno);

        DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle,
            pageno, chunkno));

        MGETHDR(m, M_NOWAIT, MT_DATA);

        if (group == 0) {
                struct mbuf0_chunk *c0;

                c0 = (struct mbuf0_chunk *)sc->mbuf_pages[pageno] + chunkno;
                KASSERT(c0->hdr.pageno == pageno, ("pageno = %u/%u",
                    c0->hdr.pageno, pageno));
                KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u",
                    c0->hdr.chunkno, chunkno));
                KASSERT(c0->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
                    pageno, chunkno));
                KASSERT(!(c0->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
                    pageno, chunkno));

                c0->hdr.flags |= MBUF_USED;
                c0->hdr.flags &= ~MBUF_CARD;

                if (m != NULL) {
                        m->m_ext.ext_cnt = &c0->hdr.ref_cnt;
                        MEXTADD(m, (void *)c0, MBUF0_SIZE,
                            hatm_mbuf0_free, c0, sc, M_PKTHDR, EXT_EXTREF);
                        m->m_data += MBUF0_OFFSET;
                } else
                        (void)hatm_mbuf0_free(NULL, c0, sc);

        } else {
                struct mbuf1_chunk *c1;

                c1 = (struct mbuf1_chunk *)sc->mbuf_pages[pageno] + chunkno;
                KASSERT(c1->hdr.pageno == pageno, ("pageno = %u/%u",
                    c1->hdr.pageno, pageno));
                KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u",
                    c1->hdr.chunkno, chunkno));
                KASSERT(c1->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
                    pageno, chunkno));
                KASSERT(!(c1->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
                    pageno, chunkno));

                c1->hdr.flags |= MBUF_USED;
                c1->hdr.flags &= ~MBUF_CARD;

                if (m != NULL) {
                        m->m_ext.ext_cnt = &c1->hdr.ref_cnt;
                        MEXTADD(m, (void *)c1, MBUF1_SIZE,
                            hatm_mbuf1_free, c1, sc, M_PKTHDR, EXT_EXTREF);
                        m->m_data += MBUF1_OFFSET;
                } else
                        (void)hatm_mbuf1_free(NULL, c1, sc);
        }

        return (m);
}

/*
 * Interrupt raised because a receive buffer was returned by the card.
 */
static void
he_intr_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
{
        struct he_rbrqen *e;
        uint32_t flags, tail;
        u_int cid, len;
        struct mbuf *m;

        for (;;) {
                tail = sc->hsp->group[group].rbrq_tail >> 3;

                if (rq->head == tail)
                        break;

                e = &rq->rbrq[rq->head];

                flags = e->addr & HE_REGM_RBRQ_FLAGS;
                if (!(flags & HE_REGM_RBRQ_HBUF_ERROR))
                        m = hatm_rx_buffer(sc, group, e->addr);
                else
                        m = NULL;

                cid = (e->len & HE_REGM_RBRQ_CID) >> HE_REGS_RBRQ_CID;
                len = 4 * (e->len & HE_REGM_RBRQ_LEN);

                hatm_rx(sc, cid, flags, m, len);

                if (++rq->head == rq->size)
                        rq->head = 0;
        }
        WRITE4(sc, HE_REGO_RBRQ_H(group), rq->head << 3);
}

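/*
 * Main interrupt handler: drain the interrupt queue, dispatch on the
 * event type of each entry and finally write back the head pointer to
 * acknowledge the queue to the card. Contains the workarounds for the
 * tail-pointer update and bad-ISW chip bugs (8.1.1) and the
 * back-to-back register access problem (8.1.2) referenced below.
 */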
void
hatm_intr(void *p)
{
        struct heirq *q = p;
        struct hatm_softc *sc = q->sc;
        u_int status;
        u_int tail;

        /* if we get a stray interrupt from an uninitialized card,
         * we cannot even lock before looking at the flag */
        if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
                return;

        mtx_lock(&sc->mtx);
        (void)READ4(sc, HE_REGO_INT_FIFO);

        tail = *q->tailp;
        if (q->head == tail) {
                /* workaround for tail pointer not updated bug (8.1.1) */
                DBG(sc, INTR, ("hatm: intr tailq not updated bug triggered"));

                /* read the tail pointer from the card */
                tail = READ4(sc, HE_REGO_IRQ_BASE(q->group)) &
                    HE_REGM_IRQ_BASE_TAIL;
                BARRIER_R(sc);

                sc->istats.bug_no_irq_upd++;
        }

        /* clear the interrupt */
        WRITE4(sc, HE_REGO_INT_FIFO, HE_REGM_INT_FIFO_CLRA);
        BARRIER_W(sc);

        while (q->head != tail) {
                status = q->irq[q->head];
                q->irq[q->head] = HE_REGM_ITYPE_INVALID;
                if (++q->head == (q->size - 1))
                        q->head = 0;

                switch (status & HE_REGM_ITYPE) {

                  case HE_REGM_ITYPE_TBRQ:
                        DBG(sc, INTR, ("TBRQ threshold %u", status & HE_REGM_IGROUP));
                        sc->istats.itype_tbrq++;
                        he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
                        break;

                  case HE_REGM_ITYPE_TPD:
                        DBG(sc, INTR, ("TPD ready %u", status & HE_REGM_IGROUP));
                        sc->istats.itype_tpd++;
                        he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
                        break;

                  case HE_REGM_ITYPE_RBPS:
                        sc->istats.itype_rbps++;
                        switch (status & HE_REGM_IGROUP) {

                          case 0:
                                he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
                                break;

                          case 1:
                                he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
                                break;

                          default:
                                if_printf(sc->ifp, "bad INTR RBPS%u\n",
                                    status & HE_REGM_IGROUP);
                                break;
                        }
                        break;

                  case HE_REGM_ITYPE_RBPL:
                        sc->istats.itype_rbpl++;
                        switch (status & HE_REGM_IGROUP) {

                          case 0:
                                he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
                                break;

                          default:
                                if_printf(sc->ifp, "bad INTR RBPL%u\n",
                                    status & HE_REGM_IGROUP);
                                break;
                        }
                        break;

                  case HE_REGM_ITYPE_RBRQ:
                        DBG(sc, INTR, ("INTERRUPT RBRQ %u", status & HE_REGM_IGROUP));
                        sc->istats.itype_rbrq++;
                        switch (status & HE_REGM_IGROUP) {

                          case 0:
                                he_intr_rbrq(sc, &sc->rbrq_0, 0);
                                break;

                          case 1:
                                if (sc->rbrq_1.size > 0) {
                                        he_intr_rbrq(sc, &sc->rbrq_1, 1);
                                        break;
                                }
                                /* FALLTHRU */

                          default:
                                if_printf(sc->ifp, "bad INTR RBRQ%u\n",
                                    status & HE_REGM_IGROUP);
                                break;
                        }
                        break;

                  case HE_REGM_ITYPE_RBRQT:
                        DBG(sc, INTR, ("INTERRUPT RBRQT %u", status & HE_REGM_IGROUP));
                        sc->istats.itype_rbrqt++;
                        switch (status & HE_REGM_IGROUP) {

                          case 0:
                                he_intr_rbrq(sc, &sc->rbrq_0, 0);
                                break;

                          case 1:
                                if (sc->rbrq_1.size > 0) {
                                        he_intr_rbrq(sc, &sc->rbrq_1, 1);
                                        break;
                                }
                                /* FALLTHRU */

                          default:
                                if_printf(sc->ifp, "bad INTR RBRQT%u\n",
                                    status & HE_REGM_IGROUP);
                                break;
                        }
                        break;

                  case HE_REGM_ITYPE_PHYS:
                        sc->istats.itype_phys++;
                        utopia_intr(&sc->utopia);
                        break;

#if HE_REGM_ITYPE_UNKNOWN != HE_REGM_ITYPE_INVALID
                  case HE_REGM_ITYPE_UNKNOWN:
                        sc->istats.itype_unknown++;
                        if_printf(sc->ifp, "bad interrupt\n");
                        break;
#endif

                  case HE_REGM_ITYPE_ERR:
                        sc->istats.itype_err++;
                        switch (status) {

                          case HE_REGM_ITYPE_PERR:
                                if_printf(sc->ifp, "parity error\n");
                                break;

                          case HE_REGM_ITYPE_ABORT:
                                if_printf(sc->ifp, "abort interrupt "
                                    "addr=0x%08x\n",
                                    READ4(sc, HE_REGO_ABORT_ADDR));
                                break;

                          default:
                                if_printf(sc->ifp,
                                    "bad interrupt type %08x\n", status);
                                break;
                        }
                        break;

                  case HE_REGM_ITYPE_INVALID:
                        /* this is the documented fix for the ISW bug (8.1.1).
                         * Note that the documented fix is partly wrong:
                         * the ISWs should be initialized to 0xf8, not 0xff */
                        sc->istats.bug_bad_isw++;
                        DBG(sc, INTR, ("hatm: invalid ISW bug triggered"));
                        he_intr_tbrq(sc, &sc->tbrq, 0);
                        he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
                        he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
                        he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
                        he_intr_rbrq(sc, &sc->rbrq_0, 0);
                        he_intr_rbrq(sc, &sc->rbrq_1, 1);
                        utopia_intr(&sc->utopia);
                        break;

                  default:
                        if_printf(sc->ifp, "bad interrupt type %08x\n",
                            status);
                        break;
                }
        }

        /* write back head to clear queue */
        WRITE4(sc, HE_REGO_IRQ_HEAD(0),
            ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
            (q->thresh << HE_REGS_IRQ_HEAD_THRESH) |
            (q->head << HE_REGS_IRQ_HEAD_HEAD));
        BARRIER_W(sc);

        /* workaround the back-to-back irq access problem (8.1.2) */
        (void)READ4(sc, HE_REGO_INT_FIFO);
        BARRIER_R(sc);

        mtx_unlock(&sc->mtx);
}