/*
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 *	All rights reserved.
 *
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ForeHE driver - interrupt handling.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_atm.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/if_atm.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/utopia/utopia.h>
#include <dev/hatm/if_hatmconf.h>
#include <dev/hatm/if_hatmreg.h>
#include <dev/hatm/if_hatmvar.h>
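
/*
 * Compile-time consistency checks: the external mbuf chunk and page
 * structures must match the sizes the chip is programmed with, and the
 * per-page chunk counts must fit into the handle encoding.
 */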
CTASSERT(sizeof(struct mbuf_page) == MBUF_ALLOC_SIZE);
CTASSERT(sizeof(struct mbuf0_chunk) == MBUF0_CHUNK);
CTASSERT(sizeof(struct mbuf1_chunk) == MBUF1_CHUNK);
CTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE);
CTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE);
CTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE);

CTASSERT(MBUF0_PER_PAGE <= 256);
CTASSERT(MBUF1_PER_PAGE <= 256);

static void hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group);

/*
 * Free an external mbuf to a list. We use atomic functions so that
 * we don't need a mutex for the list.
 *
 * Note that in general this algorithm is not safe when multiple readers
 * and writers are present. To cite from a mail from David Schultz:
 *
 *	It looks like this is subject to the ABA problem. For instance,
 *	suppose X, Y, and Z are the top things on the freelist and a
 *	thread attempts to make an allocation. You set buf to X and load
 *	buf->link (Y) into a register. Then the thread gets preempted, and
 *	another thread allocates both X and Y, then frees X. When the
 *	original thread gets the CPU again, X is still on top of the
 *	freelist, so the atomic operation succeeds. However, the atomic
 *	op places Y on top of the freelist, even though Y is no longer
 *	free.
 *
 * We are, however, sure that we have only one thread that ever allocates
 * buffers, because the only place we are called from is the interrupt
 * handler. Under these circumstances the code looks safe.
 */
static __inline void
hatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf)
{
	for (;;) {
		buf->link = *list;
		if (atomic_cmpset_ptr((uintptr_t *)list, (uintptr_t)buf->link,
		    (uintptr_t)buf))
			break;
	}
}
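
/*
 * Allocate a chunk from the per-group free list. If the list is empty,
 * try to allocate a fresh page of chunks and retry once.
 */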
static __inline struct mbufx_free *
hatm_ext_alloc(struct hatm_softc *sc, u_int g)
{
	struct mbufx_free *buf;

	for (;;) {
		if ((buf = sc->mbuf_list[g]) == NULL)
			break;
		if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
		    (uintptr_t)buf, (uintptr_t)buf->link))
			break;
	}
	if (buf == NULL) {
		hatm_mbuf_page_alloc(sc, g);
		for (;;) {
			if ((buf = sc->mbuf_list[g]) == NULL)
				break;
			if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
			    (uintptr_t)buf, (uintptr_t)buf->link))
				break;
		}
	}
	return (buf);
}

/*
 * Either the queue threshold was crossed or a TPD with the INTR bit set
 * was transmitted.
 */
static void
he_intr_tbrq(struct hatm_softc *sc, struct hetbrq *q, u_int group)
{
	uint32_t *tailp = &sc->hsp->group[group].tbrq_tail;
	u_int no;

	while (q->head != (*tailp >> 2)) {
		no = (q->tbrq[q->head].addr & HE_REGM_TBRQ_ADDR) >>
		    HE_REGS_TPD_ADDR;
		hatm_tx_complete(sc, TPD_ADDR(sc, no),
		    (q->tbrq[q->head].addr & HE_REGM_TBRQ_FLAGS));

		if (++q->head == q->size)
			q->head = 0;
	}
	WRITE4(sc, HE_REGO_TBRQ_H(group), q->head << 2);
}

/*
 * DMA loader function for external mbuf page.
 */
static void
hatm_extbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error)
{
	if (error) {
		printf("%s: mapping error %d\n", __func__, error);
		return;
	}
	KASSERT(nsegs == 1,
	    ("too many segments for DMA: %d", nsegs));
	KASSERT(segs[0].ds_addr <= 0xffffffffLU,
	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));

	*(uint32_t *)arg = segs[0].ds_addr;
}

/*
 * Allocate a page of external mbuf storage for the small pools.
 * Create a DMA map and load it. Put all the chunks onto the right
 * free list.
 */
static void
hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group)
{
	struct mbuf_page *pg;
	int err;
	u_int i;

	if (sc->mbuf_npages == sc->mbuf_max_pages)
		return;
	if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)
		return;

	err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map);
	if (err != 0) {
		if_printf(sc->ifp, "%s -- bus_dmamap_create: %d\n",
		    __func__, err);
		free(pg, M_DEVBUF);
		return;
	}
	err = bus_dmamap_load(sc->mbuf_tag, pg->hdr.map, pg, MBUF_ALLOC_SIZE,
	    hatm_extbuf_helper, &pg->hdr.phys, BUS_DMA_NOWAIT);
	if (err != 0) {
		if_printf(sc->ifp, "%s -- mbuf mapping failed %d\n",
		    __func__, err);
		bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
		free(pg, M_DEVBUF);
		return;
	}

	sc->mbuf_pages[sc->mbuf_npages] = pg;

	if (group == 0) {
		struct mbuf0_chunk *c;

		pg->hdr.nchunks = MBUF0_PER_PAGE;
		pg->hdr.chunksize = MBUF0_CHUNK;
		pg->hdr.hdroff = sizeof(c->storage);
		c = (struct mbuf0_chunk *)pg;
		for (i = 0; i < MBUF0_PER_PAGE; i++, c++) {
			c->hdr.pageno = sc->mbuf_npages;
			c->hdr.chunkno = i;
			c->hdr.flags = 0;
			hatm_ext_free(&sc->mbuf_list[0],
			    (struct mbufx_free *)c);
		}
	} else {
		struct mbuf1_chunk *c;

		pg->hdr.nchunks = MBUF1_PER_PAGE;
		pg->hdr.chunksize = MBUF1_CHUNK;
		pg->hdr.hdroff = sizeof(c->storage);
		c = (struct mbuf1_chunk *)pg;
		for (i = 0; i < MBUF1_PER_PAGE; i++, c++) {
			c->hdr.pageno = sc->mbuf_npages;
			c->hdr.chunkno = i;
			c->hdr.flags = 0;
			hatm_ext_free(&sc->mbuf_list[1],
			    (struct mbufx_free *)c);
		}
	}

	sc->mbuf_npages++;
}

/*
 * Free an mbuf and put it onto the free list.
 */
static void
hatm_mbuf0_free(struct mbuf *m, void *buf, void *args)
{
	struct hatm_softc *sc = args;
	struct mbuf0_chunk *c = buf;

	KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
	    ("freeing unused mbuf %x", c->hdr.flags));
	c->hdr.flags &= ~MBUF_USED;
	hatm_ext_free(&sc->mbuf_list[0], (struct mbufx_free *)c);
}

static void
hatm_mbuf1_free(struct mbuf *m, void *buf, void *args)
{
	struct hatm_softc *sc = args;
	struct mbuf1_chunk *c = buf;

	KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
	    ("freeing unused mbuf %x", c->hdr.flags));
	c->hdr.flags &= ~MBUF_USED;
	hatm_ext_free(&sc->mbuf_list[1], (struct mbufx_free *)c);
}
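
/*
 * DMA load helper for a single receive mbuf: store the 32-bit physical
 * address of the only segment into the caller-supplied pointer.
 */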
static void
hatm_mbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	uint32_t *ptr = (uint32_t *)arg;

	if (error != 0) {
		printf("%s: error=%d\n", __func__, error);
		return;
	}
	KASSERT(nsegs == 1, ("too many segments for mbuf: %d", nsegs));
	KASSERT(segs[0].ds_addr <= 0xffffffffLU,
	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));

	*ptr = segs[0].ds_addr;
}

/*
 * Receive buffer pool interrupt. This means the number of entries in the
 * queue has dropped below the threshold. Try to supply new buffers.
 */
static void
he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large,
    u_int group)
{
	u_int ntail;
	struct mbuf *m;
	int error;
	struct mbufx_free *cf;
	struct mbuf_page *pg;
	struct mbuf0_chunk *buf0;
	struct mbuf1_chunk *buf1;

	DBG(sc, INTR, ("%s buffer supply threshold crossed for group %u",
	    large ? "large" : "small", group));

	rbp->head = (READ4(sc, HE_REGO_RBP_S(large, group)) >> HE_REGS_RBP_HEAD)
	    & (rbp->size - 1);

	for (;;) {
		if ((ntail = rbp->tail + 1) == rbp->size)
			ntail = 0;
		if (ntail == rbp->head)
			break;
		m = NULL;

		if (large) {
			/* allocate the MBUF */
			if ((m = m_getcl(M_NOWAIT, MT_DATA,
			    M_PKTHDR)) == NULL) {
				if_printf(sc->ifp,
				    "no mbuf clusters\n");
				break;
			}
			m->m_data += MBUFL_OFFSET;

			if (sc->lbufs[sc->lbufs_next] != NULL)
				panic("hatm: lbufs full %u", sc->lbufs_next);
			sc->lbufs[sc->lbufs_next] = m;

			if ((error = bus_dmamap_load(sc->mbuf_tag,
			    sc->rmaps[sc->lbufs_next],
			    m->m_data, rbp->bsize, hatm_mbuf_helper,
			    &rbp->rbp[rbp->tail].phys, BUS_DMA_NOWAIT)) != 0)
				panic("hatm: mbuf mapping failed %d", error);

			bus_dmamap_sync(sc->mbuf_tag,
			    sc->rmaps[sc->lbufs_next],
			    BUS_DMASYNC_PREREAD);

			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_LHANDLE(sc->lbufs_next);

			if (++sc->lbufs_next == sc->lbufs_size)
				sc->lbufs_next = 0;

		} else if (group == 0) {
			/*
			 * Allocate small buffer in group 0
			 */
			if ((cf = hatm_ext_alloc(sc, 0)) == NULL)
				break;
			buf0 = (struct mbuf0_chunk *)cf;
			pg = sc->mbuf_pages[buf0->hdr.pageno];
			buf0->hdr.flags |= MBUF_CARD;
			rbp->rbp[rbp->tail].phys = pg->hdr.phys +
			    buf0->hdr.chunkno * MBUF0_CHUNK + MBUF0_OFFSET;
			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_HANDLE(buf0->hdr.pageno,
			    buf0->hdr.chunkno);

			bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
			    BUS_DMASYNC_PREREAD);

		} else if (group == 1) {
			/*
			 * Allocate small buffer in group 1
			 */
			if ((cf = hatm_ext_alloc(sc, 1)) == NULL)
				break;
			buf1 = (struct mbuf1_chunk *)cf;
			pg = sc->mbuf_pages[buf1->hdr.pageno];
			buf1->hdr.flags |= MBUF_CARD;
			rbp->rbp[rbp->tail].phys = pg->hdr.phys +
			    buf1->hdr.chunkno * MBUF1_CHUNK + MBUF1_OFFSET;
			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_HANDLE(buf1->hdr.pageno,
			    buf1->hdr.chunkno);

			bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
			    BUS_DMASYNC_PREREAD);

		} else
			break;

		DBG(sc, DMA, ("MBUF loaded: handle=%x m=%p phys=%x",
		    rbp->rbp[rbp->tail].handle, m, rbp->rbp[rbp->tail].phys));

		rbp->tail = ntail;
	}
	WRITE4(sc, HE_REGO_RBP_T(large, group),
	    (rbp->tail << HE_REGS_RBP_TAIL));
}

/*
 * Extract the buffer and hand it to the receive routine.
 */
static struct mbuf *
hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle)
{
	u_int pageno;
	u_int chunkno;
	struct mbuf *m;

	if (handle & MBUF_LARGE_FLAG) {
		/* large buffer - sync and unload */
		MBUF_PARSE_LHANDLE(handle, handle);
		DBG(sc, RX, ("RX large handle=%x", handle));

		bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[handle],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[handle]);

		m = sc->lbufs[handle];
		sc->lbufs[handle] = NULL;

		return (m);
	}

	MBUF_PARSE_HANDLE(handle, pageno, chunkno);

	DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle,
	    pageno, chunkno));

	MGETHDR(m, M_NOWAIT, MT_DATA);

	if (group == 0) {
		struct mbuf0_chunk *c0;

		c0 = (struct mbuf0_chunk *)sc->mbuf_pages[pageno] + chunkno;
		KASSERT(c0->hdr.pageno == pageno, ("pageno = %u/%u",
		    c0->hdr.pageno, pageno));
		KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u",
		    c0->hdr.chunkno, chunkno));
		KASSERT(c0->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
		    pageno, chunkno));
		KASSERT(!(c0->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
		    pageno, chunkno));

		c0->hdr.flags |= MBUF_USED;
		c0->hdr.flags &= ~MBUF_CARD;

		if (m != NULL) {
			m->m_ext.ext_cnt = &c0->hdr.ref_cnt;
			MEXTADD(m, (void *)c0, MBUF0_SIZE,
			    hatm_mbuf0_free, c0, sc, M_PKTHDR, EXT_EXTREF);
			m->m_data += MBUF0_OFFSET;
		} else
			(void)hatm_mbuf0_free(NULL, c0, sc);

	} else {
		struct mbuf1_chunk *c1;

		c1 = (struct mbuf1_chunk *)sc->mbuf_pages[pageno] + chunkno;
		KASSERT(c1->hdr.pageno == pageno, ("pageno = %u/%u",
		    c1->hdr.pageno, pageno));
		KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u",
		    c1->hdr.chunkno, chunkno));
		KASSERT(c1->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
		    pageno, chunkno));
		KASSERT(!(c1->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
		    pageno, chunkno));

		c1->hdr.flags |= MBUF_USED;
		c1->hdr.flags &= ~MBUF_CARD;

		if (m != NULL) {
			m->m_ext.ext_cnt = &c1->hdr.ref_cnt;
			MEXTADD(m, (void *)c1, MBUF1_SIZE,
			    hatm_mbuf1_free, c1, sc, M_PKTHDR, EXT_EXTREF);
			m->m_data += MBUF1_OFFSET;
		} else
			(void)hatm_mbuf1_free(NULL, c1, sc);
	}

	return (m);
}

/*
 * Interrupt because of receive buffer returned.
 */
static void
he_intr_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
{
	struct he_rbrqen *e;
	uint32_t flags, tail;
	u_int cid, len;
	struct mbuf *m;

	for (;;) {
		tail = sc->hsp->group[group].rbrq_tail >> 3;

		if (rq->head == tail)
			break;

		e = &rq->rbrq[rq->head];

		flags = e->addr & HE_REGM_RBRQ_FLAGS;
		if (!(flags & HE_REGM_RBRQ_HBUF_ERROR))
			m = hatm_rx_buffer(sc, group, e->addr);
		else
			m = NULL;

		cid = (e->len & HE_REGM_RBRQ_CID) >> HE_REGS_RBRQ_CID;
		len = 4 * (e->len & HE_REGM_RBRQ_LEN);

		hatm_rx(sc, cid, flags, m, len);

		if (++rq->head == rq->size)
			rq->head = 0;
	}
	WRITE4(sc, HE_REGO_RBRQ_H(group), rq->head << 3);
}
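
/*
 * Main interrupt handler: drain the interrupt status queue and dispatch
 * to the TBRQ/RBRQ/RBP/PHY service routines above.
 */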
void
hatm_intr(void *p)
{
	struct heirq *q = p;
	struct hatm_softc *sc = q->sc;
	u_int status;
	u_int tail;

	/* if we have a stray interrupt with a non-initialized card,
	 * we cannot even lock before looking at the flag */
	if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	mtx_lock(&sc->mtx);
	(void)READ4(sc, HE_REGO_INT_FIFO);

	tail = *q->tailp;
	if (q->head == tail) {
		/* workaround for tail pointer not updated bug (8.1.1) */
		DBG(sc, INTR, ("hatm: intr tailq not updated bug triggered"));

		/* read the tail pointer from the card */
		tail = READ4(sc, HE_REGO_IRQ_BASE(q->group)) &
		    HE_REGM_IRQ_BASE_TAIL;

		sc->istats.bug_no_irq_upd++;
	}

	/* clear the interrupt */
	WRITE4(sc, HE_REGO_INT_FIFO, HE_REGM_INT_FIFO_CLRA);

	while (q->head != tail) {
		status = q->irq[q->head];
		q->irq[q->head] = HE_REGM_ITYPE_INVALID;
		if (++q->head == (q->size - 1))
			q->head = 0;

		switch (status & HE_REGM_ITYPE) {

		  case HE_REGM_ITYPE_TBRQ:
			DBG(sc, INTR, ("TBRQ threshold %u", status & HE_REGM_IGROUP));
			sc->istats.itype_tbrq++;
			he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
			break;

		  case HE_REGM_ITYPE_TPD:
			DBG(sc, INTR, ("TPD ready %u", status & HE_REGM_IGROUP));
			sc->istats.itype_tpd++;
			he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
			break;

		  case HE_REGM_ITYPE_RBPS:
			sc->istats.itype_rbps++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
				break;

			  case 1:
				he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
				break;

			  default:
				if_printf(sc->ifp, "bad INTR RBPS%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_RBPL:
			sc->istats.itype_rbpl++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
				break;

			  default:
				if_printf(sc->ifp, "bad INTR RBPL%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_RBRQ:
			DBG(sc, INTR, ("INTERRUPT RBRQ %u", status & HE_REGM_IGROUP));
			sc->istats.itype_rbrq++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbrq(sc, &sc->rbrq_0, 0);
				break;

			  case 1:
				if (sc->rbrq_1.size > 0) {
					he_intr_rbrq(sc, &sc->rbrq_1, 1);
					break;
				}
				/* FALLTHROUGH */

			  default:
				if_printf(sc->ifp, "bad INTR RBRQ%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_RBRQT:
			DBG(sc, INTR, ("INTERRUPT RBRQT %u", status & HE_REGM_IGROUP));
			sc->istats.itype_rbrqt++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbrq(sc, &sc->rbrq_0, 0);
				break;

			  case 1:
				if (sc->rbrq_1.size > 0) {
					he_intr_rbrq(sc, &sc->rbrq_1, 1);
					break;
				}
				/* FALLTHROUGH */

			  default:
				if_printf(sc->ifp, "bad INTR RBRQT%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_PHYS:
			sc->istats.itype_phys++;
			utopia_intr(&sc->utopia);
			break;

#if HE_REGM_ITYPE_UNKNOWN != HE_REGM_ITYPE_INVALID
		  case HE_REGM_ITYPE_UNKNOWN:
			sc->istats.itype_unknown++;
			if_printf(sc->ifp, "bad interrupt\n");
			break;
#endif

		  case HE_REGM_ITYPE_ERR:
			sc->istats.itype_err++;
			switch (status) {

			  case HE_REGM_ITYPE_PERR:
				if_printf(sc->ifp, "parity error\n");
				break;

			  case HE_REGM_ITYPE_ABORT:
				if_printf(sc->ifp, "abort interrupt "
				    "addr=0x%08x\n",
				    READ4(sc, HE_REGO_ABORT_ADDR));
				break;

			  default:
				if_printf(sc->ifp,
				    "bad interrupt type %08x\n", status);
				break;
			}
			break;

		  case HE_REGM_ITYPE_INVALID:
			/* this is the documented fix for the ISW bug 8.1.1
			 * Note that the documented fix is partly wrong:
			 * the ISWs should be initialized to 0xf8 not 0xff */
			sc->istats.bug_bad_isw++;
			DBG(sc, INTR, ("hatm: invalid ISW bug triggered"));
			he_intr_tbrq(sc, &sc->tbrq, 0);
			he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
			he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
			he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
			he_intr_rbrq(sc, &sc->rbrq_0, 0);
			he_intr_rbrq(sc, &sc->rbrq_1, 1);
			utopia_intr(&sc->utopia);
			break;

		  default:
			if_printf(sc->ifp, "bad interrupt type %08x\n",
			    status);
			break;
		}
	}

	/* write back head to clear queue */
	WRITE4(sc, HE_REGO_IRQ_HEAD(0),
	    ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
	    (q->thresh << HE_REGS_IRQ_HEAD_THRESH) |
	    (q->head << HE_REGS_IRQ_HEAD_HEAD));

	/* workaround the back-to-back irq access problem (8.1.2) */
	(void)READ4(sc, HE_REGO_INT_FIFO);

	mtx_unlock(&sc->mtx);
}