/*-
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 *
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/sockio.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_atm.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/if_atm.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/utopia/utopia.h>
#include <dev/hatm/if_hatmconf.h>
#include <dev/hatm/if_hatmreg.h>
#include <dev/hatm/if_hatmvar.h>
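/*
 * Compile-time consistency checks.  The page and chunk structures from
 * if_hatmvar.h must exactly fill their configured allocation sizes, and the
 * number of chunks per page must stay small enough for a chunk number to
 * fit into the buffer handle (presumably one byte, hence the 256 limit
 * below).
 */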
CTASSERT(sizeof(struct mbuf_page) == MBUF_ALLOC_SIZE);
CTASSERT(sizeof(struct mbuf0_chunk) == MBUF0_CHUNK);
CTASSERT(sizeof(struct mbuf1_chunk) == MBUF1_CHUNK);
CTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE);
CTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE);
CTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE);

CTASSERT(MBUF0_PER_PAGE <= 256);
CTASSERT(MBUF1_PER_PAGE <= 256);

static void hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group);
/*
 * Free an external mbuf to a list. We use atomic functions so that
 * we don't need a mutex for the list.
 *
 * Note that in general this algorithm is not safe when multiple readers
 * and writers are present. To cite from a mail from David Schultz:
 *
 * It looks like this is subject to the ABA problem. For instance,
 * suppose X, Y, and Z are the top things on the freelist and a
 * thread attempts to make an allocation. You set buf to X and load
 * buf->link (Y) into a register. Then the thread gets preempted, and
 * another thread allocates both X and Y, then frees X. When the
 * original thread gets the CPU again, X is still on top of the
 * freelist, so the atomic operation succeeds. However, the atomic
 * op places Y on top of the freelist, even though Y is no longer
 * free.
 *
 * We are, however, sure that we have only one thread that ever allocates
 * buffers because the only place we're called from is the interrupt handler.
 * Under these circumstances the code looks safe.
 */
static __inline void
hatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf)
{
    for (;;) {
        buf->link = *list;
        if (atomic_cmpset_ptr((uintptr_t *)list, (uintptr_t)buf->link,
            (uintptr_t)buf))
            break;
    }
}
static __inline struct mbufx_free *
hatm_ext_alloc(struct hatm_softc *sc, u_int g)
{
    struct mbufx_free *buf;

    for (;;) {
        if ((buf = sc->mbuf_list[g]) == NULL)
            break;
        if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
            (uintptr_t)buf, (uintptr_t)buf->link))
            break;
    }
    if (buf == NULL) {
        hatm_mbuf_page_alloc(sc, g);
        for (;;) {
            if ((buf = sc->mbuf_list[g]) == NULL)
                break;
            if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
                (uintptr_t)buf, (uintptr_t)buf->link))
                break;
        }
    }
    return (buf);
}
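/*
 * Sketch (informational, not driver code): the way the two inline functions
 * above are used elsewhere in this file is roughly
 *
 *	struct mbufx_free *cf;
 *
 *	if ((cf = hatm_ext_alloc(sc, 0)) != NULL)
 *		... hand the chunk to the adapter (see he_intr_rbp()) ...
 *	...
 *	hatm_ext_free(&sc->mbuf_list[0], cf);	(from the ext-free callbacks)
 *
 * Only the interrupt handler ever pops from the list, which is the
 * single-allocator assumption the comment above relies on.
 */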
/*
 * Either the queue threshold was crossed or a TPD with the INTR bit set
 * was transmitted.
 */
he_intr_tbrq(struct hatm_softc *sc, struct hetbrq *q, u_int group)
    uint32_t *tailp = &sc->hsp->group[group].tbrq_tail;

    while (q->head != (*tailp >> 2)) {
        no = (q->tbrq[q->head].addr & HE_REGM_TBRQ_ADDR) >>
        hatm_tx_complete(sc, TPD_ADDR(sc, no),
            (q->tbrq[q->head].addr & HE_REGM_TBRQ_FLAGS));

        if (++q->head == q->size)

    WRITE4(sc, HE_REGO_TBRQ_H(group), q->head << 2);
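/*
 * Note on the shifts above (an observation, not taken from the data sheet):
 * the TBRQ tail in the host status page and the head written back to
 * HE_REGO_TBRQ_H() appear to be byte offsets of the 4-byte queue entries,
 * hence the ">> 2" when reading the tail and the "<< 2" when writing the
 * head back.
 */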
/*
 * DMA loader function for external mbuf page.
 */
static void
hatm_extbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error)
{
    if (error) {
        printf("%s: mapping error %d\n", __func__, error);
        return;
    }
    KASSERT(nsegs == 1,
        ("too many segments for DMA: %d", nsegs));
    KASSERT(segs[0].ds_addr <= 0xffffffffLU,
        ("phys addr too large %lx", (u_long)segs[0].ds_addr));

    *(uint32_t *)arg = segs[0].ds_addr;
}
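/*
 * Note that this helper (like hatm_mbuf_helper() below) stores only the low
 * 32 bits of a single DMA segment.  This relies on the mbuf DMA tag
 * presumably being created elsewhere in the driver with one segment and a
 * 32-bit address limit; the KASSERTs above check exactly that.
 */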
/*
 * Allocate a page of external mbuf storage for the small pools.
 * Create a DMA map and load it. Put all the chunks onto the right
 * free list.
 */
static void
hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group)
{
    struct mbuf_page *pg;

    if (sc->mbuf_npages == sc->mbuf_max_pages)
    if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)

    err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map);
        if_printf(sc->ifp, "%s -- bus_dmamap_create: %d\n",

    err = bus_dmamap_load(sc->mbuf_tag, pg->hdr.map, pg, MBUF_ALLOC_SIZE,
        hatm_extbuf_helper, &pg->hdr.phys, BUS_DMA_NOWAIT);
        if_printf(sc->ifp, "%s -- mbuf mapping failed %d\n",
        bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);

    sc->mbuf_pages[sc->mbuf_npages] = pg;

        struct mbuf0_chunk *c;

        pg->hdr.nchunks = MBUF0_PER_PAGE;
        pg->hdr.chunksize = MBUF0_CHUNK;
        pg->hdr.hdroff = sizeof(c->storage);
        c = (struct mbuf0_chunk *)pg;
        for (i = 0; i < MBUF0_PER_PAGE; i++, c++) {
            c->hdr.pageno = sc->mbuf_npages;
            hatm_ext_free(&sc->mbuf_list[0],
                (struct mbufx_free *)c);

        struct mbuf1_chunk *c;

        pg->hdr.nchunks = MBUF1_PER_PAGE;
        pg->hdr.chunksize = MBUF1_CHUNK;
        pg->hdr.hdroff = sizeof(c->storage);
        c = (struct mbuf1_chunk *)pg;
        for (i = 0; i < MBUF1_PER_PAGE; i++, c++) {
            c->hdr.pageno = sc->mbuf_npages;
            hatm_ext_free(&sc->mbuf_list[1],
                (struct mbufx_free *)c);
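/*
 * Layout note (derived from the code above): an MBUF_ALLOC_SIZE page is
 * carved into MBUF0_PER_PAGE respectively MBUF1_PER_PAGE equal-sized
 * chunks, and every chunk records its page number (and chunk number) in its
 * header, so that the handle handed to the adapter can later be translated
 * back into a chunk pointer in hatm_rx_buffer() below.
 */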
/*
 * Free an mbuf and put it onto the free list.
 */
static int
hatm_mbuf0_free(struct mbuf *m, void *buf, void *args)
{
    struct hatm_softc *sc = args;
    struct mbuf0_chunk *c = buf;

    KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
        ("freeing unused mbuf %x", c->hdr.flags));
    c->hdr.flags &= ~MBUF_USED;
    hatm_ext_free(&sc->mbuf_list[0], (struct mbufx_free *)c);
    return (EXT_FREE_OK);
}

static int
hatm_mbuf1_free(struct mbuf *m, void *buf, void *args)
{
    struct hatm_softc *sc = args;
    struct mbuf1_chunk *c = buf;

    KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
        ("freeing unused mbuf %x", c->hdr.flags));
    c->hdr.flags &= ~MBUF_USED;
    hatm_ext_free(&sc->mbuf_list[1], (struct mbufx_free *)c);
    return (EXT_FREE_OK);
}
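/*
 * Flag life cycle as visible in this file: a chunk gets MBUF_CARD set when
 * it is handed to the adapter in he_intr_rbp(), trades MBUF_CARD for
 * MBUF_USED when it is received back in hatm_rx_buffer(), and drops
 * MBUF_USED again in the callbacks above just before it goes back onto the
 * free list.
 */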
static void
hatm_mbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
    uint32_t *ptr = (uint32_t *)arg;

    if (error) {
        printf("%s: error=%d\n", __func__, error);
        return;
    }
    KASSERT(nsegs == 1, ("too many segments for mbuf: %d", nsegs));
    KASSERT(segs[0].ds_addr <= 0xffffffffLU,
        ("phys addr too large %lx", (u_long)segs[0].ds_addr));
    *ptr = segs[0].ds_addr;
}
/*
 * Receive buffer pool interrupt. This means the number of entries in the
 * queue has dropped below the threshold. Try to supply new buffers.
 */
he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large,
    struct mbufx_free *cf;
    struct mbuf_page *pg;
    struct mbuf0_chunk *buf0;
    struct mbuf1_chunk *buf1;

    DBG(sc, INTR, ("%s buffer supply threshold crossed for group %u",
        large ? "large" : "small", group));

    rbp->head = (READ4(sc, HE_REGO_RBP_S(large, group)) >> HE_REGS_RBP_HEAD)

        if ((ntail = rbp->tail + 1) == rbp->size)
        if (ntail == rbp->head)

            /* allocate the MBUF */
            if ((m = m_getcl(M_NOWAIT, MT_DATA,
                M_PKTHDR)) == NULL) {
                    "no mbuf clusters\n");
            m->m_data += MBUFL_OFFSET;

            if (sc->lbufs[sc->lbufs_next] != NULL)
                panic("hatm: lbufs full %u", sc->lbufs_next);
            sc->lbufs[sc->lbufs_next] = m;

            if ((error = bus_dmamap_load(sc->mbuf_tag,
                sc->rmaps[sc->lbufs_next],
                m->m_data, rbp->bsize, hatm_mbuf_helper,
                &rbp->rbp[rbp->tail].phys, BUS_DMA_NOWAIT)) != 0)
                panic("hatm: mbuf mapping failed %d", error);

            bus_dmamap_sync(sc->mbuf_tag,
                sc->rmaps[sc->lbufs_next],
                BUS_DMASYNC_PREREAD);

            rbp->rbp[rbp->tail].handle =
                MBUF_MAKE_LHANDLE(sc->lbufs_next);

            if (++sc->lbufs_next == sc->lbufs_size)

        } else if (group == 0) {
            /*
             * Allocate small buffer in group 0
             */
            if ((cf = hatm_ext_alloc(sc, 0)) == NULL)
            buf0 = (struct mbuf0_chunk *)cf;
            pg = sc->mbuf_pages[buf0->hdr.pageno];
            buf0->hdr.flags |= MBUF_CARD;
            rbp->rbp[rbp->tail].phys = pg->hdr.phys +
                buf0->hdr.chunkno * MBUF0_CHUNK + MBUF0_OFFSET;
            rbp->rbp[rbp->tail].handle =
                MBUF_MAKE_HANDLE(buf0->hdr.pageno,

            bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
                BUS_DMASYNC_PREREAD);

        } else if (group == 1) {
            /*
             * Allocate small buffer in group 1
             */
            if ((cf = hatm_ext_alloc(sc, 1)) == NULL)
            buf1 = (struct mbuf1_chunk *)cf;
            pg = sc->mbuf_pages[buf1->hdr.pageno];
            buf1->hdr.flags |= MBUF_CARD;
            rbp->rbp[rbp->tail].phys = pg->hdr.phys +
                buf1->hdr.chunkno * MBUF1_CHUNK + MBUF1_OFFSET;
            rbp->rbp[rbp->tail].handle =
                MBUF_MAKE_HANDLE(buf1->hdr.pageno,

            bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
                BUS_DMASYNC_PREREAD);

        DBG(sc, DMA, ("MBUF loaded: handle=%x m=%p phys=%x",
            rbp->rbp[rbp->tail].handle, m, rbp->rbp[rbp->tail].phys));

    WRITE4(sc, HE_REGO_RBP_T(large, group),
        (rbp->tail << HE_REGS_RBP_TAIL));
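/*
 * Note on the refill loop above: new buffers are supplied until the slot
 * after the current tail would hit the head reported by the card, i.e. the
 * ring is deliberately kept one entry short of completely full, and only
 * then is the new tail written back to the adapter.
 */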
/*
 * Extract the buffer and hand it to the receive routine.
 */
hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle)

    if (handle & MBUF_LARGE_FLAG) {
        /* large buffer - sync and unload */
        MBUF_PARSE_LHANDLE(handle, handle);
        DBG(sc, RX, ("RX large handle=%x", handle));

        bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[handle],
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[handle]);

        m = sc->lbufs[handle];
        sc->lbufs[handle] = NULL;

    MBUF_PARSE_HANDLE(handle, pageno, chunkno);

    DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle,

    MGETHDR(m, M_NOWAIT, MT_DATA);

        struct mbuf0_chunk *c0;

        c0 = (struct mbuf0_chunk *)sc->mbuf_pages[pageno] + chunkno;
        KASSERT(c0->hdr.pageno == pageno, ("pageno = %u/%u",
            c0->hdr.pageno, pageno));
        KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u",
            c0->hdr.chunkno, chunkno));
        KASSERT(c0->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
        KASSERT(!(c0->hdr.flags & MBUF_USED), ("used mbuf %u/%u",

        c0->hdr.flags |= MBUF_USED;
        c0->hdr.flags &= ~MBUF_CARD;

            m->m_ext.ref_cnt = &c0->hdr.ref_cnt;
            MEXTADD(m, (void *)c0, MBUF0_SIZE,
                hatm_mbuf0_free, c0, sc, M_PKTHDR, EXT_EXTREF);
            m->m_data += MBUF0_OFFSET;

            (void)hatm_mbuf0_free(NULL, c0, sc);

        struct mbuf1_chunk *c1;

        c1 = (struct mbuf1_chunk *)sc->mbuf_pages[pageno] + chunkno;
        KASSERT(c1->hdr.pageno == pageno, ("pageno = %u/%u",
            c1->hdr.pageno, pageno));
        KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u",
            c1->hdr.chunkno, chunkno));
        KASSERT(c1->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
        KASSERT(!(c1->hdr.flags & MBUF_USED), ("used mbuf %u/%u",

        c1->hdr.flags |= MBUF_USED;
        c1->hdr.flags &= ~MBUF_CARD;

            m->m_ext.ref_cnt = &c1->hdr.ref_cnt;
            MEXTADD(m, (void *)c1, MBUF1_SIZE,
                hatm_mbuf1_free, c1, sc, M_PKTHDR, EXT_EXTREF);
            m->m_data += MBUF1_OFFSET;

            (void)hatm_mbuf1_free(NULL, c1, sc);
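/*
 * Sketch of what the MEXTADD() calls above accomplish (restating the code,
 * not adding to it): the chunk is attached as external storage to a newly
 * allocated header mbuf, so that the final free of that mbuf invokes
 * hatm_mbuf0_free()/hatm_mbuf1_free() instead of handing the memory back to
 * the mbuf allocator.  If MGETHDR() fails, the chunk is recycled right away
 * through the same callback, as done above.
 */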
/*
 * Interrupt because of receive buffer returned.
 */
he_intr_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
    uint32_t flags, tail;

    tail = sc->hsp->group[group].rbrq_tail >> 3;

        if (rq->head == tail)

        e = &rq->rbrq[rq->head];

        flags = e->addr & HE_REGM_RBRQ_FLAGS;
        if (!(flags & HE_REGM_RBRQ_HBUF_ERROR))
            m = hatm_rx_buffer(sc, group, e->addr);

        cid = (e->len & HE_REGM_RBRQ_CID) >> HE_REGS_RBRQ_CID;
        len = 4 * (e->len & HE_REGM_RBRQ_LEN);

        hatm_rx(sc, cid, flags, m, len);

        if (++rq->head == rq->size)

    WRITE4(sc, HE_REGO_RBRQ_H(group), rq->head << 3);
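/*
 * Decoding note for the loop above: the connection ID and the buffer handle
 * come straight out of the RBRQ entry, while the length field apparently
 * counts 32-bit words, hence the multiplication by 4 before the data is
 * passed on to hatm_rx().
 */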
    struct hatm_softc *sc = q->sc;

    /* if we have a stray interrupt with a non-initialized card,
     * we cannot even lock before looking at the flag */
    if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))

    (void)READ4(sc, HE_REGO_INT_FIFO);

    if (q->head == tail) {
        /* workaround for tail pointer not updated bug (8.1.1) */
        DBG(sc, INTR, ("hatm: intr tailq not updated bug triggered"));

        /* read the tail pointer from the card */
        tail = READ4(sc, HE_REGO_IRQ_BASE(q->group)) &
            HE_REGM_IRQ_BASE_TAIL;

        sc->istats.bug_no_irq_upd++;

    /* clear the interrupt */
    WRITE4(sc, HE_REGO_INT_FIFO, HE_REGM_INT_FIFO_CLRA);
    while (q->head != tail) {
        status = q->irq[q->head];
        q->irq[q->head] = HE_REGM_ITYPE_INVALID;
        if (++q->head == (q->size - 1))

        switch (status & HE_REGM_ITYPE) {

        case HE_REGM_ITYPE_TBRQ:
            DBG(sc, INTR, ("TBRQ threshold %u", status & HE_REGM_IGROUP));
            sc->istats.itype_tbrq++;
            he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);

        case HE_REGM_ITYPE_TPD:
            DBG(sc, INTR, ("TPD ready %u", status & HE_REGM_IGROUP));
            sc->istats.itype_tpd++;
            he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
        case HE_REGM_ITYPE_RBPS:
            sc->istats.itype_rbps++;
            switch (status & HE_REGM_IGROUP) {
                he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
                he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
                if_printf(sc->ifp, "bad INTR RBPS%u\n",
                    status & HE_REGM_IGROUP);

        case HE_REGM_ITYPE_RBPL:
            sc->istats.itype_rbpl++;
            switch (status & HE_REGM_IGROUP) {
                he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
                if_printf(sc->ifp, "bad INTR RBPL%u\n",
                    status & HE_REGM_IGROUP);

        case HE_REGM_ITYPE_RBRQ:
            DBG(sc, INTR, ("INTERRUPT RBRQ %u", status & HE_REGM_IGROUP));
            sc->istats.itype_rbrq++;
            switch (status & HE_REGM_IGROUP) {
                he_intr_rbrq(sc, &sc->rbrq_0, 0);
                if (sc->rbrq_1.size > 0) {
                    he_intr_rbrq(sc, &sc->rbrq_1, 1);
                if_printf(sc->ifp, "bad INTR RBRQ%u\n",
                    status & HE_REGM_IGROUP);

        case HE_REGM_ITYPE_RBRQT:
            DBG(sc, INTR, ("INTERRUPT RBRQT %u", status & HE_REGM_IGROUP));
            sc->istats.itype_rbrqt++;
            switch (status & HE_REGM_IGROUP) {
                he_intr_rbrq(sc, &sc->rbrq_0, 0);
                if (sc->rbrq_1.size > 0) {
                    he_intr_rbrq(sc, &sc->rbrq_1, 1);
                if_printf(sc->ifp, "bad INTR RBRQT%u\n",
                    status & HE_REGM_IGROUP);

        case HE_REGM_ITYPE_PHYS:
            sc->istats.itype_phys++;
            utopia_intr(&sc->utopia);

#if HE_REGM_ITYPE_UNKNOWN != HE_REGM_ITYPE_INVALID
        case HE_REGM_ITYPE_UNKNOWN:
            sc->istats.itype_unknown++;
            if_printf(sc->ifp, "bad interrupt\n");

        case HE_REGM_ITYPE_ERR:
            sc->istats.itype_err++;

        case HE_REGM_ITYPE_PERR:
            if_printf(sc->ifp, "parity error\n");

        case HE_REGM_ITYPE_ABORT:
            if_printf(sc->ifp, "abort interrupt "
                READ4(sc, HE_REGO_ABORT_ADDR));

                "bad interrupt type %08x\n", status);
        case HE_REGM_ITYPE_INVALID:
            /* this is the documented fix for the ISW bug 8.1.1
             * Note that the documented fix is partly wrong:
             * the ISWs should be initialized to 0xf8 not 0xff */
            sc->istats.bug_bad_isw++;
            DBG(sc, INTR, ("hatm: invalid ISW bug triggered"));
            he_intr_tbrq(sc, &sc->tbrq, 0);
            he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
            he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
            he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
            he_intr_rbrq(sc, &sc->rbrq_0, 0);
            he_intr_rbrq(sc, &sc->rbrq_1, 1);
            utopia_intr(&sc->utopia);

            if_printf(sc->ifp, "bad interrupt type %08x\n",
    /* write back head to clear queue */
    WRITE4(sc, HE_REGO_IRQ_HEAD(0),
        ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
        (q->thresh << HE_REGS_IRQ_HEAD_THRESH) |
        (q->head << HE_REGS_IRQ_HEAD_HEAD));

    /* workaround the back-to-back irq access problem (8.1.2) */
    (void)READ4(sc, HE_REGO_INT_FIFO);

    mtx_unlock(&sc->mtx);