3 * Fraunhofer Institute for Open Communication Systems (FhG Fokus).
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * The TST allocation algorithm is from the IDT driver which is:
29 * Copyright (c) 2000, 2001 Richard Hodges and Matriplex, inc.
30 * All rights reserved.
32 * Copyright (c) 1996, 1997, 1998, 1999 Mark Tinguely
33 * All rights reserved.
35 * Author: Hartmut Brandt <harti@freebsd.org>
37 * Driver for IDT77252 based cards like ProSum's.
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
46 #include <sys/types.h>
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/malloc.h>
50 #include <sys/kernel.h>
52 #include <sys/errno.h>
54 #include <sys/module.h>
56 #include <sys/mutex.h>
57 #include <sys/sysctl.h>
58 #include <sys/queue.h>
59 #include <sys/condvar.h>
60 #include <sys/endian.h>
63 #include <sys/sockio.h>
65 #include <sys/socket.h>
68 #include <net/if_media.h>
69 #include <net/if_atm.h>
70 #include <net/route.h>
74 #include <netinet/in.h>
75 #include <netinet/if_atm.h>
77 #include <machine/bus.h>
78 #include <machine/resource.h>
81 #include <sys/mbpool.h>
83 #include <dev/utopia/utopia.h>
84 #include <dev/patm/idt77252reg.h>
85 #include <dev/patm/if_patmvar.h>
/* Forward declarations of the static helpers implemented below. */
87 static struct mbuf *patm_tx_pad(struct patm_softc *sc, struct mbuf *m0);
88 static void patm_launch(struct patm_softc *sc, struct patm_scd *scd);
90 static struct patm_txmap *patm_txmap_get(struct patm_softc *);
91 static void patm_load_txbuf(void *, bus_dma_segment_t *, int,
/* TST (transmit schedule table) handling for CBR connections */
94 static void patm_tst_alloc(struct patm_softc *sc, struct patm_vcc *vcc);
95 static void patm_tst_free(struct patm_softc *sc, struct patm_vcc *vcc);
96 static void patm_tst_timer(void *p);
97 static void patm_tst_update(struct patm_softc *);
99 static void patm_tct_start(struct patm_softc *sc, struct patm_vcc *);
/* debugging helpers; patm_tct_print is marked __unused */
101 static const char *dump_scd(struct patm_softc *sc, struct patm_scd *scd)
103 static void patm_tct_print(struct patm_softc *sc, u_int cid) __unused;
106 * Structure for communication with the loader function for transmission
109 struct patm_softc *sc; /* back pointer to the driver softc */
110 struct patm_scd *scd; /* scheduling channel */
111 struct patm_vcc *vcc; /* the VCC of this PDU */
113 u_int hdr; /* cell header */
/*
 * Convert a CBR connection's requested PCR into the number of TST slots
 * it needs.  Computes ceil((tst_size - 1) * pcr / link_pcr) in 64-bit
 * arithmetic so the connection is granted at least its requested rate.
 */
116 static __inline u_int
117 cbr2slots(struct patm_softc *sc, struct patm_vcc *vcc)
119 /* compute the number of slots we need, make sure to get at least
120 * the specified PCR */
121 return ((u_int)(((uint64_t)(sc->mmap->tst_size - 1) *
122 vcc->vcc.tparam.pcr + IFP2IFATM(sc->ifp)->mib.pcr - 1) / IFP2IFATM(sc->ifp)->mib.pcr));
/*
 * Inverse of cbr2slots(): convert a TST slot count back into a cell
 * rate, rounding up.  Used for bandwidth bookkeeping (sc->bwrem).
 */
125 static __inline u_int
126 slots2cr(struct patm_softc *sc, u_int slots)
128 return ((slots * IFP2IFATM(sc->ifp)->mib.pcr + sc->mmap->tst_size - 2) /
129 (sc->mmap->tst_size - 1));
132 /* check if we can open this one */
/*
 * Admission check for the transmit side of a VCC: verify, per traffic
 * class, that enough resources (TST slots for CBR, remaining bandwidth
 * sc->bwrem for VBR/ABR) are available and that the traffic parameters
 * are mutually consistent.
 */
134 patm_tx_vcc_can_open(struct patm_softc *sc, struct patm_vcc *vcc)
137 /* check resources */
138 switch (vcc->vcc.traffic) {
140 case ATMIO_TRAFFIC_CBR:
142 u_int slots = cbr2slots(sc, vcc);
/* CBR may use free slots plus the reserved pool */
144 if (slots > sc->tst_free + sc->tst_reserve)
149 case ATMIO_TRAFFIC_VBR:
150 if (vcc->vcc.tparam.scr > sc->bwrem)
152 if (vcc->vcc.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr)
/* SCR must not exceed PCR and a burst size is required */
154 if (vcc->vcc.tparam.scr > vcc->vcc.tparam.pcr ||
155 vcc->vcc.tparam.mbs == 0)
159 case ATMIO_TRAFFIC_ABR:
160 if (vcc->vcc.tparam.tbe == 0 ||
161 vcc->vcc.tparam.nrm == 0)
162 /* needed to compute CRM */
/* require MCR <= ICR <= PCR <= link rate */
164 if (vcc->vcc.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr ||
165 vcc->vcc.tparam.icr > vcc->vcc.tparam.pcr ||
166 vcc->vcc.tparam.mcr > vcc->vcc.tparam.icr)
168 if (vcc->vcc.tparam.mcr > sc->bwrem ||
169 vcc->vcc.tparam.icr > sc->bwrem)
/* advance a TSQE tag with wrap-around in the tag space */
177 #define NEXT_TAG(T) do { \
178 (T) = ((T) + 1) % IDT_TSQE_TAG_SPACE; \
/*
 * Open the transmit side of a VCC.  UBR connections share the
 * pre-configured UBR channel and only need the open flag; all other
 * classes get their own SCD (scheduling channel descriptor) and a
 * TCT (transmit connection table) entry.  Non-CBR channels are
 * started immediately via patm_tct_start().
 */
185 patm_tx_vcc_open(struct patm_softc *sc, struct patm_vcc *vcc)
187 struct patm_scd *scd;
189 if (vcc->vcc.traffic == ATMIO_TRAFFIC_UBR) {
192 vcc->vflags |= PATM_VCC_TX_OPEN;
197 scd = patm_scd_alloc(sc);
199 /* should not happen */
200 patm_printf(sc, "out of SCDs\n");
204 patm_scd_setup(sc, scd);
205 patm_tct_setup(sc, scd, vcc);
/* CBR channels are driven by the TST and need no explicit start */
207 if (vcc->vcc.traffic != ATMIO_TRAFFIC_CBR)
208 patm_tct_start(sc, vcc);
210 vcc->vflags |= PATM_VCC_TX_OPEN;
214 * close the given vcc for transmission
/*
 * Drop queued PDUs and, if no buffers are outstanding on the card,
 * tear down the channel immediately (free TST slots for CBR, clear
 * the SCD in SRAM, free the SCD).  Otherwise speed up the channel and
 * let the TX completion interrupt finish the close (see patm_tx()).
 */
217 patm_tx_vcc_close(struct patm_softc *sc, struct patm_vcc *vcc)
219 struct patm_scd *scd;
222 vcc->vflags |= PATM_VCC_TX_CLOSING;
224 if (vcc->vcc.traffic == ATMIO_TRAFFIC_UBR) {
225 /* let the queue PDUs go out */
227 vcc->vflags &= ~(PATM_VCC_TX_OPEN | PATM_VCC_TX_CLOSING);
232 /* empty the waitq */
234 _IF_DEQUEUE(&scd->q, m);
240 if (scd->num_on_card == 0) {
/* nothing in flight - finish the close synchronously */
242 vcc->vflags &= ~PATM_VCC_TX_OPEN;
244 if (vcc->vcc.traffic == ATMIO_TRAFFIC_CBR)
245 patm_tst_free(sc, vcc);
247 patm_sram_write4(sc, scd->sram + 0, 0, 0, 0, 0);
248 patm_sram_write4(sc, scd->sram + 4, 0, 0, 0, 0);
249 patm_scd_free(sc, scd);
252 vcc->vflags &= ~PATM_VCC_TX_CLOSING;
257 /* speed up transmission */
258 patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_UIER(vcc->cid, 0xff));
259 patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_ULACR(vcc->cid, 0xff));
261 /* wait for the interrupt to drop the number to 0 */
262 patm_debug(sc, VCC, "%u buffers still on card", scd->num_on_card);
265 /* transmission side finally closed */
/* Return the connection's bandwidth to the pool (VBR returns its SCR). */
267 patm_tx_vcc_closed(struct patm_softc *sc, struct patm_vcc *vcc)
270 patm_debug(sc, VCC, "%u.%u TX closed", vcc->vcc.vpi, vcc->vcc.vci);
272 if (vcc->vcc.traffic == ATMIO_TRAFFIC_VBR)
273 sc->bwrem += vcc->vcc.tparam.scr;
277 * Pull off packets from the interface queue and try to transmit them.
278 * If the transmission fails because of a full transmit channel, we drop
279 * packets for CBR and queue them for other channels up to limit.
280 * This limit should depend on the CDVT for VBR and ABR, but it doesn't.
/*
 * ifnet if_start handler.  Strips the atm_pseudohdr from each packet,
 * validates VPI/VCI and the per-AAL length constraints, enqueues the
 * mbuf on the VCC's SCD queue and kicks the channel via patm_launch().
 * All error paths count if_oerrors.
 */
283 patm_start(struct ifnet *ifp)
285 struct patm_softc *sc = ifp->if_softc;
287 struct atm_pseudohdr *aph;
289 struct patm_vcc *vcc;
292 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
293 mtx_unlock(&sc->mtx);
299 IF_DEQUEUE(&ifp->if_snd, m);
303 /* split of pseudo header */
304 if (m->m_len < sizeof(*aph) &&
305 (m = m_pullup(m, sizeof(*aph))) == NULL) {
306 sc->ifp->if_oerrors++;
310 aph = mtod(m, struct atm_pseudohdr *);
311 vci = ATM_PH_VCI(aph);
312 vpi = ATM_PH_VPI(aph);
313 m_adj(m, sizeof(*aph));
315 /* reject empty packets */
316 if (m->m_pkthdr.len == 0) {
318 sc->ifp->if_oerrors++;
322 /* check whether this is a legal vcc */
323 if (!LEGAL_VPI(sc, vpi) || !LEGAL_VCI(sc, vci) || vci == 0) {
325 sc->ifp->if_oerrors++;
328 cid = PATM_CID(sc, vpi, vci);
332 sc->ifp->if_oerrors++;
336 /* must be multiple of 48 if not AAL5 */
337 if (vcc->vcc.aal == ATMIO_AAL_0 ||
338 vcc->vcc.aal == ATMIO_AAL_34) {
339 /* XXX AAL3/4 format? */
340 if (m->m_pkthdr.len % 48 != 0 &&
341 (m = patm_tx_pad(sc, m)) == NULL) {
342 sc->ifp->if_oerrors++;
/* raw cells: length is fixed by the configured raw cell format
 * (53/52/64 bytes - presumably full cell / cell w/o HEC / padded;
 * NOTE(review): exact format semantics not visible here) */
345 } else if (vcc->vcc.aal == ATMIO_AAL_RAW) {
346 switch (vcc->vflags & PATM_RAW_FORMAT) {
350 if (m->m_pkthdr.len != 53) {
351 sc->ifp->if_oerrors++;
358 if (m->m_pkthdr.len != 52) {
359 sc->ifp->if_oerrors++;
366 if (m->m_pkthdr.len != 64) {
367 sc->ifp->if_oerrors++;
/* stash the VCC pointer for patm_launch() */
376 m->m_pkthdr.header = vcc;
378 /* try to put it on the channels queue */
379 if (_IF_QFULL(&vcc->scd->q)) {
380 sc->ifp->if_oerrors++;
381 sc->stats.tx_qfull++;
385 _IF_ENQUEUE(&vcc->scd->q, m);
388 if (!(vcc->vcc.flags & ATMIO_FLAG_NG) &&
389 (vcc->vcc.aal == ATMIO_AAL_5) &&
390 (vcc->vcc.flags & ATM_PH_LLCSNAP))
394 /* kick the channel to life */
395 patm_launch(sc, vcc->scd);
398 mtx_unlock(&sc->mtx);
402 * Pad non-AAL5 packet to a multiple of 48-byte.
403 * We assume AAL0 only. We have still to decide on the format of AAL3/4.
/*
 * Returns the (possibly re-chained) mbuf on success, NULL on failure
 * (the caller counts the error).  Tries, in order: zero-fill in the
 * trailing space of the last mbuf; shift the last mbuf's data forward
 * to make enough room; and finally append a fresh mbuf for the pad.
 */
406 patm_tx_pad(struct patm_softc *sc, struct mbuf *m0)
408 struct mbuf *last, *m;
409 u_int plen, pad, space;
411 plen = m_length(m0, &last);
412 if (plen != m0->m_pkthdr.len) {
/* chain length and pkthdr disagree - trust the chain */
413 patm_printf(sc, "%s: mbuf length mismatch %d %u\n", __func__,
414 m0->m_pkthdr.len, plen);
415 m0->m_pkthdr.len = plen;
418 sc->ifp->if_oerrors++;
424 pad = 48 - plen % 48;
425 m0->m_pkthdr.len += pad;
426 if (M_WRITABLE(last)) {
427 if (M_TRAILINGSPACE(last) >= pad) {
428 bzero(last->m_data + last->m_len, pad);
/* not enough trailing room: shift data to the buffer start */
432 space = M_LEADINGSPACE(last);
433 if (space + M_TRAILINGSPACE(last) >= pad) {
434 bcopy(last->m_data, last->m_data + space, last->m_len);
435 last->m_data -= space;
436 bzero(last->m_data + last->m_len, pad);
441 MGET(m, M_DONTWAIT, MT_DATA);
444 sc->ifp->if_oerrors++;
447 bzero(mtod(m, u_char *), pad);
455 * Try to put as many packets from the channels queue onto the channel
/*
 * Dequeue PDUs from the SCD's wait queue and DMA-load them onto the
 * card, limited by the TSQE tag space and the free SCQ entries.  On
 * resource shortage the packet is prepended back onto the queue and
 * we stop.  The actual TBD construction happens in the busdma
 * callback patm_load_txbuf().
 */
458 patm_launch(struct patm_softc *sc, struct patm_scd *scd)
461 struct mbuf *m, *tmp;
463 struct patm_txmap *map;
469 /* limit the number of outstanding packets to the tag space */
470 while (scd->num_on_card < IDT_TSQE_TAG_SPACE) {
471 /* get the next packet */
472 _IF_DEQUEUE(&scd->q, m);
/* VCC pointer was stored in the pkthdr by patm_start() */
476 a.vcc = m->m_pkthdr.header;
478 /* we must know the number of segments beforehand - count
479 * this may actually give a wrong number of segments for
480 * AAL_RAW where we still need to remove the cell header */
482 for (tmp = m; tmp != NULL; tmp = tmp->m_next)
486 /* check whether there is space in the queue */
487 if (segs >= scd->space) {
489 _IF_PREPEND(&scd->q, m);
490 sc->stats.tx_out_of_tbds++;
495 if ((map = patm_txmap_get(sc)) == NULL) {
496 _IF_PREPEND(&scd->q, m);
497 sc->stats.tx_out_of_maps++;
/* pkthdr.header is reused: it now holds the DMA map */
502 m->m_pkthdr.header = map;
506 if (a.vcc->vcc.aal == ATMIO_AAL_RAW) {
/* extract the 4-byte cell header from the packet */
509 m_copydata(m, 0, 4, hdr);
510 a.hdr = (hdr[0] << 24) | (hdr[1] << 16) |
511 (hdr[2] << 8) | hdr[3];
513 switch (a.vcc->vflags & PATM_RAW_FORMAT) {
529 a.hdr = IDT_TBD_HDR(a.vcc->vcc.vpi, a.vcc->vcc.vci,
532 error = bus_dmamap_load_mbuf(sc->tx_tag, map->map, m,
533 patm_load_txbuf, &a, BUS_DMA_NOWAIT);
534 if (error == EFBIG) {
/* too many segments - defragment and retry once */
535 if ((m = m_defrag(m, M_DONTWAIT)) == NULL) {
536 sc->ifp->if_oerrors++;
539 error = bus_dmamap_load_mbuf(sc->tx_tag, map->map, m,
540 patm_load_txbuf, &a, BUS_DMA_NOWAIT);
543 sc->stats.tx_load_err++;
544 sc->ifp->if_oerrors++;
545 SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link);
550 sc->ifp->if_opackets++;
555 * Load the DMA segments into the scheduling channel
/*
 * busdma callback: build one TBD (transmit buffer descriptor) per DMA
 * segment in the SCQ ring, tag the last one for TSQ status reporting,
 * write the new tail to SRAM and, if the connection looks idle on the
 * card, restart it.  Context is passed via struct txarg.
 */
558 patm_load_txbuf(void *uarg, bus_dma_segment_t *segs, int nseg,
559 bus_size_t mapsize, int error)
561 struct txarg *a= uarg;
562 struct patm_scd *scd = a->scd;
564 struct idt_tbd *tbd = NULL;
565 u_int rest = mapsize;
572 if (segs->ds_len == 0) {
573 /* transmit buffer length must be > 0 */
578 /* rest after this buffer */
579 rest -= segs->ds_len;
581 /* put together status word */
583 if (rest < 48 /* && a->vcc->vcc.aal != ATMIO_AAL_5 */)
584 /* last cell is in this buffer */
587 if (a->vcc->vcc.aal == ATMIO_AAL_5)
589 else if (a->vcc->vcc.aal == ATMIO_AAL_34)
596 /* AAL5 PDU length (unpadded) */
597 if (a->vcc->vcc.aal == ATMIO_AAL_5)
/* last TBD of the PDU: request a transmit status entry with tag */
603 w1 |= IDT_TBD_TSIF | IDT_TBD_GTSI |
604 (scd->tag << IDT_TBD_TAG_SHIFT);
606 tbd = &scd->scq[scd->tail];
/* descriptors are little-endian on the card */
608 tbd->flags = htole32(w1);
609 tbd->addr = htole32(segs->ds_addr);
610 tbd->aal5 = htole32(w3);
611 tbd->hdr = htole32(a->hdr);
613 patm_debug(a->sc, TX, "TBD(%u): %08x %08x %08x %08x",
614 scd->tail, w1, segs->ds_addr, w3, a->hdr);
616 /* got to next entry */
617 if (++scd->tail == IDT_SCQ_SIZE)
626 KASSERT(rest == 0, ("bad mbuf"));
627 KASSERT(cnt > 0, ("no segs"));
628 KASSERT(scd->space > 0, ("scq full"));
/* remember the mbuf under its tag; csum_data holds the TBD count
 * so patm_tx() can return the right amount of SCQ space */
630 KASSERT(scd->on_card[scd->tag] == NULL,
631 ("scd on_card wedged %u%s", scd->tag, dump_scd(a->sc, scd)));
632 scd->on_card[scd->tag] = a->mbuf;
633 a->mbuf->m_pkthdr.csum_data = cnt;
637 patm_debug(a->sc, TX, "SCD tail %u (%lx:%lx)", scd->tail,
638 (u_long)scd->phy, (u_long)scd->phy + (scd->tail << IDT_TBD_SHIFT));
639 patm_sram_write(a->sc, scd->sram,
640 scd->phy + (scd->tail << IDT_TBD_SHIFT));
642 if (patm_sram_read(a->sc, a->vcc->cid * 8 + 3) & IDT_TCT_IDLE) {
644 * if the connection is idle start it. We cannot rely
645 * on a flag set by patm_tx_idle() here, because sometimes
646 * the card seems to place an idle TSI into the TSQ but
647 * forgets to raise an interrupt.
649 patm_nor_write(a->sc, IDT_NOR_TCMDQ,
650 IDT_TCMDQ_START(a->vcc->cid));
/*
 * Handle one transmit-status-queue entry: free all completed PDUs up
 * to (and including) the one identified by the entry's tag, finish a
 * pending TX close when the last buffer has left the card, and for
 * ABR read back the current ACR and report rate changes upstream.
 */
658 patm_tx(struct patm_softc *sc, u_int stamp, u_int status)
660 u_int cid, tag, last;
662 struct patm_vcc *vcc;
663 struct patm_scd *scd;
664 struct patm_txmap *map;
666 /* get the connection */
667 cid = PATM_CID(sc, IDT_TBD_VPI(status), IDT_TBD_VCI(status));
668 if ((vcc = sc->vccs[cid]) == NULL) {
669 /* closed UBR connection */
674 tag = IDT_TSQE_TAG(stamp);
676 last = scd->last_tag;
/* duplicate status entry - nothing new to free */
678 patm_printf(sc, "same tag %u\n", tag);
682 /* Errata 12 requests us to free all entries up to the one
683 * with the given tag. */
685 /* next tag to try */
688 m = scd->on_card[last];
689 KASSERT(m != NULL, ("%stag=%u", dump_scd(sc, scd), tag));
690 scd->on_card[last] = NULL;
691 patm_debug(sc, TX, "ok tag=%x", last);
/* pkthdr.header holds the DMA map (set in patm_launch());
 * csum_data holds the number of TBDs this PDU used */
693 map = m->m_pkthdr.header;
694 scd->space += m->m_pkthdr.csum_data;
696 bus_dmamap_sync(sc->tx_tag, map->map,
697 BUS_DMASYNC_POSTWRITE);
698 bus_dmamap_unload(sc->tx_tag, map->map);
700 SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link);
703 if (vcc->vflags & PATM_VCC_TX_CLOSING) {
704 if (scd->num_on_card == 0) {
705 /* done with this VCC */
706 if (vcc->vcc.traffic == ATMIO_TRAFFIC_CBR)
707 patm_tst_free(sc, vcc);
709 patm_sram_write4(sc, scd->sram + 0, 0, 0, 0, 0);
710 patm_sram_write4(sc, scd->sram + 4, 0, 0, 0, 0);
711 patm_scd_free(sc, scd);
714 vcc->vflags &= ~PATM_VCC_TX_CLOSING;
716 if (vcc->vcc.flags & ATMIO_FLAG_ASYNC) {
717 patm_tx_vcc_closed(sc, vcc);
718 if (!(vcc->vflags & PATM_VCC_OPEN))
719 patm_vcc_closed(sc, vcc);
/* wake up a synchronous closer */
721 cv_signal(&sc->vcc_cv);
724 patm_debug(sc, VCC, "%u buffers still on card",
727 if (vcc->vcc.traffic == ATMIO_TRAFFIC_ABR) {
728 /* insist on speeding up transmission for ABR */
729 patm_nor_write(sc, IDT_NOR_TCMDQ,
730 IDT_TCMDQ_UIER(vcc->cid, 0xff));
731 patm_nor_write(sc, IDT_NOR_TCMDQ,
732 IDT_TCMDQ_ULACR(vcc->cid, 0xff));
736 } while (last != tag);
739 if (vcc->vcc.traffic == ATMIO_TRAFFIC_ABR) {
/* convert the card's ACRI field back into cells/second;
 * NOTE(review): the exact ACRI encoding is taken from the
 * chip documentation and is not verifiable from this file */
742 acri = (patm_sram_read(sc, 8 * cid + 2) >> IDT_TCT_ACRI_SHIFT)
744 cps = IFP2IFATM(sc->ifp)->mib.pcr * 32 /
745 ((1 << (acri >> 10)) * (acri & 0x3ff));
747 if (cps != vcc->cps) {
748 patm_debug(sc, VCC, "ACRI=%04x CPS=%u", acri, cps);
749 ATMEV_SEND_ACR_CHANGED(IFP2IFATM(sc->ifp), vcc->vcc.vpi,
/* space was freed - try to move more PDUs onto the card */
755 patm_launch(sc, scd);
759 * VBR/ABR connection went idle
760 * Either restart it or set the idle flag.
/*
 * Called when the card reports an idle transmit connection.  If work
 * is still outstanding (buffers on the card or PDUs queued on the
 * SCD), restart the channel and re-run patm_launch().
 */
763 patm_tx_idle(struct patm_softc *sc, u_int cid)
765 struct patm_vcc *vcc;
767 patm_debug(sc, VCC, "idle %u", cid);
769 if ((vcc = sc->vccs[cid]) != NULL &&
770 (vcc->vflags & (PATM_VCC_TX_OPEN | PATM_VCC_TX_CLOSING)) != 0 &&
771 vcc->scd != NULL && (vcc->scd->num_on_card != 0 ||
772 _IF_QLEN(&vcc->scd->q) != 0)) {
774 * If there is any packet outstanding in the SCD re-activate
775 * the channel and kick it.
777 patm_nor_write(sc, IDT_NOR_TCMDQ,
778 IDT_TCMDQ_START(vcc->cid));
780 patm_launch(sc, vcc->scd);
785 * Convert a (24bit) rate to the atm-forum form
786 * Our rate is never larger than 19 bit.
/* normalize the mantissa into 9 bits, counting the exponent e;
 * result layout: nz flag (bit 14) | 5-bit exponent | 9-bit mantissa */
797 while (cps > (1024 - 1)) {
801 return ((1 << 14) | (e << 9) | (cps & 0x1ff));
805 * Do a binary search on the log2rate table to convert the rate
806 * to its log form. This assumes that the ATM-Forum form is monotonically
807 * increasing with the plain cell rate.
/*
 * Returns the table index (the "log" form used by the chip) for a
 * plain cell rate.  The table depends on the PHY speed (25 vs 155M).
 */
810 rate2log(struct patm_softc *sc, u_int rate)
813 u_int lower, upper, mid, done, val, afr;
815 afr = cps2atmf(rate);
817 if (sc->flags & PATM_25M)
818 tbl = patm_rtables25;
820 tbl = patm_rtables155;
/* classic binary search over the ATM-Forum encoded rates */
826 mid = (lower + upper) / 2;
827 val = tbl[mid] >> 17;
828 if (val == afr || upper == lower)
/* don't overshoot: step back one entry when we ended above afr */
835 if (val > afr && mid > 0)
841 * Return the table index for an increase table. The increase table
842 * must be selected not by the RIF itself, but by PCR/2^RIF. Each table
843 * represents an additive increase of a cell rate that can be computed
844 * from the first table entry (the value in this entry will not be clamped
/*
 * ABR additive-increase-rate table selection.  The helper macros unpack
 * 16-bit entries from the packed rate tables and convert between the
 * fixed-point difference encoding and ATM-Forum rate form.
 * NOTE(review): table layout constants (512 offset, 128 stride) come
 * from the generated rate tables, not visible in this file.
 */
848 get_air_table(struct patm_softc *sc, u_int rif, u_int pcr)
851 u_int increase, base, lair0, ret, t, cps;
853 #define GET_ENTRY(TAB, IDX) (0xffff & ((IDX & 1) ? \
854 (tbl[512 + (IDX / 2) + 128 * (TAB)] >> 16) : \
855 (tbl[512 + (IDX / 2) + 128 * (TAB)])))
860 #define DIFF_TO_FP(D) (((D) & ((1 << MANT_BITS) - 1)) << ((D) >> MANT_BITS))
861 #define AFR_TO_INT(A) ((1 << (((A) >> 9) & 0x1f)) * \
862 (512 + ((A) & 0x1ff)) / 512 * ((A) >> 14))
864 if (sc->flags & PATM_25M)
865 tbl = patm_rtables25;
867 tbl = patm_rtables155;
/* clamp the rate increase factor to the number of tables */
868 if (rif >= patm_rtables_ntab)
869 rif = patm_rtables_ntab - 1;
870 increase = pcr >> rif;
873 for (t = 0; t < patm_rtables_ntab; t++) {
874 /* get base rate of this table */
875 base = GET_ENTRY(t, 0);
876 /* convert this to fixed point */
877 lair0 = DIFF_TO_FP(base) >> FRAC_BITS;
879 /* get the CPS from the log2rate table */
880 cps = AFR_TO_INT(tbl[lair0] >> 17) - 10;
/*
 * Build the 8-word transmit connection table (TCT) entry for a VCC
 * and write it to SRAM at cid * 8.  The layout depends on the traffic
 * class (UBR/CBR/VBR/ABR); CBR additionally gets its TST slots
 * allocated here, and VBR/ABR debit sc->bwrem.
 */
894 patm_tct_setup(struct patm_softc *sc, struct patm_scd *scd,
895 struct patm_vcc *vcc)
900 u_int tmp, crm, rdf, cdf, air, mcr;
902 bzero(tct, sizeof(tct));
904 /* special case for UBR0 */
906 tct[0] = IDT_TCT_UBR | scd->sram;
907 tct[7] = IDT_TCT_UBR_FLG;
911 switch (vcc->vcc.traffic) {
913 case ATMIO_TRAFFIC_CBR:
914 patm_tst_alloc(sc, vcc);
915 tct[0] = IDT_TCT_CBR | scd->sram;
916 /* must account for what was really allocated */
919 case ATMIO_TRAFFIC_VBR:
920 /* compute parameters for the TCT */
/* initial ER from PCR, sustained LACR from SCR (log form) */
921 scd->init_er = rate2log(sc, vcc->vcc.tparam.pcr);
922 scd->lacr = rate2log(sc, vcc->vcc.tparam.scr);
924 /* get the 16-bit fraction of SCR/PCR
925 * both a 24 bit. Do it the simple way. */
926 token = (uint64_t)(vcc->vcc.tparam.scr << 16) /
929 patm_debug(sc, VCC, "VBR: init_er=%u lacr=%u "
930 "token=0x%04x\n", scd->init_er, scd->lacr, token);
932 tct[0] = IDT_TCT_VBR | scd->sram;
933 tct[2] = IDT_TCT_TSIF;
934 tct[3] = IDT_TCT_IDLE | IDT_TCT_HALT;
935 tct[4] = IDT_TCT_MAXIDLE;
/* clamp the maximum burst size to the 8-bit field */
937 if ((mbs = vcc->vcc.tparam.mbs) > 0xff)
939 tct[6] = (mbs << 16) | token;
940 sc->bwrem -= vcc->vcc.tparam.scr;
943 case ATMIO_TRAFFIC_ABR:
944 scd->init_er = rate2log(sc, vcc->vcc.tparam.pcr);
945 scd->lacr = rate2log(sc, vcc->vcc.tparam.icr);
946 mcr = rate2log(sc, vcc->vcc.tparam.mcr);
/* CRM = ceil(log2(ceil(TBE / NRM))) */
949 tmp = vcc->vcc.tparam.tbe / vcc->vcc.tparam.nrm;
950 if (tmp * vcc->vcc.tparam.nrm < vcc->vcc.tparam.tbe)
952 for (crm = 1; tmp > (1 << crm); crm++)
957 air = get_air_table(sc, vcc->vcc.tparam.rif,
958 vcc->vcc.tparam.pcr);
/* clamp rate decrease/cutoff factors to the available tables */
960 if ((rdf = vcc->vcc.tparam.rdf) >= patm_rtables_ntab)
961 rdf = patm_rtables_ntab - 1;
962 rdf += patm_rtables_ntab + 4;
964 if ((cdf = vcc->vcc.tparam.cdf) >= patm_rtables_ntab)
965 cdf = patm_rtables_ntab - 1;
966 cdf += patm_rtables_ntab + 4;
968 patm_debug(sc, VCC, "ABR: init_er=%u lacr=%u mcr=%u "
969 "crm=%u air=%u rdf=%u cdf=%u\n", scd->init_er,
970 scd->lacr, mcr, crm, air, rdf, cdf);
972 tct[0] = IDT_TCT_ABR | scd->sram;
973 tct[1] = crm << IDT_TCT_CRM_SHIFT;
974 tct[3] = IDT_TCT_HALT | IDT_TCT_IDLE |
975 (4 << IDT_TCT_NAGE_SHIFT);
976 tct[4] = mcr << IDT_TCT_LMCR_SHIFT;
977 tct[5] = (cdf << IDT_TCT_CDF_SHIFT) |
978 (rdf << IDT_TCT_RDF_SHIFT) |
979 (air << IDT_TCT_AIR_SHIFT);
981 sc->bwrem -= vcc->vcc.tparam.mcr;
/* commit the TCT entry to card SRAM and dump it when debugging */
986 patm_sram_write4(sc, sram + 0, tct[0], tct[1], tct[2], tct[3]);
987 patm_sram_write4(sc, sram + 4, tct[4], tct[5], tct[6], tct[7]);
989 patm_debug(sc, VCC, "TCT[%u]: %08x %08x %08x %08x %08x %08x %08x %08x",
990 sram / 8, patm_sram_read(sc, sram + 0),
991 patm_sram_read(sc, sram + 1), patm_sram_read(sc, sram + 2),
992 patm_sram_read(sc, sram + 3), patm_sram_read(sc, sram + 4),
993 patm_sram_read(sc, sram + 5), patm_sram_read(sc, sram + 6),
994 patm_sram_read(sc, sram + 7));
/*
 * Kick a freshly set up connection: program the initial explicit rate
 * and the allowed cell rate via the transmit command queue.
 */
1001 patm_tct_start(struct patm_softc *sc, struct patm_vcc *vcc)
1004 patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_UIER(vcc->cid,
1005 vcc->scd->init_er));
1006 patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_SLACR(vcc->cid,
/* Debug helper: dump the 8-word TCT entry of a connection from SRAM. */
1011 patm_tct_print(struct patm_softc *sc, u_int cid)
1014 u_int sram = cid * 8;
1017 patm_debug(sc, VCC, "TCT[%u]: %08x %08x %08x %08x %08x %08x %08x %08x",
1018 sram / 8, patm_sram_read(sc, sram + 0),
1019 patm_sram_read(sc, sram + 1), patm_sram_read(sc, sram + 2),
1020 patm_sram_read(sc, sram + 3), patm_sram_read(sc, sram + 4),
1021 patm_sram_read(sc, sram + 5), patm_sram_read(sc, sram + 6),
1022 patm_sram_read(sc, sram + 7));
/*
 * Initialize a scheduling channel descriptor in card SRAM: first word
 * points at the SCQ's physical base, then dump the result when
 * debugging is enabled.
 */
1029 patm_scd_setup(struct patm_softc *sc, struct patm_scd *scd)
1031 patm_sram_write4(sc, scd->sram + 0,
1032 scd->phy, 0, 0xffffffff, 0);
1033 patm_sram_write4(sc, scd->sram + 4,
1036 patm_debug(sc, VCC, "SCD(%x): %08x %08x %08x %08x %08x %08x %08x %08x",
1038 patm_sram_read(sc, scd->sram + 0),
1039 patm_sram_read(sc, scd->sram + 1),
1040 patm_sram_read(sc, scd->sram + 2),
1041 patm_sram_read(sc, scd->sram + 3),
1042 patm_sram_read(sc, scd->sram + 4),
1043 patm_sram_read(sc, scd->sram + 5),
1044 patm_sram_read(sc, scd->sram + 6),
1045 patm_sram_read(sc, scd->sram + 7));
1049 * Grow the TX map table if possible
/*
 * Allocate up to PATM_CFG_TXMAPS_STEP additional DMA maps (bounded by
 * tx_maxmaps) and put them on the free list.  Uses M_NOWAIT / NOWAIT
 * allocations since this runs in the transmit path.
 */
1052 patm_txmaps_grow(struct patm_softc *sc)
1055 struct patm_txmap *map;
1058 if (sc->tx_nmaps >= sc->tx_maxmaps)
1061 for (i = sc->tx_nmaps; i < sc->tx_nmaps + PATM_CFG_TXMAPS_STEP; i++) {
1062 map = uma_zalloc(sc->tx_mapzone, M_NOWAIT);
1063 err = bus_dmamap_create(sc->tx_tag, 0, &map->map);
/* creation failed - return the zone item and stop growing */
1065 uma_zfree(sc->tx_mapzone, map);
1068 SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link);
1075 * Allocate a transmission map
/*
 * Pop a DMA map from the free list, growing the list on demand.
 * Returns NULL if the list is empty even after growing.
 */
1077 static struct patm_txmap *
1078 patm_txmap_get(struct patm_softc *sc)
1080 struct patm_txmap *map;
1082 if ((map = SLIST_FIRST(&sc->tx_maps_free)) == NULL) {
1083 patm_txmaps_grow(sc);
1084 if ((map = SLIST_FIRST(&sc->tx_maps_free)) == NULL)
1087 SLIST_REMOVE_HEAD(&sc->tx_maps_free, link);
1092 * Look whether we are in the process of updating the TST on the chip.
1093 * If we are set the flag that we need another update.
1094 * If we are not start the update.
1096 static __inline void
1097 patm_tst_start(struct patm_softc *sc)
1100 if (!(sc->tst_state & TST_PENDING)) {
1101 sc->tst_state |= TST_PENDING;
1102 if (!(sc->tst_state & TST_WAIT)) {
1103 /* timer not running */
1104 patm_tst_update(sc);
1110 * Allocate TST entries to a CBR connection
/*
 * Spread the connection's slots as evenly as possible over the soft
 * TST: step through the table with a fixed increment (pmax / slots)
 * and claim the nearest free (VBR) entry for this CID.  The hardware
 * tables are updated later via patm_tst_start()/patm_tst_update().
 * Runs under the tst lock.
 */
1113 patm_tst_alloc(struct patm_softc *sc, struct patm_vcc *vcc)
1120 mtx_lock(&sc->tst_lock);
1122 /* compute the number of slots we need, make sure to get at least
1123 * the specified PCR */
1124 slots = cbr2slots(sc, vcc);
1125 vcc->scd->slots = slots;
1126 sc->bwrem -= slots2cr(sc, slots);
1128 patm_debug(sc, TST, "tst_alloc: cbr=%u link=%u tst=%u slots=%u",
1129 vcc->vcc.tparam.pcr, IFP2IFATM(sc->ifp)->mib.pcr, sc->mmap->tst_size, slots);
1131 qmax = sc->mmap->tst_size - 1;
/* ideal spacing between two slots of this connection */
1134 pspc = pmax / slots;
1136 pptr = pspc >> 1; /* starting point */
1144 if (sc->tst_soft[qptr] != IDT_TST_VBR) {
1145 /* used - try next */
1149 patm_debug(sc, TST, "slot[%u] = %u.%u diff=%d", qptr,
1150 vcc->vcc.vpi, vcc->vcc.vci, (int)qptr - (int)last);
/* claim the slot; TST_BOTH marks it dirty for both HW tables */
1153 sc->tst_soft[qptr] = IDT_TST_CBR | vcc->cid | TST_BOTH;
1156 if ((pptr += pspc) >= pmax)
1163 mtx_unlock(&sc->tst_lock);
1167 * Free a CBR connection's TST entries
/*
 * Revert every soft-TST slot owned by this CID back to VBR (marking it
 * dirty for both hardware tables) and credit the bandwidth back.
 */
1170 patm_tst_free(struct patm_softc *sc, struct patm_vcc *vcc)
1174 mtx_lock(&sc->tst_lock);
1175 for (i = 0; i < sc->mmap->tst_size - 1; i++) {
1176 if ((sc->tst_soft[i] & IDT_TST_MASK) == vcc->cid) {
1177 sc->tst_soft[i] = IDT_TST_VBR | TST_BOTH;
1181 sc->bwrem += slots2cr(sc, vcc->scd->slots);
1183 mtx_unlock(&sc->tst_lock);
1187 * Write the soft TST into the idle incore TST and start the wait timer.
1188 * We assume that we hold the tst lock.
/*
 * The chip runs one of two in-SRAM TST copies.  Copy the dirty soft
 * entries into the currently idle copy, redirect the active copy's
 * trailing jump to the idle one, then arm the timer that waits for
 * the chip to actually take the branch (see patm_tst_timer()).
 */
1191 patm_tst_update(struct patm_softc *sc)
1193 u_int flag; /* flag to clear from soft TST */
1194 u_int idle; /* the idle TST */
1195 u_int act; /* the active TST */
1198 if (sc->tst_state & TST_ACT1) {
1207 /* update the idle one */
1208 for (i = 0; i < sc->mmap->tst_size - 1; i++)
1209 if (sc->tst_soft[i] & flag) {
1210 patm_sram_write(sc, sc->tst_base[idle] + i,
1211 sc->tst_soft[i] & ~TST_BOTH);
1212 sc->tst_soft[i] &= ~flag;
1214 /* the used one jump to the idle one */
1215 patm_sram_write(sc, sc->tst_jump[act],
1216 IDT_TST_BR | (sc->tst_base[idle] << 2));
1218 /* wait for the chip to jump */
1219 sc->tst_state &= ~TST_PENDING;
1220 sc->tst_state |= TST_WAIT;
1222 callout_reset(&sc->tst_callout, 1, patm_tst_timer, sc);
1226 * Timer for TST updates
/*
 * Callout handler: poll whether the chip has left the previously
 * active TST copy.  While it is still inside, re-arm for one more
 * tick.  Once it has jumped, restore the old copy's self-jump, flip
 * the active-table flag and, if another update was requested in the
 * meantime (TST_PENDING), start it.
 */
1229 patm_tst_timer(void *p)
1231 struct patm_softc *sc = p;
1232 u_int act; /* active TST */
1233 u_int now; /* current place in TST */
1235 mtx_lock(&sc->tst_lock);
1237 if (sc->tst_state & TST_WAIT) {
1238 /* ignore the PENDING state while we are waiting for
1239 * the chip to switch tables. Once the switch is done,
1240 * we will again lock at PENDING */
1241 act = (sc->tst_state & TST_ACT1) ? 1 : 0;
1242 now = patm_nor_read(sc, IDT_NOR_NOW) >> 2;
1243 if (now >= sc->tst_base[act] && now <= sc->tst_jump[act]) {
/* chip is still inside the old table - try again later */
1245 callout_reset(&sc->tst_callout, 1, patm_tst_timer, sc);
1248 sc->tst_state &= ~TST_WAIT;
1249 /* change back jump */
1250 patm_sram_write(sc, sc->tst_jump[act],
1251 IDT_TST_BR | (sc->tst_base[act] << 2));
/* the other copy is active from now on */
1254 sc->tst_state ^= TST_ACT1;
1257 if (sc->tst_state & TST_PENDING)
1258 /* we got another update request while the timer was running. */
1259 patm_tst_update(sc);
1262 mtx_unlock(&sc->tst_lock);
1266 dump_scd(struct patm_softc *sc, struct patm_scd *scd)
1270 for (i = 0; i < IDT_TSQE_TAG_SPACE; i++)
1271 printf("on_card[%u] = %p\n", i, scd->on_card[i]);
1272 printf("space=%u tag=%u num_on_card=%u last_tag=%u\n",
1273 scd->space, scd->tag, scd->num_on_card, scd->last_tag);