1 /* $NetBSD: midway.c,v 1.30 1997/09/29 17:40:38 chuck Exp $ */
2 /* (sync'd to midway.c 1.68) */
5 * Copyright (c) 1996 Charles D. Cranor and Washington University.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Charles D. Cranor and
19 * Washington University.
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
39 * m i d w a y . c e n i 1 5 5 d r i v e r
41 * author: Chuck Cranor <chuck@ccrc.wustl.edu>
42 * started: spring, 1996 (written from scratch).
44 * notes from the author:
45 * Extra special thanks go to Werner Almesberger, EPFL LRC. Werner's
46 * ENI driver was especially useful in figuring out how this card works.
47 * I would also like to thank Werner for promptly answering email and being
52 #define EN_DDBHOOK 1 /* compile in ddb functions */
55 * Note on EN_ENIDMAFIX: the byte aligner on the ENI version of the card
56 * appears to be broken. it works just fine if there is no load... however
57 * when the card is loaded the data get corrupted. to see this, one only
58 * has to use "telnet" over ATM. do the following command in "telnet":
59 * cat /usr/share/misc/termcap
60 * "telnet" seems to generate lots of 1023 byte mbufs (which make great
61 * use of the byte aligner). watch "netstat -s" for checksum errors.
63 * I further tested this by adding a function that compared the transmit
64 * data on the card's SRAM with the data in the mbuf chain _after_ the
65 * "transmit DMA complete" interrupt. using the "telnet" test I got data
66 * mismatches where the byte-aligned data should have been. using ddb
67 * and en_dumpmem() I verified that the DTQs fed into the card were
68 * absolutely correct. thus, we are forced to conclude that the ENI
69 * hardware is buggy. note that the Adaptec version of the card works
70 * just fine with byte DMA.
72 * bottom line: we set EN_ENIDMAFIX to 1 to avoid byte DMAs on the ENI
76 #if defined(DIAGNOSTIC) && !defined(EN_DIAG)
77 #define EN_DIAG /* link in with master DIAG option */
80 #define EN_COUNT(X) (X)++
88 * This macro removes almost all the EN_DEBUG conditionals in the code that make
89 * the code a good deal less readable.
91 #define DBG(SC, FL, PRINT) do { \
92 if ((SC)->debug & DBG_##FL) { \
93 device_printf((SC)->dev, "%s: "#FL": ", __func__); \
100 DBG_INIT = 0x0001, /* debug attach/detach */
101 DBG_TX = 0x0002, /* debug transmitting */
102 DBG_SERV = 0x0004, /* debug service interrupts */
103 DBG_IOCTL = 0x0008, /* debug ioctls */
104 DBG_VC = 0x0010, /* debug VC handling */
105 DBG_INTR = 0x0020, /* debug interrupts */
106 DBG_DMA = 0x0040, /* debug DMA probing */
107 DBG_IPACKETS = 0x0080, /* print input packets */
108 DBG_REG = 0x0100, /* print all register access */
109 DBG_LOCK = 0x0200, /* debug locking */
114 #define DBG(SC, FL, PRINT) do { } while (0)
116 #endif /* EN_DEBUG */
118 #include "opt_inet.h"
119 #include "opt_natm.h"
127 #include <sys/param.h>
128 #include <sys/systm.h>
129 #include <sys/queue.h>
130 #include <sys/sockio.h>
131 #include <sys/socket.h>
132 #include <sys/mbuf.h>
133 #include <sys/endian.h>
134 #include <sys/stdint.h>
135 #include <sys/lock.h>
136 #include <sys/mutex.h>
137 #include <sys/condvar.h>
141 #include <net/if_media.h>
142 #include <net/if_atm.h>
144 #if defined(NATM) || defined(INET) || defined(INET6)
145 #include <netinet/in.h>
146 #if defined(INET) || defined(INET6)
147 #include <netinet/if_atm.h>
152 #include <netnatm/natm.h>
156 #include <machine/bus.h>
157 #include <sys/rman.h>
158 #include <sys/module.h>
159 #include <sys/sysctl.h>
160 #include <sys/malloc.h>
161 #include <machine/resource.h>
162 #include <dev/utopia/utopia.h>
163 #include <dev/en/midwayreg.h>
164 #include <dev/en/midwayvar.h>
172 #define EN_TXHIWAT (64 * 1024) /* max 64 KB waiting to be DMAd out */
175 SYSCTL_DECL(_hw_atm);
180 * The plan is indexed by the number of words to transfer.
181 * The maximum index is 15 for 60 words.
184 uint8_t bcode; /* code */
185 uint8_t divshift; /* byte divisor */
/*
 * DMA burst plan, indexed by the number of 32-bit words to transfer
 * (up to 16; entry 0 is unused).  bcode is the card's burst code and
 * divshift converts a byte count into a transfer count for that burst
 * size (used as "count = cnt >> divshift" in en_txdma_load()).
 */
188 static const struct en_dmatab en_dmaplan[] = {
189 { 0, 0 }, /* 0 */ { MIDDMA_WORD, 2}, /* 1 */
190 { MIDDMA_2WORD, 3}, /* 2 */ { MIDDMA_WORD, 2}, /* 3 */
191 { MIDDMA_4WORD, 4}, /* 4 */ { MIDDMA_WORD, 2}, /* 5 */
192 { MIDDMA_2WORD, 3}, /* 6 */ { MIDDMA_WORD, 2}, /* 7 */
193 { MIDDMA_8WORD, 5}, /* 8 */ { MIDDMA_WORD, 2}, /* 9 */
194 { MIDDMA_2WORD, 3}, /* 10 */ { MIDDMA_WORD, 2}, /* 11 */
195 { MIDDMA_4WORD, 4}, /* 12 */ { MIDDMA_WORD, 2}, /* 13 */
196 { MIDDMA_2WORD, 3}, /* 14 */ { MIDDMA_WORD, 2}, /* 15 */
197 { MIDDMA_16WORD,6}, /* 16 */
204 int en_dump(int unit, int level);
205 int en_dumpmem(int,int,int);
207 static void en_close_finish(struct en_softc *sc, struct en_vcc *vc);
/*
 * Softc mutex helpers.  The DBG(LOCK) trace records the call-site line.
 * NOTE(review): the macro bodies reference lowercase `sc', not the SC
 * argument -- this only works because every caller's softc variable is
 * named sc.  Confirm before using with any other variable name.
 */
209 #define EN_LOCK(SC) do { \
210 DBG(SC, LOCK, ("ENLOCK %d\n", __LINE__)); \
211 mtx_lock(&sc->en_mtx); \
213 #define EN_UNLOCK(SC) do { \
214 DBG(SC, LOCK, ("ENUNLOCK %d\n", __LINE__)); \
215 mtx_unlock(&sc->en_mtx); \
/* assert the softc mutex is held by the current thread */
217 #define EN_CHECKLOCK(sc) mtx_assert(&sc->en_mtx, MA_OWNED)
220 * While a transmit mbuf is waiting to get transmit DMA resources we
221 * need to keep some information with it. We don't want to allocate
222 * additional memory for this so we stuff it into free fields in the
223 * mbuf packet header. Neither the checksum fields nor the rcvif field are used
226 #define TX_AAL5 0x1 /* transmit AAL5 PDU */
227 #define TX_HAS_TBD 0x2 /* TBD did fit into mbuf */
228 #define TX_HAS_PAD 0x4 /* padding did fit into mbuf */
229 #define TX_HAS_PDU 0x8 /* PDU trailer did fit into mbuf */
/*
 * Stash per-packet TX state in otherwise-unused pkthdr fields while the
 * mbuf waits on a channel queue: csum_data holds VCI + flags, csum_flags
 * holds datalen (low 16 bits) + pad (6 bits at bit 16), and rcvif holds
 * the DMA map pointer.  MBUF_GET_TX reverses the encoding.
 * NOTE(review): PAD is used unparenthesized in (PAD & 0x3f) -- safe only
 * for simple-expression arguments; confirm all call sites.
 */
231 #define MBUF_SET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do { \
232 (M)->m_pkthdr.csum_data = (VCI) | ((FLAGS) << MID_VCI_BITS); \
233 (M)->m_pkthdr.csum_flags = ((DATALEN) & 0xffff) | \
234 ((PAD & 0x3f) << 16); \
235 (M)->m_pkthdr.rcvif = (void *)(MAP); \
/* decode the fields stored by MBUF_SET_TX */
238 #define MBUF_GET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do { \
239 (VCI) = (M)->m_pkthdr.csum_data & ((1 << MID_VCI_BITS) - 1); \
240 (FLAGS) = ((M)->m_pkthdr.csum_data >> MID_VCI_BITS) & 0xf; \
241 (DATALEN) = (M)->m_pkthdr.csum_flags & 0xffff; \
242 (PAD) = ((M)->m_pkthdr.csum_flags >> 16) & 0x3f; \
243 (MAP) = (void *)((M)->m_pkthdr.rcvif); \
/* advance CUR by VAL within the circular [START, STOP) card buffer */
247 #define EN_WRAPADD(START, STOP, CUR, VAL) do { \
248 (CUR) = (CUR) + (VAL); \
249 if ((CUR) >= (STOP)) \
250 (CUR) = (START) + ((CUR) - (STOP)); \
/* byte offset from START converted to a 32-bit-word index */
253 #define WORD_IDX(START, X) (((X) - (START)) / sizeof(uint32_t))
/* OR in the queue-END marker; the Adaptec variant keeps it shifted */
255 #define SETQ_END(SC, VAL) ((SC)->is_adaptec ? \
256 ((VAL) | (MID_DMA_END >> 4)) : \
257 ((VAL) | (MID_DMA_END)))
/*
 * Encode (slot, length) for sc->dtq[]/sc->drq[]; the 0x80000 bit keeps
 * the encoded value non-zero so 0 can mean "no entry".
 */
267 #define EN_DQ_MK(SLOT, LEN) (((SLOT) << 20) | (LEN) | (0x80000))
268 #define EN_DQ_SLOT(X) ((X) >> 20)
269 #define EN_DQ_LEN(X) ((X) & 0x3ffff)
274 static uma_zone_t en_vcc_zone;
276 /***********************************************************************/
279 * en_read{x}: read a word from the card. These are the only functions
280 * that read from the card.
/*
 * en_readx(): raw card read (register or on-board SRAM) without the
 * DBG(REG) trace that en_read() emits.  Panics on an offset that is
 * beyond the card window or not 32-bit aligned.
 * NOTE(review): interior lines are elided in this listing.
 */
282 static __inline uint32_t
283 en_readx(struct en_softc *sc, uint32_t r)
288 if (r > MID_MAXOFF || (r % 4))
289 panic("en_read out of range, r=0x%x", r);
291 v = bus_space_read_4(sc->en_memt, sc->en_base, r);
/*
 * en_read(): read a 32-bit word from the card with debug tracing.
 * Same range/alignment panic as en_readx().
 * NOTE(review): interior lines are elided in this listing.
 */
295 static __inline uint32_t
296 en_read(struct en_softc *sc, uint32_t r)
301 if (r > MID_MAXOFF || (r % 4))
302 panic("en_read out of range, r=0x%x", r);
304 v = bus_space_read_4(sc->en_memt, sc->en_base, r);
305 DBG(sc, REG, ("en_read(%#x) -> %08x", r, v));
310 * en_write: write a word to the card. This is the only function that
311 * writes to the card.
/*
 * en_write(): write a 32-bit word to the card; the single write path,
 * with the same range/alignment panic as the read side.
 */
314 en_write(struct en_softc *sc, uint32_t r, uint32_t v)
317 if (r > MID_MAXOFF || (r % 4))
318 panic("en_write out of range, r=0x%x", r);
320 DBG(sc, REG, ("en_write(%#x) <- %08x", r, v));
321 bus_space_write_4(sc->en_memt, sc->en_base, r, v);
325 * en_k2sz: convert KBytes to a size parameter (a log2)
/* NOTE(review): only the 128K case is visible in this listing */
338 case 128: return (7);
/* en_log2 is just en_k2sz -- the table is a log2 for these values */
344 #define en_log2(X) en_k2sz(X)
348 * en_b2sz: convert a DMA burst code to its byte size
/* each MIDDMA_nWORD code transfers n 32-bit words */
354 case MIDDMA_WORD: return (1*4);
356 case MIDDMA_2WORD: return (2*4);
358 case MIDDMA_4WORD: return (4*4);
360 case MIDDMA_8WORD: return (8*4);
/* 16WMAYBE is treated like a full 16-word burst for sizing */
361 case MIDDMA_16WMAYBE:
362 case MIDDMA_16WORD: return (16*4);
371 * en_sz2b: convert a burst size (bytes) to DMA burst code
/* inverse of en_b2sz() for the power-of-two burst sizes */
377 case 1*4: return (MIDDMA_WORD);
378 case 2*4: return (MIDDMA_2WORD);
379 case 4*4: return (MIDDMA_4WORD);
380 case 8*4: return (MIDDMA_8WORD);
381 case 16*4: return (MIDDMA_16WORD);
/*
 * en_dump_packet(): hex-dump an mbuf chain's contents for debugging;
 * also prints the header length and the sum of the m_len fields so a
 * mismatch with m_pkthdr.len is visible.
 * NOTE(review): interior lines are elided in this listing.
 */
393 en_dump_packet(struct en_softc *sc, struct mbuf *m)
395 int plen = m->m_pkthdr.len;
401 device_printf(sc->dev, "packet len=%d", plen);
404 ptr = mtod(m, u_char *);
405 for (len = 0; len < m->m_len; len++, pos++, ptr++) {
410 printf(" %02x", *ptr);
416 printf("sum of m_len=%u\n", totlen);
420 /*********************************************************************/
426 * Map constructor for a MAP.
428 * This is called each time when a map is allocated
429 * from the pool and about to be returned to the user. Here we actually
430 * allocate the map if there isn't one. The problem is that we may fail
431 * to allocate the DMA map yet have no means to signal this error. Therefore
432 * when allocating a map, the call must check that there is a map. An
433 * additional problem is, that i386 maps will be NULL, yet are ok and must
434 * be freed so let's use a flag to signal allocation.
436 * Caveat: we have no way to know that we are called from an interrupt context
437 * here. We rely on the fact, that bus_dmamap_create uses M_NOWAIT in all
440 * LOCK: any, not needed
/*
 * UMA constructor for TX DMA maps: create the bus_dma map and mark the
 * item allocated.  See the comment above -- a creation failure cannot be
 * reported here, so users must check the map before relying on it.
 */
443 en_map_ctor(void *mem, int size, void *arg, int flags)
445 struct en_softc *sc = arg;
446 struct en_map *map = mem;
449 err = bus_dmamap_create(sc->txtag, 0, &map->map);
451 device_printf(sc->dev, "cannot create DMA map %d\n", err);
454 map->flags = ENMAP_ALLOC;
462 * Called when a map is disposed into the zone. If the map is loaded, unload
465 * LOCK: any, not needed
/*
 * UMA destructor: unload the bus_dma map if it is still loaded before
 * the item goes back into the zone.
 */
468 en_map_dtor(void *mem, int size, void *arg)
470 struct en_map *map = mem;
472 if (map->flags & ENMAP_LOADED) {
473 bus_dmamap_unload(map->sc->txtag, map->map);
474 map->flags &= ~ENMAP_LOADED;
481 * This is called each time a map is returned from the zone to the system.
482 * Get rid of the dmamap here.
484 * LOCK: any, not needed
/* UMA fini: destroy the bus_dma map when the item leaves the zone. */
487 en_map_fini(void *mem, int size)
489 struct en_map *map = mem;
491 bus_dmamap_destroy(map->sc->txtag, map->map);
494 /*********************************************************************/
500 * Argument structure to load a transmit DMA map
506 u_int chan; /* transmit channel */
507 u_int datalen; /* length of user data */
509 u_int wait; /* return: out of resources */
513 * TX DMA map loader helper. This function is the callback when the map
514 * is loaded. It should fill the DMA segment descriptors into the hardware.
516 * LOCK: locked, needed
/*
 * en_txdma_load(): bus_dmamap_load_mbuf() callback that turns the DMA
 * segment list into card DTQ entries: prepend the TBD if the mbuf did
 * not carry one, emit (possibly alignment-split) DMA ops per segment,
 * append the AAL5 PDU trailer if needed, record the packet in sc->dtq[]
 * for the completion interrupt, and kick the card's WRTX pointer.
 * Runs with the softc lock held.
 * NOTE(review): interior lines are elided in this listing; comments
 * below only annotate what is visible.
 */
519 en_txdma_load(void *uarg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize,
522 struct txarg *tx = uarg;
523 struct en_softc *sc = tx->sc;
524 struct en_txslot *slot = &sc->txslot[tx->chan];
525 uint32_t cur; /* on-card buffer position (bytes offset) */
526 uint32_t dtq; /* on-card queue position (byte offset) */
527 uint32_t last_dtq; /* last DTQ we have written */
529 u_int free; /* free queue entries on card */
530 u_int needalign, cnt;
531 bus_size_t rest; /* remaining bytes in current segment */
533 bus_dma_segment_t *s;
534 uint32_t count, bcode;
544 last_dtq = 0; /* make gcc happy */
547 * Local macro to add an entry to the transmit DMA area. If there
548 * are no entries left, return. Save the byte offset of the entry
549 * in last_dtq for later use.
551 #define PUT_DTQ_ENTRY(ENI, BCODE, COUNT, ADDR) \
553 EN_COUNT(sc->stats.txdtqout); \
/* ENI-format entry is also used on Adaptec when ENI is forced */
558 en_write(sc, dtq + 0, (ENI || !sc->is_adaptec) ? \
559 MID_MK_TXQ_ENI(COUNT, tx->chan, 0, BCODE) : \
560 MID_MK_TXQ_ADP(COUNT, tx->chan, 0, BCODE)); \
561 en_write(sc, dtq + 4, ADDR); \
/* each DTQ entry is two words (8 bytes) in the circular queue */
563 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, dtq, 8); \
567 * Local macro to generate a DMA entry to DMA cnt bytes. Updates
568 * the current buffer byte offset accordingly.
570 #define DO_DTQ(TYPE) do { \
572 EN_WRAPADD(slot->start, slot->stop, cur, cnt); \
573 DBG(sc, TX, ("tx%d: "TYPE" %u bytes, %ju left, cur %#x", \
574 tx->chan, cnt, (uintmax_t)rest, cur)); \
576 PUT_DTQ_ENTRY(1, bcode, count, addr); \
/* TBD did not fit in the mbuf -- write it to card SRAM here */
581 if (!(tx->flags & TX_HAS_TBD)) {
583 * Prepend the TBD - it did not fit into the first mbuf
585 tmp = MID_TBD_MK1((tx->flags & TX_AAL5) ?
586 MID_TBD_AAL5 : MID_TBD_NOAAL5,
587 sc->vccs[tx->vci]->txspeed,
588 tx->m->m_pkthdr.len / MID_ATMDATASZ);
589 en_write(sc, cur, tmp);
590 EN_WRAPADD(slot->start, slot->stop, cur, 4);
592 tmp = MID_TBD_MK2(tx->vci, 0, 0);
593 en_write(sc, cur, tmp);
594 EN_WRAPADD(slot->start, slot->stop, cur, 4);
596 /* update DMA address */
597 PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
/* walk the DMA segments, emitting one or more DTQs per segment */
600 for (i = 0, s = segs; i < nseg; i++, s++) {
604 if (sc->is_adaptec) {
605 /* adaptec card - simple */
607 /* advance the on-card buffer pointer */
608 EN_WRAPADD(slot->start, slot->stop, cur, rest);
609 DBG(sc, TX, ("tx%d: adp %ju bytes %#jx (cur now 0x%x)",
610 tx->chan, (uintmax_t)rest, (uintmax_t)addr, cur));
612 PUT_DTQ_ENTRY(0, 0, rest, addr);
618 * do we need to do a DMA op to align to the maximum
619 * burst? Note, that we are always 32-bit aligned.
622 (needalign = (addr & sc->bestburstmask)) != 0) {
623 /* compute number of bytes, words and code */
624 cnt = sc->bestburstlen - needalign;
627 count = cnt / sizeof(uint32_t);
628 if (sc->noalbursts) {
631 bcode = en_dmaplan[count].bcode;
632 count = cnt >> en_dmaplan[count].divshift;
637 /* do we need to do a max-sized burst? */
638 if (rest >= sc->bestburstlen) {
639 count = rest >> sc->bestburstshift;
640 cnt = count << sc->bestburstshift;
641 bcode = sc->bestburstcode;
645 /* do we need to do a cleanup burst? */
648 count = rest / sizeof(uint32_t);
649 if (sc->noalbursts) {
652 bcode = en_dmaplan[count].bcode;
653 count = cnt >> en_dmaplan[count].divshift;
/* en_start() must have arranged the padding by now */
659 KASSERT (tx->flags & TX_HAS_PAD, ("PDU not padded"));
661 if ((tx->flags & TX_AAL5) && !(tx->flags & TX_HAS_PDU)) {
663 * Append the AAL5 PDU trailer
665 tmp = MID_PDU_MK1(0, 0, tx->datalen);
666 en_write(sc, cur, tmp);
667 EN_WRAPADD(slot->start, slot->stop, cur, 4);
669 en_write(sc, cur, 0);
670 EN_WRAPADD(slot->start, slot->stop, cur, 4);
672 /* update DMA address */
673 PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
676 /* record the end for the interrupt routine */
677 sc->dtq[MID_DTQ_A2REG(last_dtq)] =
678 EN_DQ_MK(tx->chan, tx->m->m_pkthdr.len);
680 /* set the end flag in the last descriptor */
681 en_write(sc, last_dtq + 0, SETQ_END(sc, en_read(sc, last_dtq + 0)));
/* tell the card about the new DTQ entries */
692 en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_us));
696 * en_txdma: start transmit DMA on the given channel, if possible
698 * This is called from two places: when we got new packets from the upper
699 * layer or when we found that buffer space has freed up during interrupt
702 * LOCK: locked, needed
/*
 * en_txdma(): pull the next mbuf off the slot's wait queue, attach the
 * shared padding buffer if needed, load it onto its DMA map (which
 * drives en_txdma_load()), update stats/bpf, and move the mbuf to the
 * slot's in-DMA queue.  On resource shortage the mbuf is prepended back
 * onto the wait queue; on hard error it is dropped.
 * Called with the softc lock held, from en_start() and from the
 * interrupt path when TX space frees up (per the comment above).
 * NOTE(review): interior lines are elided in this listing.
 */
705 en_txdma(struct en_softc *sc, struct en_txslot *slot)
713 DBG(sc, TX, ("tx%td: starting ...", slot - sc->txslot));
715 bzero(&tx, sizeof(tx));
716 tx.chan = slot - sc->txslot;
720 * get an mbuf waiting for DMA
722 _IF_DEQUEUE(&slot->q, tx.m);
724 DBG(sc, TX, ("tx%td: ...done!", slot - sc->txslot));
/* recover VCI/flags/datalen/pad/map stashed by en_start() */
727 MBUF_GET_TX(tx.m, tx.vci, tx.flags, tx.datalen, pad, map);
730 * note: don't use the entire buffer space. if WRTX becomes equal
731 * to RDTX, the transmitter stops assuming the buffer is empty! --kjc
733 if (tx.m->m_pkthdr.len >= slot->bfree) {
734 EN_COUNT(sc->stats.txoutspace);
735 DBG(sc, TX, ("tx%td: out of transmit space", slot - sc->txslot));
740 if (!(tx.flags & TX_HAS_PAD)) {
742 /* Append the padding buffer */
743 (void)m_length(tx.m, &lastm);
744 lastm->m_next = sc->padbuf;
745 sc->padbuf->m_len = pad;
747 tx.flags |= TX_HAS_PAD;
751 * Try to load that map
753 error = bus_dmamap_load_mbuf(sc->txtag, map->map, tx.m,
754 en_txdma_load, &tx, BUS_DMA_NOWAIT);
/* detach the shared padbuf again before error handling */
757 lastm->m_next = NULL;
760 device_printf(sc->dev, "loading TX map failed %d\n",
764 map->flags |= ENMAP_LOADED;
766 /* probably not enough space */
767 bus_dmamap_unload(map->sc->txtag, map->map);
768 map->flags &= ~ENMAP_LOADED;
771 DBG(sc, TX, ("tx%td: out of transmit DTQs", slot - sc->txslot));
775 EN_COUNT(sc->stats.launch);
776 sc->ifp->if_opackets++;
778 sc->vccs[tx.vci]->opackets++;
779 sc->vccs[tx.vci]->obytes += tx.datalen;
782 if (bpf_peers_present(sc->ifp->if_bpf)) {
784 * adjust the top of the mbuf to skip the TBD if present
785 * before passing the packet to bpf.
786 * Also remove padding and the PDU trailer. Assume both of
787 * them to be in the same mbuf. pktlen, m_len and m_data
788 * are not needed anymore so we can change them.
790 if (tx.flags & TX_HAS_TBD) {
791 tx.m->m_data += MID_TBD_SIZE;
792 tx.m->m_len -= MID_TBD_SIZE;
794 tx.m->m_pkthdr.len = m_length(tx.m, &lastm);
795 if (tx.m->m_pkthdr.len > tx.datalen) {
796 lastm->m_len -= tx.m->m_pkthdr.len - tx.datalen;
797 tx.m->m_pkthdr.len = tx.datalen;
800 bpf_mtap(sc->ifp->if_bpf, tx.m);
805 * do some housekeeping and get the next packet
807 slot->bfree -= tx.m->m_pkthdr.len;
808 _IF_ENQUEUE(&slot->indma, tx.m);
813 * error handling. This is jumped to when we just want to drop
814 * the packet. Must be unlocked here.
818 uma_zfree(sc->map_zone, map);
820 slot->mbsize -= tx.m->m_pkthdr.len;
/* soft shortage: requeue at the head and retry later */
827 _IF_PREPEND(&slot->q, tx.m);
831 * Create a copy of a single mbuf. It can have either internal or
832 * external data, it may have a packet header. External data is really
833 * copied, so the new buffer is writeable.
835 * LOCK: any, not needed
/*
 * copy_mbuf(): make a writable copy of one mbuf (see comment above).
 * Data is always copied, a cluster is attached when the payload will
 * not fit inline, and the packet header is moved (not copied) to the
 * new mbuf.  M_WAITOK: may sleep, never returns NULL from MGET.
 * NOTE(review): interior lines are elided in this listing.
 */
838 copy_mbuf(struct mbuf *m)
842 MGET(new, M_WAITOK, MT_DATA);
844 if (m->m_flags & M_PKTHDR) {
845 M_MOVE_PKTHDR(new, m);
846 if (m->m_len > MHLEN)
847 MCLGET(new, M_WAITOK);
850 MCLGET(new, M_WAITOK);
853 bcopy(m->m_data, new->m_data, m->m_len);
854 new->m_len = m->m_len;
/* the fresh copy is writable even if the original was read-only */
855 new->m_flags &= ~M_RDONLY;
861 * This function is called when we have an ENI adapter. It fixes the
862 * mbuf chain, so that all addresses and lengths are 4 byte aligned.
863 * The overall length is already padded to multiple of cells plus the
864 * TBD so this must always succeed. The routine can fail, when it
865 * needs to copy an mbuf (this may happen if an mbuf is readonly).
867 * We assume here, that aligning the virtual addresses to 4 bytes also
868 * aligns the physical addresses.
870 * LOCK: locked, needed
/*
 * en_fix_mchain(): for the ENI byte-aligner workaround, force every
 * mbuf in the chain to a 4-byte-aligned address and a 4-byte-multiple
 * length, copying read-only mbufs and borrowing bytes from the
 * following mbuf (or the pad budget) to round lengths up.  Empty
 * trailing mbufs are freed along the way.  Returns NULL if a needed
 * copy fails (mfixfail is counted).
 * NOTE(review): interior lines are elided in this listing.
 */
873 en_fix_mchain(struct en_softc *sc, struct mbuf *m0, u_int *pad)
875 struct mbuf **prev = &m0;
882 d = mtod(m, u_char *);
/* misaligned start: slide the data down within a writable mbuf */
883 if ((off = (uintptr_t)d % sizeof(uint32_t)) != 0) {
884 EN_COUNT(sc->stats.mfixaddr);
886 bcopy(d, d - off, m->m_len);
889 if ((new = copy_mbuf(m)) == NULL) {
890 EN_COUNT(sc->stats.mfixfail);
894 new->m_next = m_free(m);
/* length not a word multiple: top up from the next mbuf / padding */
899 if ((off = m->m_len % sizeof(uint32_t)) != 0) {
900 EN_COUNT(sc->stats.mfixlen);
901 if (!M_WRITABLE(m)) {
902 if ((new = copy_mbuf(m)) == NULL) {
903 EN_COUNT(sc->stats.mfixfail);
907 new->m_next = m_free(m);
910 d = mtod(m, u_char *) + m->m_len;
913 while (m->m_next && m->m_next->m_len == 0)
914 m->m_next = m_free(m->m_next);
916 if (m->m_next == NULL) {
/* overall length is padded to a cell multiple, so pad must cover it */
918 KASSERT(*pad > 0, ("no padding space"));
921 *d++ = *mtod(m->m_next, u_char *);
938 * en_start: start transmitting the next packet that needs to go out
939 * if there is one. We take off all packets from the interface's queue and
940 * put them into the channels queue.
942 * Here we also prepend the transmit packet descriptor and append the padding
943 * and (for aal5) the PDU trailer. This is different from the original driver:
944 * we assume, that allocating one or two additional mbufs is actually cheaper
945 * than all this algorithmic fiddling we would need otherwise.
947 * While the packet is on the channels wait queue we use the csum_* fields
948 * in the packet header to hold the original datalen, the AAL5 flag and the
949 * VCI. The packet length field in the header holds the needed buffer space.
950 * This may actually be more than the length of the current mbuf chain (when
951 * one or more of TBD, padding and PDU do not fit).
953 * LOCK: unlocked, needed
/*
 * en_start(): ifnet start routine (see the block comment above).
 * Validates the VPI/VCI, strips the atm pseudoheader, computes padded
 * buffer length, tries to place the TBD / padding / PDU trailer in the
 * existing mbuf, allocates a DMA map, and queues the packet on its TX
 * channel via MBUF_SET_TX.  Takes the softc lock itself.
 * NOTE(review): interior lines are elided in this listing.
 */
956 en_start(struct ifnet *ifp)
958 struct en_softc *sc = (struct en_softc *)ifp->if_softc;
959 struct mbuf *m, *lastm;
960 struct atm_pseudohdr *ap;
961 u_int pad; /* 0-bytes to pad at PDU end */
962 u_int datalen; /* length of user data */
963 u_int vci; /* the VCI we are transmitting on */
969 struct en_txslot *tx;
972 IF_DEQUEUE(&ifp->if_snd, m);
978 ap = mtod(m, struct atm_pseudohdr *);
979 vci = ATM_PH_VCI(ap);
/* drop packets for unopened or closing VCs and nonzero VPIs */
981 if (ATM_PH_VPI(ap) != 0 || vci >= MID_N_VC ||
982 (vc = sc->vccs[vci]) == NULL ||
983 (vc->vflags & VCC_CLOSE_RX)) {
984 DBG(sc, TX, ("output vpi=%u, vci=%u -- drop",
985 ATM_PH_VPI(ap), vci));
989 if (vc->vcc.aal == ATMIO_AAL_5)
991 m_adj(m, sizeof(struct atm_pseudohdr));
994 * (re-)calculate size of packet (in bytes)
996 m->m_pkthdr.len = datalen = m_length(m, &lastm);
999 * computing how much padding we need on the end of the mbuf,
1000 * then see if we can put the TBD at the front of the mbuf
1001 * where the link header goes (well behaved protocols will
1002 * reserve room for us). Last, check if room for PDU tail.
1004 if (flags & TX_AAL5)
1005 m->m_pkthdr.len += MID_PDU_SIZE;
/* round the buffer requirement up to whole ATM cells */
1006 m->m_pkthdr.len = roundup(m->m_pkthdr.len, MID_ATMDATASZ);
1007 pad = m->m_pkthdr.len - datalen;
1008 if (flags & TX_AAL5)
1009 pad -= MID_PDU_SIZE;
1010 m->m_pkthdr.len += MID_TBD_SIZE;
1012 DBG(sc, TX, ("txvci%d: buflen=%u datalen=%u lead=%d trail=%d",
1013 vci, m->m_pkthdr.len, datalen, (int)M_LEADINGSPACE(m),
1014 (int)M_TRAILINGSPACE(lastm)));
1017 * From here on we need access to sc
1022 * Allocate a map. We do this here rather then in en_txdma,
1023 * because en_txdma is also called from the interrupt handler
1024 * and we are going to have a locking problem then. We must
1025 * use NOWAIT here, because the ip_output path holds various
1028 map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
1030 /* drop that packet */
1031 EN_COUNT(sc->stats.txnomap);
1037 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1039 uma_zfree(sc->map_zone, map);
1045 * Look, whether we can prepend the TBD (8 byte)
1047 if (M_WRITABLE(m) && M_LEADINGSPACE(m) >= MID_TBD_SIZE) {
1048 tbd[0] = htobe32(MID_TBD_MK1((flags & TX_AAL5) ?
1049 MID_TBD_AAL5 : MID_TBD_NOAAL5,
1050 vc->txspeed, m->m_pkthdr.len / MID_ATMDATASZ));
1051 tbd[1] = htobe32(MID_TBD_MK2(vci, 0, 0));
1053 m->m_data -= MID_TBD_SIZE;
1054 bcopy(tbd, m->m_data, MID_TBD_SIZE);
1055 m->m_len += MID_TBD_SIZE;
1056 flags |= TX_HAS_TBD;
1060 * Check whether the padding fits (must be writeable -
1061 * we pad with zero).
1063 if (M_WRITABLE(lastm) && M_TRAILINGSPACE(lastm) >= pad) {
1064 bzero(lastm->m_data + lastm->m_len, pad);
1065 lastm->m_len += pad;
1066 flags |= TX_HAS_PAD;
/*
 * NOTE(review): `>' (not `>=') here means a trailer that fits
 * exactly is not inlined; en_txdma_load() writes it to card SRAM
 * instead (TX_HAS_PDU stays clear) -- confirm this is intentional.
 */
1068 if ((flags & TX_AAL5) &&
1069 M_TRAILINGSPACE(lastm) > MID_PDU_SIZE) {
1070 pdu[0] = htobe32(MID_PDU_MK1(0, 0, datalen));
1072 bcopy(pdu, lastm->m_data + lastm->m_len,
1074 lastm->m_len += MID_PDU_SIZE;
1075 flags |= TX_HAS_PDU;
/* ENI byte-aligner workaround: force 4-byte alignment of the chain */
1079 if (!sc->is_adaptec &&
1080 (m = en_fix_mchain(sc, m, &pad)) == NULL) {
1082 uma_zfree(sc->map_zone, map);
1087 * get assigned channel (will be zero unless txspeed is set)
1091 if (m->m_pkthdr.len > EN_TXSZ * 1024) {
1092 DBG(sc, TX, ("tx%td: packet larger than xmit buffer "
1093 "(%d > %d)\n", tx - sc->txslot, m->m_pkthdr.len,
1097 uma_zfree(sc->map_zone, map);
1101 if (tx->mbsize > EN_TXHIWAT) {
1102 EN_COUNT(sc->stats.txmbovr);
1103 DBG(sc, TX, ("tx%td: buffer space shortage",
1107 uma_zfree(sc->map_zone, map);
1112 tx->mbsize += m->m_pkthdr.len;
1114 DBG(sc, TX, ("tx%td: VCI=%d, speed=0x%x, buflen=%d, mbsize=%d",
1115 tx - sc->txslot, vci, sc->vccs[vci]->txspeed,
1116 m->m_pkthdr.len, tx->mbsize));
/* stash VCI/flags/lengths/map in the pkthdr for en_txdma() */
1118 MBUF_SET_TX(m, vci, flags, datalen, pad, map);
1120 _IF_ENQUEUE(&tx->q, m);
1128 /*********************************************************************/
1134 * en_loadvc: load a vc tab entry from a slot
1136 * LOCK: locked, needed
/*
 * en_loadvc(): program the card's VC table entry for an open VC:
 * first switch the VC to trash mode, zero its ring pointers, then
 * install the receive slot's mode/size/location word.  Softc lock held.
 * NOTE(review): interior lines are elided in this listing.
 */
1139 en_loadvc(struct en_softc *sc, struct en_vcc *vc)
1141 uint32_t reg = en_read(sc, MID_VC(vc->vcc.vci));
/* park the VC in trash mode while we rewrite its state */
1143 reg = MIDV_SETMODE(reg, MIDV_TRASH);
1144 en_write(sc, MID_VC(vc->vcc.vci), reg);
1147 /* no need to set CRC */
1149 /* read pointer = 0, desc. start = 0 */
1150 en_write(sc, MID_DST_RP(vc->vcc.vci), 0);
1151 /* write pointer = 0 */
1152 en_write(sc, MID_WP_ST_CNT(vc->vcc.vci), 0);
1153 /* set mode, size, loc */
1154 en_write(sc, MID_VC(vc->vcc.vci), vc->rxslot->mode);
1156 vc->rxslot->cur = vc->rxslot->start;
1158 DBG(sc, VC, ("rx%td: assigned to VCI %d", vc->rxslot - sc->rxslot,
1163 * Open the given vcc.
1165 * LOCK: unlocked, needed
/*
 * en_open_vcc(): open a VC: validate the VPI/VCI, allocate a vcc
 * structure, find a free receive slot, set the slot mode from the AAL,
 * bump the TX slot refcount, program the card via en_loadvc() and
 * publish the vcc in sc->vccs[].  On failure the vcc is freed.
 * NOTE(review): interior lines are elided in this listing.
 */
1168 en_open_vcc(struct en_softc *sc, struct atmio_openvcc *op)
1170 uint32_t oldmode, newmode;
1171 struct en_rxslot *slot;
1175 DBG(sc, IOCTL, ("enable vpi=%d, vci=%d, flags=%#x",
1176 op->param.vpi, op->param.vci, op->param.flags));
1178 if (op->param.vpi != 0 || op->param.vci >= MID_N_VC)
1181 vc = uma_zalloc(en_vcc_zone, M_NOWAIT | M_ZERO);
/* refuse to open a VCI that is already open */
1187 if (sc->vccs[op->param.vci] != NULL) {
1192 /* find a free receive slot */
1193 for (slot = sc->rxslot; slot < &sc->rxslot[sc->en_nrx]; slot++)
1194 if (slot->vcc == NULL)
1196 if (slot == &sc->rxslot[sc->en_nrx]) {
1202 vc->rxhand = op->rxhand;
1203 vc->vcc = op->param;
1205 oldmode = slot->mode;
1206 newmode = (op->param.aal == ATMIO_AAL_5) ? MIDV_AAL5 : MIDV_NOAAL;
1207 slot->mode = MIDV_SETMODE(oldmode, newmode);
1210 KASSERT (_IF_QLEN(&slot->indma) == 0 && _IF_QLEN(&slot->q) == 0,
1211 ("en_rxctl: left over mbufs on enable slot=%td",
1212 vc->rxslot - sc->rxslot));
/* all VCs share TX slot 0 until a txspeed-based assignment exists */
1215 vc->txslot = sc->txslot;
1216 vc->txslot->nref++; /* bump reference count */
1218 en_loadvc(sc, vc); /* does debug printf for us */
1220 /* don't free below */
1221 sc->vccs[vc->vcc.vci] = vc;
1227 uma_zfree(en_vcc_zone, vc);
/*
 * en_close_finish(): final teardown of a VC -- detach it from its
 * receive slot, clear its sc->vccs[] entry and free the structure.
 */
1237 en_close_finish(struct en_softc *sc, struct en_vcc *vc)
1240 if (vc->rxslot != NULL)
1241 vc->rxslot->vcc = NULL;
1243 DBG(sc, VC, ("vci: %u free (%p)", vc->vcc.vci, vc));
1245 sc->vccs[vc->vcc.vci] = NULL;
1246 uma_zfree(en_vcc_zone, vc);
1251 * LOCK: unlocked, needed
/*
 * en_close_vcc(): close a VC.  Switch the card entry to trash mode
 * (preserving the in-service bit), then either finish immediately when
 * no receive traffic is pending, or mark the VC draining and -- unless
 * the VC was opened ATMIO_FLAG_ASYNC -- sleep on cv_close until the
 * drain completes or the interface stops running.
 * NOTE(review): interior lines are elided in this listing.
 */
1254 en_close_vcc(struct en_softc *sc, struct atmio_closevcc *cl)
1256 uint32_t oldmode, newmode;
1260 DBG(sc, IOCTL, ("disable vpi=%d, vci=%d", cl->vpi, cl->vci));
1262 if (cl->vpi != 0 || cl->vci >= MID_N_VC)
1266 if ((vc = sc->vccs[cl->vci]) == NULL) {
1274 if (vc->rxslot == NULL) {
/* a close is already in progress for this VC */
1278 if (vc->vflags & VCC_DRAIN) {
1283 oldmode = en_read(sc, MID_VC(cl->vci));
1284 newmode = MIDV_SETMODE(oldmode, MIDV_TRASH) & ~MIDV_INSERVICE;
1285 en_write(sc, MID_VC(cl->vci), (newmode | (oldmode & MIDV_INSERVICE)));
1287 /* halt in tracks, be careful to preserve inservice bit */
1289 vc->rxslot->mode = newmode;
1293 /* if stuff is still going on we are going to have to drain it out */
1294 if (_IF_QLEN(&vc->rxslot->indma) == 0 &&
1295 _IF_QLEN(&vc->rxslot->q) == 0 &&
1296 (vc->vflags & VCC_SWSL) == 0) {
1297 en_close_finish(sc, vc);
1301 vc->vflags |= VCC_DRAIN;
1302 DBG(sc, IOCTL, ("VCI %u now draining", cl->vci));
/* async close: caller does not wait for the drain */
1304 if (vc->vcc.flags & ATMIO_FLAG_ASYNC)
1307 vc->vflags |= VCC_CLOSE_RX;
1308 while ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1309 (vc->vflags & VCC_DRAIN))
1310 cv_wait(&sc->cv_close, &sc->en_mtx);
1312 en_close_finish(sc, vc);
1313 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1324 /*********************************************************************/
1326 * starting/stopping the card
1330 * en_reset_ul: reset the board, throw away work in progress.
1331 * must en_init to recover.
1333 * LOCK: locked, needed
/*
 * en_reset_ul(): reset with the softc lock held (see comment above):
 * mark the interface down, reset the hardware, free every mbuf (and its
 * DMA map, stored in rcvif) from all RX and TX queues, clear per-slot
 * accounting and wake anyone sleeping in en_close_vcc().
 * NOTE(review): interior lines are elided in this listing.
 */
1336 en_reset_ul(struct en_softc *sc)
1340 struct en_rxslot *rx;
1343 device_printf(sc->dev, "reset\n");
1344 sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1346 if (sc->en_busreset)
1347 sc->en_busreset(sc);
1348 en_write(sc, MID_RESID, 0x0); /* reset hardware */
1351 * recv: dump any mbufs we are dma'ing into, if DRAINing, then a reset
1352 * will free us! Don't release the rxslot from the channel.
1354 for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
1355 if (sc->vccs[lcv] == NULL)
1357 rx = sc->vccs[lcv]->rxslot;
1360 _IF_DEQUEUE(&rx->indma, m);
/* RX mbufs in DMA carry their map in rcvif, same as TX */
1363 map = (void *)m->m_pkthdr.rcvif;
1364 uma_zfree(sc->map_zone, map);
1368 _IF_DEQUEUE(&rx->q, m);
1373 sc->vccs[lcv]->vflags = 0;
1377 * xmit: dump everything
1379 for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
1381 _IF_DEQUEUE(&sc->txslot[lcv].indma, m);
1384 map = (void *)m->m_pkthdr.rcvif;
1385 uma_zfree(sc->map_zone, map);
1389 _IF_DEQUEUE(&sc->txslot[lcv].q, m);
1392 map = (void *)m->m_pkthdr.rcvif;
1393 uma_zfree(sc->map_zone, map);
1396 sc->txslot[lcv].mbsize = 0;
1400 * Unstop all waiters
1402 cv_broadcast(&sc->cv_close);
1406 * en_reset: reset the board, throw away work in progress.
1407 * must en_init to recover.
1409 * LOCK: unlocked, needed
1411 * Use en_reset_ul if you already have the lock
1414 en_reset(struct en_softc *sc)
1423 * en_init: init board and sync the card with the data in the softc.
1425 * LOCK: locked, needed
/*
 * en_init(): bring the card in sync with the softc.  If the interface
 * is going down, just reset.  Otherwise reset, zero the on-board RAM,
 * initialize the DRQ/DTQ queue bookkeeping and the service list, set up
 * each TX slot's placement register, reload all open VCs, and finally
 * enable interrupts and the DMA/TX/RX engines.  Softc lock held.
 * NOTE(review): interior lines are elided in this listing.
 */
1428 en_init(struct en_softc *sc)
1433 if ((sc->ifp->if_flags & IFF_UP) == 0) {
1434 DBG(sc, INIT, ("going down"));
1435 en_reset(sc); /* to be safe */
1439 DBG(sc, INIT, ("going up"));
1440 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; /* enable */
1442 if (sc->en_busreset)
1443 sc->en_busreset(sc);
1444 en_write(sc, MID_RESID, 0x0); /* reset */
/* zero the whole on-board memory window */
1447 bus_space_set_region_4(sc->en_memt, sc->en_base,
1448 MID_RAMOFF, 0, sc->en_obmemsz / 4);
1451 * init obmem data structures: vc tab, dma q's, slist.
1453 * note that we set drq_free/dtq_free to one less than the total number
1454 * of DTQ/DRQs present. we do this because the card uses the condition
1455 * (drq_chip == drq_us) to mean "list is empty"... but if you allow the
1456 * circular list to be completely full then (drq_chip == drq_us) [i.e.
1457 * the drq_us pointer will wrap all the way around]. by restricting
1458 * the number of active requests to (N - 1) we prevent the list from
1459 * becoming completely full. note that the card will sometimes give
1460 * us an interrupt for a DTQ/DRQ we have already processed... this helps
1461 * keep that interrupt from messing us up.
1463 bzero(&sc->drq, sizeof(sc->drq));
1464 sc->drq_free = MID_DRQ_N - 1;
1465 sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
1466 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
1467 sc->drq_us = sc->drq_chip;
1469 bzero(&sc->dtq, sizeof(sc->dtq));
1470 sc->dtq_free = MID_DTQ_N - 1;
1471 sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
/*
 * NOTE(review): MID_DRQ_A2REG is applied to dtq_chip here, whereas
 * en_txdma_load() kicks WRTX with MID_DTQ_A2REG.  Harmless only if
 * the two A2REG macros are interchangeable -- verify in midwayreg.h.
 */
1472 en_write(sc, MID_DMA_WRTX, MID_DRQ_A2REG(sc->dtq_chip));
1473 sc->dtq_us = sc->dtq_chip;
1475 sc->hwslistp = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
1476 sc->swsl_size = sc->swsl_head = sc->swsl_tail = 0;
1478 DBG(sc, INIT, ("drq free/chip: %d/0x%x, dtq free/chip: %d/0x%x, "
1479 "hwslist: 0x%x", sc->drq_free, sc->drq_chip, sc->dtq_free,
1480 sc->dtq_chip, sc->hwslistp));
1482 for (slot = 0 ; slot < EN_NTX ; slot++) {
1483 sc->txslot[slot].bfree = EN_TXSZ * 1024;
1484 en_write(sc, MIDX_READPTR(slot), 0);
1485 en_write(sc, MIDX_DESCSTART(slot), 0);
1486 loc = sc->txslot[slot].cur = sc->txslot[slot].start;
1487 loc = loc - MID_RAMOFF;
1488 /* mask, cvt to words */
1489 loc = (loc & ~((EN_TXSZ * 1024) - 1)) >> 2;
1491 loc = loc >> MIDV_LOCTOPSHFT;
1492 en_write(sc, MIDX_PLACE(slot), MIDX_MKPLACE(en_k2sz(EN_TXSZ),
1494 DBG(sc, INIT, ("tx%d: place 0x%x", slot,
1495 (u_int)en_read(sc, MIDX_PLACE(slot))));
/* reprogram the card for every VC that is still open */
1498 for (vc = 0; vc < MID_N_VC; vc++)
1499 if (sc->vccs[vc] != NULL)
1500 en_loadvc(sc, sc->vccs[vc]);
1505 en_write(sc, MID_INTENA, MID_INT_TX | MID_INT_DMA_OVR | MID_INT_IDENT |
1506 MID_INT_LERR | MID_INT_DMA_ERR | MID_INT_DMA_RX | MID_INT_DMA_TX |
1507 MID_INT_SERVICE | MID_INT_SUNI | MID_INT_STATS);
1508 en_write(sc, MID_MAST_CSR, MID_SETIPL(sc->ipl) | MID_MCSR_ENDMA |
1509 MID_MCSR_ENTX | MID_MCSR_ENRX);
1512 /*********************************************************************/
1517 * en_ioctl: handle ioctl requests
1519 * NOTE: if you add an ioctl to set txspeed, you should choose a new
1520 * TX channel/slot. Choose the one with the lowest sc->txslot[slot].nref
1521 * value, subtract one from sc->txslot[0].nref, add one to the
1522 * sc->txslot[slot].nref, set sc->txvc2slot[vci] = slot, and then set
1525 * LOCK: unlocked, needed
/*
 * en_ioctl: ifnet ioctl entry point — address assignment, up/down,
 * MTU, media, and the ATM VCC open/close/query commands.
 * NOTE(review): line-sampled extract — the switch(cmd), braces and
 * several case bodies are missing; code kept byte-identical.
 */
1528 en_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1530 struct en_softc *sc = (struct en_softc *)ifp->if_softc;
1531 #if defined(INET) || defined(INET6)
1532 struct ifaddr *ifa = (struct ifaddr *)data;
1534 struct ifreq *ifr = (struct ifreq *)data;
1535 struct atmio_vcctable *vtab;
/* SIOCSIFADDR path (presumably) — mark up, init if not yet running */
1542 ifp->if_flags |= IFF_UP;
1543 #if defined(INET) || defined(INET6)
1544 if (ifa->ifa_addr->sa_family == AF_INET
1545 || ifa->ifa_addr->sa_family == AF_INET6) {
1546 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1550 ifa->ifa_rtrequest = atm_rtrequest; /* ??? */
1555 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* SIOCSIFFLAGS path (presumably) — reconcile IFF_UP vs DRV_RUNNING */
1564 if (ifp->if_flags & IFF_UP) {
1565 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1568 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1576 * Set the interface MTU.
1578 if (ifr->ifr_mtu > ATMMTU) {
1582 ifp->if_mtu = ifr->ifr_mtu;
/* media ioctls are delegated to ifmedia */
1587 error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
1590 case SIOCATMOPENVCC: /* kernel internal use */
1591 error = en_open_vcc(sc, (struct atmio_openvcc *)data);
1594 case SIOCATMCLOSEVCC: /* kernel internal use */
1595 error = en_close_vcc(sc, (struct atmio_closevcc *)data);
1598 case SIOCATMGETVCCS: /* internal netgraph use */
1599 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
1600 MID_N_VC, sc->vccs_open, &sc->en_mtx, 0);
1605 *(void **)data = vtab;
1608 case SIOCATMGVCCS: /* return vcc table */
1609 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
1610 MID_N_VC, sc->vccs_open, &sc->en_mtx, 1);
/* copy table to userland, then free the kernel copy */
1611 error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
1612 vtab->count * sizeof(vtab->vccs[0]));
1613 free(vtab, M_DEVBUF);
1623 /*********************************************************************/
1629 * Sysctl handler for internal statistics
1631 * LOCK: unlocked, needed
/*
 * en_sysctl_istats: sysctl handler — snapshot sc->stats into a
 * temporary buffer and hand it to SYSCTL_OUT.
 * NOTE(review): line-sampled extract — braces/return not visible.
 */
1634 en_sysctl_istats(SYSCTL_HANDLER_ARGS)
1636 struct en_softc *sc = arg1;
1640 ret = malloc(sizeof(sc->stats), M_TEMP, M_WAITOK);
/* copy under whatever locking the elided lines establish — TODO confirm */
1643 bcopy(&sc->stats, ret, sizeof(sc->stats));
1646 error = SYSCTL_OUT(req, ret, sizeof(sc->stats));
1652 /*********************************************************************/
1658 * Transmit interrupt handler
1660 * check for tx complete, if detected then this means that some space
1661 * has come free on the card. we must account for it and arrange to
1662 * kick the channel to life (in case it is stalled waiting on the card).
1664 * LOCK: locked, needed
/*
 * en_intr_tx: TX-complete interrupt — for each channel flagged in reg,
 * re-read the card's read pointer and recompute bfree; returns a
 * bitmask of channels to kick (per the comment block above).
 * NOTE(review): line-sampled extract — kick-accumulation and return
 * lines are not visible here.
 */
1667 en_intr_tx(struct en_softc *sc, uint32_t reg)
1674 kick = 0; /* bitmask of channels to kick */
1676 for (mask = 1, chan = 0; chan < EN_NTX; chan++, mask *= 2) {
1677 if (!(reg & MID_TXCHAN(chan)))
1682 /* current read pointer */
1683 val = en_read(sc, MIDX_READPTR(chan));
/* convert word index to byte address within this channel's buffer */
1685 val = (val * sizeof(uint32_t)) + sc->txslot[chan].start;
1686 if (val > sc->txslot[chan].cur)
1687 sc->txslot[chan].bfree = val - sc->txslot[chan].cur;
/* read pointer wrapped: account for the full EN_TXSZ KB ring */
1689 sc->txslot[chan].bfree = (val + (EN_TXSZ * 1024)) -
1690 sc->txslot[chan].cur;
1691 DBG(sc, INTR, ("tx%d: transmit done. %d bytes now free in "
1692 "buffer", chan, sc->txslot[chan].bfree));
1700 * check for TX DMA complete, if detected then this means
1701 * that some DTQs are now free. it also means some indma
1702 * mbufs can be freed. if we needed DTQs, kick all channels.
1704 * LOCK: locked, needed
/*
 * en_intr_tx_dma: TX DMA completion — walk finished DTQ entries from
 * where we last saw the chip up to its current position, freeing the
 * associated mbufs/maps and accounting the per-slot mbsize.
 * NOTE(review): line-sampled extract — declarations, braces and the
 * return of `kick` are not visible here.
 */
1707 en_intr_tx_dma(struct en_softc *sc)
1717 val = en_read(sc, MID_DMA_RDTX); /* chip's current location */
1718 idx = MID_DTQ_A2REG(sc->dtq_chip); /* where we last saw chip */
1720 if (sc->need_dtqs) {
1721 kick = MID_NTX_CH - 1; /* assume power of 2, kick all! */
1722 sc->need_dtqs = 0; /* recalculated in "kick" loop below */
1723 DBG(sc, INTR, ("cleared need DTQ condition"));
1726 while (idx != val) {
1728 if ((dtq = sc->dtq[idx]) != 0) {
1729 /* don't forget to zero it out when done */
1731 slot = EN_DQ_SLOT(dtq);
1733 _IF_DEQUEUE(&sc->txslot[slot].indma, m);
/* queue and DTQ ring out of sync — unrecoverable */
1735 panic("enintr: dtqsync");
/* the DMA map was stashed in rcvif by the TX path — recover and free */
1736 map = (void *)m->m_pkthdr.rcvif;
1737 uma_zfree(sc->map_zone, map);
1740 sc->txslot[slot].mbsize -= EN_DQ_LEN(dtq);
1741 DBG(sc, INTR, ("tx%d: free %d dma bytes, mbsize now "
1742 "%d", slot, EN_DQ_LEN(dtq),
1743 sc->txslot[slot].mbsize));
1745 EN_WRAPADD(0, MID_DTQ_N, idx, 1);
1747 sc->dtq_chip = MID_DTQ_REG2A(val); /* sync softc */
1755 * LOCK: locked, needed
/*
 * en_intr_service: drain the card's hardware service list, moving each
 * serviceable VCI onto the software service list (see the Q/A comment
 * before en_service for why the software list exists). Returns nonzero
 * when soft servicing is needed.
 * NOTE(review): line-sampled extract — some declarations/braces missing.
 */
1758 en_intr_service(struct en_softc *sc)
1762 int need_softserv = 0;
1765 chip = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
1767 while (sc->hwslistp != chip) {
1768 /* fetch and remove it from hardware service list */
1769 vci = en_read(sc, sc->hwslistp);
1770 EN_WRAPADD(MID_SLOFF, MID_SLEND, sc->hwslistp, 4);
/* VCI not open (or RX disabled): trash incoming cells for it */
1772 if ((vc = sc->vccs[vci]) == NULL ||
1773 (vc->vcc.flags & ATMIO_FLAG_NORX)) {
1774 DBG(sc, INTR, ("unexpected rx interrupt VCI %d", vci));
1775 en_write(sc, MID_VC(vci), MIDV_TRASH); /* rx off */
1779 /* remove from hwsl */
1780 en_write(sc, MID_VC(vci), vc->rxslot->mode);
1781 EN_COUNT(sc->stats.hwpull);
1783 DBG(sc, INTR, ("pulled VCI %d off hwslist", vci));
1785 /* add it to the software service list (if needed) */
1786 if ((vc->vflags & VCC_SWSL) == 0) {
1787 EN_COUNT(sc->stats.swadd);
1789 vc->vflags |= VCC_SWSL;
1790 sc->swslist[sc->swsl_tail] = vci;
1791 EN_WRAPADD(0, MID_SL_N, sc->swsl_tail, 1);
1793 DBG(sc, INTR, ("added VCI %d to swslist", vci));
1796 return (need_softserv);
1800 * Handle a receive DMA completion
/*
 * en_rx_drain: handle one completed receive DMA (one DRQ entry).
 * A zero length means a "JK" trash DMA with no mbuf; otherwise the
 * mbuf is dequeued from the slot's indma queue, its map freed, and
 * the packet is handed to atm_input() (unless the VC is draining).
 * NOTE(review): line-sampled extract — braces/early returns missing.
 */
1803 en_rx_drain(struct en_softc *sc, u_int drq)
1805 struct en_rxslot *slot;
1808 struct atm_pseudohdr ah;
1810 slot = &sc->rxslot[EN_DQ_SLOT(drq)];
1812 m = NULL; /* assume "JK" trash DMA */
1813 if (EN_DQ_LEN(drq) != 0) {
1814 _IF_DEQUEUE(&slot->indma, m);
1815 KASSERT(m != NULL, ("drqsync: %s: lost mbuf in slot %td!",
1816 sc->ifp->if_xname, slot - sc->rxslot));
/* RX reused the TX tag's map, stashed in rcvif — free it */
1817 uma_zfree(sc->map_zone, (struct en_map *)m->m_pkthdr.rcvif);
1819 if ((vc = slot->vcc) == NULL) {
1826 /* do something with this mbuf */
1827 if (vc->vflags & VCC_DRAIN) {
/* closing VC: once all queues and in-service state are empty,
 * finish the close (async) or wake the closer (sync) */
1831 if (_IF_QLEN(&slot->indma) == 0 && _IF_QLEN(&slot->q) == 0 &&
1832 (en_read(sc, MID_VC(vc->vcc.vci)) & MIDV_INSERVICE) == 0 &&
1833 (vc->vflags & VCC_SWSL) == 0) {
1834 vc->vflags &= ~VCC_CLOSE_RX;
1835 if (vc->vcc.flags & ATMIO_FLAG_ASYNC)
1836 en_close_finish(sc, vc);
1838 cv_signal(&sc->cv_close);
/* build the ATM pseudo header for the input path */
1844 ATM_PH_FLAGS(&ah) = vc->vcc.flags;
1845 ATM_PH_VPI(&ah) = 0;
1846 ATM_PH_SETVCI(&ah, vc->vcc.vci);
1848 DBG(sc, INTR, ("rx%td: rxvci%d: atm_input, mbuf %p, len %d, "
1849 "hand %p", slot - sc->rxslot, vc->vcc.vci, m,
1850 EN_DQ_LEN(drq), vc->rxhand));
1852 m->m_pkthdr.rcvif = sc->ifp;
1853 sc->ifp->if_ipackets++;
1856 vc->ibytes += m->m_pkthdr.len;
1859 if (sc->debug & DBG_IPACKETS)
1860 en_dump_packet(sc, m);
1863 BPF_MTAP(sc->ifp, m);
1866 atm_input(sc->ifp, &ah, m, vc->rxhand);
1872 * check for RX DMA complete, and pass the data "upstairs"
1874 * LOCK: locked, needed
/*
 * en_intr_rx_dma: RX DMA completion — walk finished DRQ entries from
 * our last-seen position to the chip's current one, draining each via
 * en_rx_drain(); returns nonzero if soft servicing is needed (the
 * need_drqs shortage was cleared).
 * NOTE(review): line-sampled extract — braces/return not visible.
 */
1877 en_intr_rx_dma(struct en_softc *sc)
1883 val = en_read(sc, MID_DMA_RDRX); /* chip's current location */
1884 idx = MID_DRQ_A2REG(sc->drq_chip); /* where we last saw chip */
1886 while (idx != val) {
1888 if ((drq = sc->drq[idx]) != 0) {
1889 /* don't forget to zero it out when done */
1891 en_rx_drain(sc, drq);
1893 EN_WRAPADD(0, MID_DRQ_N, idx, 1);
1895 sc->drq_chip = MID_DRQ_REG2A(val); /* sync softc */
1897 if (sc->need_drqs) {
1898 /* true if we had a DRQ shortage */
1900 DBG(sc, INTR, ("cleared need DRQ condition"));
1907 * en_mget: get an mbuf chain that can hold totlen bytes and return it
1908 * (for recv). For the actual allocation totlen is rounded up to a multiple
1909 * of 4. We also ensure, that each mbuf has a multiple of 4 bytes.
1911 * After this call the sum of all the m_len's in the chain will be totlen.
1912 * This is called at interrupt time, so we can't wait here.
1914 * LOCK: any, not needed
/*
 * en_mget: allocate an mbuf chain to hold `pktlen` bytes for receive;
 * the allocation is rounded up to a multiple of 4 bytes (see the
 * comment block above). Runs at interrupt time, hence M_NOWAIT.
 * NOTE(review): line-sampled extract — NULL checks, returns, and
 * some chain-fixup lines are not visible here.
 */
1916 static struct mbuf *
1917 en_mget(struct en_softc *sc, u_int pktlen)
1919 struct mbuf *m, *tmp;
1922 totlen = roundup(pktlen, sizeof(uint32_t));
1923 pad = totlen - pktlen;
1926 * First get an mbuf with header. Keep space for a couple of
1927 * words at the begin.
1929 /* called from interrupt context */
1930 MGETHDR(m, M_NOWAIT, MT_DATA);
1934 m->m_pkthdr.rcvif = NULL;
1935 m->m_pkthdr.len = pktlen;
1936 m->m_len = EN_RX1BUF;
1937 MH_ALIGN(m, EN_RX1BUF);
/* single mbuf suffices when the header mbuf already covers totlen */
1938 if (m->m_len >= totlen) {
1944 /* called from interrupt context */
1945 tmp = m_getm(m, totlen, M_NOWAIT, MT_DATA);
1951 /* m_getm could do this for us */
1952 while (tmp != NULL) {
1953 tmp->m_len = min(MCLBYTES, totlen);
1954 totlen -= tmp->m_len;
1963 * Argument for RX DMAMAP loader.
/*
 * Fields of struct rxarg (opening "struct rxarg {" line is not visible
 * in this line-sampled extract) — the argument record passed to
 * en_rxdma_load() via bus_dmamap_load_mbuf().
 */
1966 struct en_softc *sc;
1968 u_int pre_skip; /* number of bytes to skip at begin */
1969 u_int post_skip; /* number of bytes to skip at end */
1970 struct en_vcc *vc; /* vc we are receiving on */
1971 int wait; /* wait for DRQ entries */
1975 * Copy the segment table to the buffer for later use. And compute the
1976 * number of dma queue entries we need.
1978 * LOCK: locked, needed
/*
 * en_rxdma_load: bus_dma callback — turn the mbuf's DMA segment list
 * into DRQ entries for the card, splitting each segment into an
 * alignment burst, max-sized bursts, and a cleanup burst; JK (trash)
 * entries skip the RBD header and trailing pad.
 * NOTE(review): line-sampled extract — macro bodies and several
 * statements (incl. `cur` init, loop closers) are not visible.
 */
1981 en_rxdma_load(void *uarg, bus_dma_segment_t *segs, int nseg,
1982 bus_size_t mapsize, int error)
1984 struct rxarg *rx = uarg;
1985 struct en_softc *sc = rx->sc;
1986 struct en_rxslot *slot = rx->vc->rxslot;
1987 u_int free; /* number of free DRQ entries */
1988 uint32_t cur; /* current buffer offset */
1989 uint32_t drq; /* DRQ entry pointer */
1990 uint32_t last_drq; /* where we have written last */
1991 u_int needalign, cnt, count, bcode;
1998 if (nseg > EN_MAX_DMASEG)
1999 panic("too many DMA segments");
2003 free = sc->drq_free;
2010 * Local macro to add an entry to the receive DMA area. If there
2011 * are no entries left, return. Save the byte offset of the entry
2012 * in last_drq for later use.
2014 #define PUT_DRQ_ENTRY(ENI, BCODE, COUNT, ADDR) \
2016 EN_COUNT(sc->stats.rxdrqout); \
2021 en_write(sc, drq + 0, (ENI || !sc->is_adaptec) ? \
2022 MID_MK_RXQ_ENI(COUNT, rx->vc->vcc.vci, 0, BCODE) : \
2023 MID_MK_RXQ_ADP(COUNT, rx->vc->vcc.vci, 0, BCODE)); \
2024 en_write(sc, drq + 4, ADDR); \
2026 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, drq, 8); \
2030 * Local macro to generate a DMA entry to DMA cnt bytes. Updates
2031 * the current buffer byte offset accordingly.
2033 #define DO_DRQ(TYPE) do { \
2035 EN_WRAPADD(slot->start, slot->stop, cur, cnt); \
2036 DBG(sc, SERV, ("rx%td: "TYPE" %u bytes, %ju left, cur %#x", \
2037 slot - sc->rxslot, cnt, (uintmax_t)rest, cur)); \
2039 PUT_DRQ_ENTRY(1, bcode, count, addr); \
2045 * Skip the RBD at the beginning
2047 if (rx->pre_skip > 0) {
2048 /* update DMA address */
2049 EN_WRAPADD(slot->start, slot->stop, cur, rx->pre_skip);
2051 PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
2054 for (i = 0; i < nseg; i++, segs++) {
2055 addr = segs->ds_addr;
2056 rest = segs->ds_len;
2058 if (sc->is_adaptec) {
2059 /* adaptec card - simple */
2061 /* advance the on-card buffer pointer */
2062 EN_WRAPADD(slot->start, slot->stop, cur, rest);
2063 DBG(sc, SERV, ("rx%td: adp %ju bytes %#jx "
2064 "(cur now 0x%x)", slot - sc->rxslot,
2065 (uintmax_t)rest, (uintmax_t)addr, cur));
2067 PUT_DRQ_ENTRY(0, 0, rest, addr);
2073 * do we need to do a DMA op to align to the maximum
2074 * burst? Note, that we are always 32-bit aligned.
2077 (needalign = (addr & sc->bestburstmask)) != 0) {
2078 /* compute number of bytes, words and code */
2079 cnt = sc->bestburstlen - needalign;
2082 count = cnt / sizeof(uint32_t);
2083 if (sc->noalbursts) {
2084 bcode = MIDDMA_WORD;
2086 bcode = en_dmaplan[count].bcode;
2087 count = cnt >> en_dmaplan[count].divshift;
2092 /* do we need to do a max-sized burst? */
2093 if (rest >= sc->bestburstlen) {
2094 count = rest >> sc->bestburstshift;
2095 cnt = count << sc->bestburstshift;
2096 bcode = sc->bestburstcode;
2100 /* do we need to do a cleanup burst? */
2103 count = rest / sizeof(uint32_t);
2104 if (sc->noalbursts) {
2105 bcode = MIDDMA_WORD;
2107 bcode = en_dmaplan[count].bcode;
2108 count = cnt >> en_dmaplan[count].divshift;
2110 DO_DRQ("clean_dma");
2115 * Skip stuff at the end
2117 if (rx->post_skip > 0) {
2118 /* update DMA address */
2119 EN_WRAPADD(slot->start, slot->stop, cur, rx->post_skip);
2121 PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
2124 /* record the end for the interrupt routine */
2125 sc->drq[MID_DRQ_A2REG(last_drq)] =
2126 EN_DQ_MK(slot - sc->rxslot, rx->m->m_pkthdr.len);
2128 /* set the end flag in the last descriptor */
2129 en_write(sc, last_drq + 0, SETQ_END(sc, en_read(sc, last_drq + 0)));
2131 #undef PUT_DRQ_ENTRY
2136 sc->drq_free = free;
2139 /* signal to card */
2140 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
2144 * en_service: handle a service interrupt
2146 * Q: why do we need a software service list?
2148 * A: if we remove a VCI from the hardware list and we find that we are
2149 * out of DRQs we must defer processing until some DRQs become free.
2150 * so we must remember to look at this RX VCI/slot later, but we can't
2151 * put it back on the hardware service list (since that isn't allowed).
2152 * so we instead save it on the software service list. it would be nice
2153 * if we could peek at the VCI on top of the hwservice list without removing
2154 * it, however this leads to a race condition: if we peek at it and
2155 * decide we are done with it new data could come in before we have a
2156 * chance to remove it from the hwslist. by the time we get it out of
2157 * the list the interrupt for the new data will be lost. oops!
2159 * LOCK: locked, needed
/*
 * en_service: process the software service list — for each pending
 * VCI, parse the receive buffer descriptor (RBD), size the packet
 * (AAL5 trailer handling, CRC/length error checks), allocate an mbuf
 * chain and DMA map, and start the RX DMA; on resource shortage the
 * work is parked (mbuf saved on slot->q, need_drqs flagged) for later.
 * NOTE(review): line-sampled extract — labels, gotos, braces and many
 * statements (incl. `cur`/`slot` init) are not visible; code below is
 * kept byte-identical, comments only added.
 */
2162 en_service(struct en_softc *sc)
2164 struct mbuf *m, *lastm;
2168 uint32_t dstart; /* data start (as reported by card) */
2169 uint32_t rbd; /* receive buffer descriptor */
2170 uint32_t pdu; /* AAL5 trailer */
2173 struct en_rxslot *slot;
2179 if (sc->swsl_size == 0) {
2180 DBG(sc, SERV, ("en_service done"));
2185 * get vcc to service
2187 rx.vc = vc = sc->vccs[sc->swslist[sc->swsl_head]];
2189 KASSERT (slot->vcc->rxslot == slot, ("en_service: rx slot/vci sync"));
2192 * determine our mode and if we've got any work to do
2194 DBG(sc, SERV, ("rx%td: service vci=%d start/stop/cur=0x%x 0x%x "
2195 "0x%x", slot - sc->rxslot, vc->vcc.vci, slot->start,
2196 slot->stop, slot->cur));
/* card's data-start pointer, converted from word index to address */
2201 dstart = MIDV_DSTART(en_read(sc, MID_DST_RP(vc->vcc.vci)));
2202 dstart = (dstart * sizeof(uint32_t)) + slot->start;
2204 /* check to see if there is any data at all */
2205 if (dstart == cur) {
2206 EN_WRAPADD(0, MID_SL_N, sc->swsl_head, 1);
2207 /* remove from swslist */
2208 vc->vflags &= ~VCC_SWSL;
2210 DBG(sc, SERV, ("rx%td: remove vci %d from swslist",
2211 slot - sc->rxslot, vc->vcc.vci));
2216 * figure out how many bytes we need
2217 * [mlen = # bytes to go in mbufs]
2219 rbd = en_read(sc, cur);
2220 if (MID_RBD_ID(rbd) != MID_RBD_STDID)
2221 panic("en_service: id mismatch");
2223 if (rbd & MID_RBD_T) {
2224 mlen = 0; /* we've got trash */
2225 rx.pre_skip = MID_RBD_SIZE;
2227 EN_COUNT(sc->stats.ttrash);
2228 DBG(sc, SERV, ("RX overflow lost %d cells!", MID_RBD_CNT(rbd)));
/* non-AAL5: each RBD carries one cell (header + payload) */
2230 } else if (vc->vcc.aal != ATMIO_AAL_5) {
2232 mlen = MID_CHDR_SIZE + MID_ATMDATASZ;
2233 rx.pre_skip = MID_RBD_SIZE;
2237 rx.pre_skip = MID_RBD_SIZE;
2239 /* get PDU trailer in correct byte order */
2240 pdu = cur + MID_RBD_CNT(rbd) * MID_ATMDATASZ +
2241 MID_RBD_SIZE - MID_PDU_SIZE;
2242 if (pdu >= slot->stop)
2243 pdu -= EN_RXSZ * 1024;
2244 pdu = en_read(sc, pdu);
/* sanity check the AAL5 trailer length against the cell count */
2246 if (MID_RBD_CNT(rbd) * MID_ATMDATASZ <
2248 device_printf(sc->dev, "invalid AAL5 length\n");
2249 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
2251 sc->ifp->if_ierrors++;
2253 } else if (rbd & MID_RBD_CRCERR) {
2254 device_printf(sc->dev, "CRC error\n");
2255 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
2257 sc->ifp->if_ierrors++;
2260 mlen = MID_PDU_LEN(pdu);
2261 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ - mlen;
2266 * now allocate mbufs for mlen bytes of data, if out of mbufs, trash all
2269 * 1. it is possible that we've already allocated an mbuf for this pkt
2270 * but ran out of DRQs, in which case we saved the allocated mbuf
2272 * 2. if we save an buf in "q" we store the "cur" (pointer) in the
2273 * buf as an identity (that we can check later).
2274 * 3. after this block of code, if m is still NULL then we ran out of
2277 _IF_DEQUEUE(&slot->q, m);
/* csum_data carries the saved "cur" identity (see note 2 above) */
2279 if (m->m_pkthdr.csum_data != cur) {
2281 DBG(sc, SERV, ("rx%td: q'ed buf %p not ours",
2282 slot - sc->rxslot, m));
2283 _IF_PREPEND(&slot->q, m);
2285 EN_COUNT(sc->stats.rxqnotus);
2287 EN_COUNT(sc->stats.rxqus);
2288 DBG(sc, SERV, ("rx%td: recovered q'ed buf %p",
2289 slot - sc->rxslot, m));
2292 if (mlen == 0 && m != NULL) {
2293 /* should not happen */
2298 if (mlen != 0 && m == NULL) {
2299 m = en_mget(sc, mlen);
/* mbuf allocation failed: skip the data instead of receiving it */
2301 rx.post_skip += mlen;
2303 EN_COUNT(sc->stats.rxmbufout);
2304 DBG(sc, SERV, ("rx%td: out of mbufs",
2305 slot - sc->rxslot));
2307 rx.post_skip -= roundup(mlen, sizeof(uint32_t)) - mlen;
2309 DBG(sc, SERV, ("rx%td: allocate buf %p, mlen=%d",
2310 slot - sc->rxslot, m, mlen));
2313 DBG(sc, SERV, ("rx%td: VCI %d, rbuf %p, mlen %d, skip %u/%u",
2314 slot - sc->rxslot, vc->vcc.vci, m, mlen, rx.pre_skip,
2318 /* M_NOWAIT - called from interrupt context */
2319 map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
2321 rx.post_skip += mlen;
2323 DBG(sc, SERV, ("rx%td: out of maps",
2324 slot - sc->rxslot));
/* note: RX reuses the TX dma tag for its map loads */
2328 error = bus_dmamap_load_mbuf(sc->txtag, map->map, m,
2329 en_rxdma_load, &rx, BUS_DMA_NOWAIT);
2332 device_printf(sc->dev, "loading RX map failed "
2334 uma_zfree(sc->map_zone, map);
2336 rx.post_skip += mlen;
2340 map->flags |= ENMAP_LOADED;
2343 /* out of DRQs - wait */
2344 uma_zfree(sc->map_zone, map);
/* park the mbuf with its identity so we can resume later */
2346 m->m_pkthdr.csum_data = cur;
2347 _IF_ENQUEUE(&slot->q, m);
2348 EN_COUNT(sc->stats.rxdrqout);
2350 sc->need_drqs = 1; /* flag condition */
/* strip the word-alignment pad from the last mbuf in the chain */
2354 (void)m_length(m, &lastm);
2355 lastm->m_len -= roundup(mlen, sizeof(uint32_t)) - mlen;
2357 m->m_pkthdr.rcvif = (void *)map;
2358 _IF_ENQUEUE(&slot->indma, m);
2360 /* get next packet in this slot */
2365 * Here we end if we should drop the packet from the receive buffer.
2366 * The number of bytes to drop is in fill. We can do this with one
2367 * JK entry. If we don't even have that one - wait.
2369 if (sc->drq_free == 0) {
2370 sc->need_drqs = 1; /* flag condition */
2373 rx.post_skip += rx.pre_skip;
2374 DBG(sc, SERV, ("rx%td: skipping %u", slot - sc->rxslot, rx.post_skip));
2376 /* advance buffer address */
2377 EN_WRAPADD(slot->start, slot->stop, cur, rx.post_skip);
2379 /* write DRQ entry */
2381 en_write(sc, sc->drq_us,
2382 MID_MK_RXQ_ADP(WORD_IDX(slot->start, cur),
2383 vc->vcc.vci, MID_DMA_END, MIDDMA_JK));
2385 en_write(sc, sc->drq_us,
2386 MID_MK_RXQ_ENI(WORD_IDX(slot->start, cur),
2387 vc->vcc.vci, MID_DMA_END, MIDDMA_JK));
2388 en_write(sc, sc->drq_us + 4, 0);
2389 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_us, 8);
2392 /* signal to RX interrupt */
2393 sc->drq[MID_DRQ_A2REG(sc->drq_us)] = EN_DQ_MK(slot - sc->rxslot, 0);
2396 /* signal to card */
2397 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
2405 * LOCK: unlocked, needed
/*
 * Main interrupt handler (presumably en_intr(void *arg) — the
 * signature line is missing from this line-sampled extract):
 * acknowledge the interrupt, reset on fatal errors, dispatch to the
 * SUNI/TX/TX-DMA/RX-DMA/service sub-handlers, kick stalled TX
 * channels, and account overflow statistics.
 */
2410 struct en_softc *sc = arg;
2411 uint32_t reg, kick, mask;
2412 int lcv, need_softserv;
2416 reg = en_read(sc, MID_INTACK);
2417 DBG(sc, INTR, ("interrupt=0x%b", reg, MID_INTBITS));
/* spurious/shared interrupt: nothing of ours pending */
2419 if ((reg & MID_INT_ANY) == 0) {
2425 * unexpected errors that need a reset
2427 if ((reg & (MID_INT_IDENT | MID_INT_LERR | MID_INT_DMA_ERR)) != 0) {
2428 device_printf(sc->dev, "unexpected interrupt=0x%b, "
2429 "resetting\n", reg, MID_INTBITS);
2431 panic("en: unexpected error");
2440 if (reg & MID_INT_SUNI)
2441 utopia_intr(&sc->utopia);
2444 if (reg & MID_INT_TX)
2445 kick |= en_intr_tx(sc, reg);
2447 if (reg & MID_INT_DMA_TX)
2448 kick |= en_intr_tx_dma(sc);
2451 * kick xmit channels as needed.
2454 DBG(sc, INTR, ("tx kick mask = 0x%x", kick));
2455 for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2)
2456 if ((kick & mask) && _IF_QLEN(&sc->txslot[lcv].q) != 0)
2457 en_txdma(sc, &sc->txslot[lcv]);
2461 if (reg & MID_INT_DMA_RX)
2462 need_softserv |= en_intr_rx_dma(sc);
2464 if (reg & MID_INT_SERVICE)
2465 need_softserv |= en_intr_service(sc);
/* DMA overflow: pull trash counters out of the status register */
2473 if (reg & MID_INT_DMA_OVR) {
2474 EN_COUNT(sc->stats.dmaovr);
2475 DBG(sc, INTR, ("MID_INT_DMA_OVR"));
2477 reg = en_read(sc, MID_STAT);
2478 sc->stats.otrash += MID_OTRASH(reg);
2479 sc->stats.vtrash += MID_VTRASH(reg);
2485 * Read at most n SUNI regs starting at reg into val
/*
 * en_utopia_readregs: read at most *n SUNI registers starting at reg
 * into val, clamping *n to the register file size (MID_NSUNI).
 * NOTE(review): line-sampled extract — error/ok returns not visible.
 */
2488 en_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
2490 struct en_softc *sc = ifatm->ifp->if_softc;
2494 if (reg >= MID_NSUNI)
2496 if (reg + *n > MID_NSUNI)
2497 *n = MID_NSUNI - reg;
/* SUNI registers are word-spaced starting at MID_SUNIOFF */
2499 for (i = 0; i < *n; i++)
2500 val[i] = en_read(sc, MID_SUNIOFF + 4 * (reg + i));
2506 * change the bits given by mask to them in val in register reg
/*
 * en_utopia_writereg: read-modify-write one SUNI register — only the
 * bits selected by mask are replaced with the corresponding bits of
 * val.
 * NOTE(review): line-sampled extract — returns not visible.
 */
2509 en_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
2511 struct en_softc *sc = ifatm->ifp->if_softc;
2515 if (reg >= MID_NSUNI)
2517 regval = en_read(sc, MID_SUNIOFF + 4 * reg);
2518 regval = (regval & ~mask) | (val & mask);
2519 en_write(sc, MID_SUNIOFF + 4 * reg, regval);
/*
 * utopia callback table — presumably lists the two en_utopia_*
 * functions above; the initializer body is missing from this
 * line-sampled extract.
 */
2523 static const struct utopia_methods en_utopia_methods = {
2528 /*********************************************************************/
2530 * Probing the DMA brokeness of the card
2534 * Physical address load helper function for DMA probe
2536 * LOCK: unlocked, not needed
/* bus_dma load callback for the DMA probe: just capture the single
 * segment's physical address into the caller-provided bus_addr_t. */
2539 en_dmaprobe_load(void *uarg, bus_dma_segment_t *segs, int nseg, int error)
2542 *(bus_addr_t *)uarg = segs[0].ds_addr;
2546 * en_dmaprobe: helper function for en_attach.
2548 * see how the card handles DMA by running a few DMA tests. we need
2549 * to figure out the largest number of bytes we can DMA in one burst
2550 * ("bestburstlen"), and if the starting address for a burst needs to
2551 * be aligned on any sort of boundary or not ("alburst").
2553 * Things turn out more complex than that, because on my (harti) brand
2554 * new motherboard (2.4GHz) we can do 64byte aligned DMAs, but everything
2555 * with more than 4 bytes fails (with an RX DMA timeout) for physical
2556 * addresses that end with 0xc. Therefore we search not only the largest
2557 * burst that is supported (hopefully 64) but also check what is the largest
2558 * unaligned supported size. If that appears to be less than 4 words,
2559 * set the noalbursts flag. That will be set only if alburst is also set.
2563 * en_dmaprobe_doit: do actual testing for the DMA test.
2564 * Cycle through all bursts sizes from 8 up to 64 and try whether it works.
2565 * Return the largest one that works.
2567 * LOCK: unlocked, not needed
/*
 * en_dmaprobe_doit: run the actual DMA loop-back test — for burst
 * sizes 8..MIDDMA_MAXBURST (doubling), DMA sample data out through a
 * DTQ and back in through a DRQ, compare, and return the largest
 * burst size that round-trips correctly (minimum 4).
 * NOTE(review): line-sampled extract — timeout loops, breaks and some
 * statements are not visible; code kept byte-identical.
 */
2570 en_dmaprobe_doit(struct en_softc *sc, uint8_t *sp, bus_addr_t psp)
2572 uint8_t *dp = sp + MIDDMA_MAXBURST;
2573 bus_addr_t pdp = psp + MIDDMA_MAXBURST;
2574 int lcv, retval = 4, cnt;
2575 uint32_t reg, bcode, midvloc;
2577 if (sc->en_busreset)
2578 sc->en_busreset(sc);
2579 en_write(sc, MID_RESID, 0x0); /* reset card before touching RAM */
2582 * set up a 1k buffer at MID_BUFOFF
2584 midvloc = ((MID_BUFOFF - MID_RAMOFF) / sizeof(uint32_t))
2586 en_write(sc, MIDX_PLACE(0), MIDX_MKPLACE(en_k2sz(1), midvloc));
2587 en_write(sc, MID_VC(0), (midvloc << MIDV_LOCSHIFT)
2588 | (en_k2sz(1) << MIDV_SZSHIFT) | MIDV_TRASH);
2589 en_write(sc, MID_DST_RP(0), 0);
2590 en_write(sc, MID_WP_ST_CNT(0), 0);
2592 /* set up sample data */
2593 for (lcv = 0 ; lcv < MIDDMA_MAXBURST; lcv++)
2596 /* enable DMA (only) */
2597 en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
2599 sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
2600 sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
2603 * try it now . . . DMA it out, then DMA it back in and compare
2605 * note: in order to get the dma stuff to reverse directions it wants
2606 * the "end" flag set! since we are not dma'ing valid data we may
2607 * get an ident mismatch interrupt (which we will ignore).
2609 DBG(sc, DMA, ("test sp=%p/%#lx, dp=%p/%#lx",
2610 sp, (u_long)psp, dp, (u_long)pdp));
2611 for (lcv = 8 ; lcv <= MIDDMA_MAXBURST ; lcv = lcv * 2) {
2612 DBG(sc, DMA, ("test lcv=%d", lcv));
2614 /* zero SRAM and dest buffer */
2615 bus_space_set_region_4(sc->en_memt, sc->en_base,
2616 MID_BUFOFF, 0, 1024 / 4);
2617 bzero(dp, MIDDMA_MAXBURST);
2619 bcode = en_sz2b(lcv);
2621 /* build lcv-byte-DMA x NBURSTS */
/* Adaptec variant uses byte counts; ENI variant uses burst codes */
2623 en_write(sc, sc->dtq_chip,
2624 MID_MK_TXQ_ADP(lcv, 0, MID_DMA_END, 0));
2626 en_write(sc, sc->dtq_chip,
2627 MID_MK_TXQ_ENI(1, 0, MID_DMA_END, bcode));
2628 en_write(sc, sc->dtq_chip + 4, psp);
2629 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, sc->dtq_chip, 8);
2630 en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip));
/* poll until the chip has consumed the DTQ (timeout path elided) */
2633 while ((reg = en_readx(sc, MID_DMA_RDTX)) !=
2634 MID_DTQ_A2REG(sc->dtq_chip)) {
2637 DBG(sc, DMA, ("unexpected timeout in tx "
2638 "DMA test\n alignment=0x%lx, burst size=%d"
2639 ", dma addr reg=%#x, rdtx=%#x, stat=%#x\n",
2640 (u_long)sp & 63, lcv,
2641 en_read(sc, MID_DMA_ADDR), reg,
2642 en_read(sc, MID_INTSTAT)));
2647 reg = en_read(sc, MID_INTACK);
2648 if ((reg & MID_INT_DMA_TX) != MID_INT_DMA_TX) {
2649 DBG(sc, DMA, ("unexpected status in tx DMA test: %#x\n",
2653 /* re-enable DMA (only) */
2654 en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
2656 /* "return to sender..." address is known ... */
2658 /* build lcv-byte-DMA x NBURSTS */
2660 en_write(sc, sc->drq_chip,
2661 MID_MK_RXQ_ADP(lcv, 0, MID_DMA_END, 0));
2663 en_write(sc, sc->drq_chip,
2664 MID_MK_RXQ_ENI(1, 0, MID_DMA_END, bcode));
2665 en_write(sc, sc->drq_chip + 4, pdp);
2666 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_chip, 8);
2667 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
2669 while ((reg = en_readx(sc, MID_DMA_RDRX)) !=
2670 MID_DRQ_A2REG(sc->drq_chip)) {
2674 DBG(sc, DMA, ("unexpected timeout in rx "
2675 "DMA test, rdrx=%#x\n", reg));
2679 reg = en_read(sc, MID_INTACK);
2680 if ((reg & MID_INT_DMA_RX) != MID_INT_DMA_RX) {
2681 DBG(sc, DMA, ("unexpected status in rx DMA "
2682 "test: 0x%x\n", reg));
/* round trip complete: verify the data survived */
2685 if (bcmp(sp, dp, lcv)) {
2686 DBG(sc, DMA, ("DMA test failed! lcv=%d, sp=%p, "
2687 "dp=%p", lcv, sp, dp));
2693 return (retval); /* studly 64 byte DMA present! oh baby!! */
2697 * Find the best DMA parameters
2699 * LOCK: unlocked, not needed
/*
 * en_dmaprobe: determine the card's best DMA burst parameters —
 * allocate a test buffer, probe the best aligned burst, then probe
 * all 4-byte misalignments, and record the results in the softc
 * (bestburstlen/shift/mask/code, and per the header comment the
 * alburst/noalbursts flags in elided lines).
 * NOTE(review): line-sampled extract — declarations (tag, map,
 * buffer, addr, phys, err), error branches and braces not visible.
 */
2702 en_dmaprobe(struct en_softc *sc)
2708 int bestalgn, lcv, try, bestnoalgn;
2716 * Allocate some DMA-able memory.
2717 * We need 3 times the max burst size aligned to the max burst size.
2719 err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), MIDDMA_MAXBURST, 0,
2720 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2721 3 * MIDDMA_MAXBURST, 1, 3 * MIDDMA_MAXBURST, 0,
2724 panic("%s: cannot create test DMA tag %d", __func__, err);
2726 err = bus_dmamem_alloc(tag, &buffer, 0, &map);
2728 panic("%s: cannot allocate test DMA memory %d", __func__, err);
2730 err = bus_dmamap_load(tag, map, buffer, 3 * MIDDMA_MAXBURST,
2731 en_dmaprobe_load, &phys, BUS_DMA_NOWAIT);
2733 panic("%s: cannot load test DMA map %d", __func__, err);
2735 DBG(sc, DMA, ("phys=%#lx addr=%p", (u_long)phys, addr));
2738 * Now get the best burst size of the aligned case.
2740 bestalgn = bestnoalgn = en_dmaprobe_doit(sc, addr, phys);
2743 * Now try unaligned.
2745 for (lcv = 4; lcv < MIDDMA_MAXBURST; lcv += 4) {
2746 try = en_dmaprobe_doit(sc, addr + lcv, phys + lcv);
2748 if (try < bestnoalgn)
2752 if (bestnoalgn < bestalgn) {
2754 if (bestnoalgn < 32)
2758 sc->bestburstlen = bestalgn;
2759 sc->bestburstshift = en_log2(bestalgn);
2760 sc->bestburstmask = sc->bestburstlen - 1; /* must be power of 2 */
2761 sc->bestburstcode = en_sz2b(bestalgn);
2764 * Reset the chip before freeing the buffer. It may still be trying
2767 if (sc->en_busreset)
2768 sc->en_busreset(sc);
2769 en_write(sc, MID_RESID, 0x0); /* reset card before touching RAM */
2771 DELAY(10000); /* may still do DMA */
2774 * Free the DMA stuff
2776 bus_dmamap_unload(tag, map);
2777 bus_dmamem_free(tag, buffer, map);
2778 bus_dma_tag_destroy(tag);
2781 /*********************************************************************/
2787 * Attach to the card.
2789 * LOCK: unlocked, not needed (but initialized)
2792 en_attach(struct en_softc *sc)
2794 struct ifnet *ifp = sc->ifp;
2796 uint32_t reg, lcv, check, ptr, sav, midvloc;
2799 sc->debug = EN_DEBUG;
2803 * Probe card to determine memory size.
2805 * The stupid ENI card always reports to PCI that it needs 4MB of
2806 * space (2MB regs and 2MB RAM). If it has less than 2MB RAM the
2807 * addresses wrap in the RAM address space (i.e. on a 512KB card
2808 * addresses 0x3ffffc, 0x37fffc, and 0x2ffffc are aliases for
2809 * 0x27fffc [note that RAM starts at offset 0x200000]).
2812 /* reset card before touching RAM */
2813 if (sc->en_busreset)
2814 sc->en_busreset(sc);
2815 en_write(sc, MID_RESID, 0x0);
2817 for (lcv = MID_PROBEOFF; lcv <= MID_MAXOFF ; lcv += MID_PROBSIZE) {
2818 en_write(sc, lcv, lcv); /* data[address] = address */
2819 for (check = MID_PROBEOFF; check < lcv ;check += MID_PROBSIZE) {
2820 reg = en_read(sc, check);
2822 /* found an alias! - quit */
2827 lcv -= MID_PROBSIZE; /* take one step back */
2828 sc->en_obmemsz = (lcv + 4) - MID_RAMOFF;
2831 * determine the largest DMA burst supported
2840 if (sc->en_busreset)
2841 sc->en_busreset(sc);
2842 en_write(sc, MID_RESID, 0x0); /* reset */
2845 bus_space_set_region_4(sc->en_memt, sc->en_base,
2846 MID_RAMOFF, 0, sc->en_obmemsz / 4);
2848 reg = en_read(sc, MID_RESID);
2850 device_printf(sc->dev, "ATM midway v%d, board IDs %d.%d, %s%s%s, "
2851 "%ldKB on-board RAM\n", MID_VER(reg), MID_MID(reg), MID_DID(reg),
2852 (MID_IS_SABRE(reg)) ? "sabre controller, " : "",
2853 (MID_IS_SUNI(reg)) ? "SUNI" : "Utopia",
2854 (!MID_IS_SUNI(reg) && MID_IS_UPIPE(reg)) ? " (pipelined)" : "",
2855 (long)sc->en_obmemsz / 1024);
2858 * fill in common ATM interface stuff
2860 IFP2IFATM(sc->ifp)->mib.hw_version = (MID_VER(reg) << 16) |
2861 (MID_MID(reg) << 8) | MID_DID(reg);
2862 if (MID_DID(reg) & 0x4)
2863 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
2865 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
2867 IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
2868 IFP2IFATM(sc->ifp)->mib.vpi_bits = 0;
2869 IFP2IFATM(sc->ifp)->mib.vci_bits = MID_VCI_BITS;
2870 IFP2IFATM(sc->ifp)->mib.max_vccs = MID_N_VC;
2871 IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
2873 if (sc->is_adaptec) {
2874 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_ADP155P;
2875 if (sc->bestburstlen == 64 && sc->alburst == 0)
2876 device_printf(sc->dev,
2877 "passed 64 byte DMA test\n");
2879 device_printf(sc->dev, "FAILED DMA TEST: "
2880 "burst=%d, alburst=%d\n", sc->bestburstlen,
2883 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_ENI155P;
2884 device_printf(sc->dev, "maximum DMA burst length = %d "
2885 "bytes%s\n", sc->bestburstlen, sc->alburst ?
2886 sc->noalbursts ? " (no large bursts)" : " (must align)" :
2891 * link into network subsystem and prepare card
2893 sc->ifp->if_softc = sc;
2894 ifp->if_flags = IFF_SIMPLEX;
2895 ifp->if_ioctl = en_ioctl;
2896 ifp->if_start = en_start;
2898 mtx_init(&sc->en_mtx, device_get_nameunit(sc->dev),
2899 MTX_NETWORK_LOCK, MTX_DEF);
2900 cv_init(&sc->cv_close, "VC close");
2903 * Make the sysctl tree
2905 sysctl_ctx_init(&sc->sysctl_ctx);
2907 if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
2908 SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
2909 device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "")) == NULL)
2912 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2913 OID_AUTO, "istats", CTLTYPE_OPAQUE | CTLFLAG_RD, sc, 0,
2914 en_sysctl_istats, "S", "internal statistics") == NULL)
2918 if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2919 OID_AUTO, "debug", CTLFLAG_RW , &sc->debug, 0, "") == NULL)
2923 IFP2IFATM(sc->ifp)->phy = &sc->utopia;
2924 utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->en_mtx,
2925 &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2926 &en_utopia_methods);
2927 utopia_init_media(&sc->utopia);
2929 MGET(sc->padbuf, M_WAITOK, MT_DATA);
2930 bzero(sc->padbuf->m_data, MLEN);
2932 if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
2933 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2934 EN_TXSZ * 1024, EN_MAX_DMASEG, EN_TXSZ * 1024, 0,
2935 NULL, NULL, &sc->txtag))
2938 sc->map_zone = uma_zcreate("en dma maps", sizeof(struct en_map),
2939 en_map_ctor, en_map_dtor, NULL, en_map_fini, UMA_ALIGN_PTR,
2941 if (sc->map_zone == NULL)
2943 uma_zone_set_max(sc->map_zone, EN_MAX_MAPS);
2948 sc->vccs = malloc(MID_N_VC * sizeof(sc->vccs[0]),
2949 M_DEVBUF, M_ZERO | M_WAITOK);
2951 sz = sc->en_obmemsz - (MID_BUFOFF - MID_RAMOFF);
2952 ptr = sav = MID_BUFOFF;
2953 ptr = roundup(ptr, EN_TXSZ * 1024); /* align */
2954 sz = sz - (ptr - sav);
2955 if (EN_TXSZ*1024 * EN_NTX > sz) {
2956 device_printf(sc->dev, "EN_NTX/EN_TXSZ too big\n");
2959 for (lcv = 0 ;lcv < EN_NTX ;lcv++) {
2960 sc->txslot[lcv].mbsize = 0;
2961 sc->txslot[lcv].start = ptr;
2962 ptr += (EN_TXSZ * 1024);
2963 sz -= (EN_TXSZ * 1024);
2964 sc->txslot[lcv].stop = ptr;
2965 sc->txslot[lcv].nref = 0;
2966 DBG(sc, INIT, ("tx%d: start 0x%x, stop 0x%x", lcv,
2967 sc->txslot[lcv].start, sc->txslot[lcv].stop));
2971 ptr = roundup(ptr, EN_RXSZ * 1024); /* align */
2972 sz = sz - (ptr - sav);
2973 sc->en_nrx = sz / (EN_RXSZ * 1024);
2974 if (sc->en_nrx <= 0) {
2975 device_printf(sc->dev, "EN_NTX/EN_TXSZ/EN_RXSZ too big\n");
2980 * ensure that there is always one VC slot on the service list free
2981 * so that we can tell the difference between a full and empty list.
2983 if (sc->en_nrx >= MID_N_VC)
2984 sc->en_nrx = MID_N_VC - 1;
2986 for (lcv = 0 ; lcv < sc->en_nrx ; lcv++) {
2987 sc->rxslot[lcv].vcc = NULL;
2988 midvloc = sc->rxslot[lcv].start = ptr;
2989 ptr += (EN_RXSZ * 1024);
2990 sz -= (EN_RXSZ * 1024);
2991 sc->rxslot[lcv].stop = ptr;
2992 midvloc = midvloc - MID_RAMOFF;
2993 /* mask, cvt to words */
2994 midvloc = (midvloc & ~((EN_RXSZ*1024) - 1)) >> 2;
2995 /* we only want the top 11 bits */
2996 midvloc = midvloc >> MIDV_LOCTOPSHFT;
2997 midvloc = (midvloc & MIDV_LOCMASK) << MIDV_LOCSHIFT;
2998 sc->rxslot[lcv].mode = midvloc |
2999 (en_k2sz(EN_RXSZ) << MIDV_SZSHIFT) | MIDV_TRASH;
3001 DBG(sc, INIT, ("rx%d: start 0x%x, stop 0x%x, mode 0x%x", lcv,
3002 sc->rxslot[lcv].start, sc->rxslot[lcv].stop,
3003 sc->rxslot[lcv].mode));
3006 device_printf(sc->dev, "%d %dKB receive buffers, %d %dKB transmit "
3007 "buffers\n", sc->en_nrx, EN_RXSZ, EN_NTX, EN_TXSZ);
3008 device_printf(sc->dev, "end station identifier (mac address) "
3009 "%6D\n", IFP2IFATM(sc->ifp)->mib.esi, ":");
3012 * Start SUNI stuff. This will call our readregs/writeregs
3013 * functions and these assume the lock to be held so we must get it
3017 utopia_start(&sc->utopia);
3018 utopia_reset(&sc->utopia);
3027 bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
3038 * Free all internal resources. No access to bus resources here.
3039 * No locking required here (interrupt is already disabled).
3041 * LOCK: unlocked, needed (but destroyed)
/*
 * en_destroy: release every internal resource held by the softc.
 * No access to bus resources here and no locking required (the
 * interrupt is already disabled).  NOTE(review): excerpt view —
 * some original lines are elided; comments cover only what is
 * visible here.
 */
3044 en_destroy(struct en_softc *sc)
/* detach/stop the PHY only if utopia_attach() previously succeeded */
3048 if (sc->utopia.state & UTP_ST_ATTACHED) {
3049 /* these assume the lock to be held */
3051 utopia_stop(&sc->utopia);
3052 utopia_detach(&sc->utopia);
/* free any VCC still allocated, then the VCC pointer table itself */
3056 if (sc->vccs != NULL) {
3057 /* get rid of sticky VCCs */
3058 for (i = 0; i < MID_N_VC; i++)
3059 if (sc->vccs[i] != NULL)
3060 uma_zfree(en_vcc_zone, sc->vccs[i]);
3061 free(sc->vccs, M_DEVBUF);
3064 if (sc->padbuf != NULL)
3068 * Destroy the map zone before the tag (the fini function will
3069 * destroy the DMA maps using the tag)
3071 if (sc->map_zone != NULL)
3072 uma_zdestroy(sc->map_zone);
3074 if (sc->txtag != NULL)
3075 bus_dma_tag_destroy(sc->txtag);
/* sysctl context, close-condvar and mutex were created in attach */
3077 (void)sysctl_ctx_free(&sc->sysctl_ctx);
3079 cv_destroy(&sc->cv_close);
3080 mtx_destroy(&sc->en_mtx);
3084 * Module loaded/unloaded
/*
 * en_modevent: module load/unload handler.  On load it creates the
 * UMA zone that per-VCC descriptors are allocated from; on unload it
 * destroys that zone.  NOTE(review): excerpt view — the switch on
 * 'event' and the return statements are elided here.
 */
3087 en_modevent(module_t mod __unused, int event, void *arg __unused)
/* load path: create the zone that en_destroy() frees VCCs back to */
3093 en_vcc_zone = uma_zcreate("EN vccs", sizeof(struct en_vcc),
3094 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
3095 if (en_vcc_zone == NULL)
/* unload path: tear the zone down again */
3100 uma_zdestroy(en_vcc_zone);
3106 /*********************************************************************/
3113 * functions we can call from ddb
3117 * en_dump: dump the state
/*
 * Bit flags for the 'level' argument of en_dump() (callable from
 * ddb); END_BITS is the matching "%b" format string used when the
 * selected level is printed.
 */
3119 #define END_SWSL 0x00000040 /* swsl state */
3120 #define END_DRQ 0x00000020 /* drq state */
3121 #define END_DTQ 0x00000010 /* dtq state */
3122 #define END_RX 0x00000008 /* rx state */
3123 #define END_TX 0x00000004 /* tx state */
3124 #define END_MREGS 0x00000002 /* registers */
3125 #define END_STATS 0x00000001 /* dump stats */
3127 #define END_BITS "\20\7SWSL\6DRQ\5DTQ\4RX\3TX\2MREGS\1STATS"
/*
 * en_dump_stats: print the driver's internal statistics counters to
 * the console (ddb helper).  NOTE(review): excerpt view — a few
 * printf continuation lines are elided.
 */
3130 en_dump_stats(const struct en_stats *s)
3132 printf("en_stats:\n");
3133 printf("\t%d/%d mfix (%d failed)\n", s->mfixaddr, s->mfixlen,
3135 printf("\t%d rx dma overflow interrupts\n", s->dmaovr);
3136 printf("\t%d times out of TX space and stalled\n", s->txoutspace);
3137 printf("\t%d times out of DTQs\n", s->txdtqout);
3138 printf("\t%d times launched a packet\n", s->launch);
3139 printf("\t%d times pulled the hw service list\n", s->hwpull);
3140 printf("\t%d times pushed a vci on the sw service list\n", s->swadd);
3141 printf("\t%d times RX pulled an mbuf from Q that wasn't ours\n",
3143 printf("\t%d times RX pulled a good mbuf from Q\n", s->rxqus);
3144 printf("\t%d times ran out of DRQs\n", s->rxdrqout);
3145 printf("\t%d transmit packets dropped due to mbsize\n", s->txmbovr);
3146 printf("\t%d cells trashed due to turned off rxvc\n", s->vtrash);
3147 printf("\t%d cells trashed due to totally full buffer\n", s->otrash);
3148 printf("\t%d cells trashed due almost full buffer\n", s->ttrash);
3149 printf("\t%d rx mbuf allocation failures\n", s->rxmbufout);
3150 printf("\t%d times out of tx maps\n", s->txnomap);
/* NATM socket receive counters (defined outside this file) */
3153 printf("\tnatmintr so_rcv: ok/drop cnt: %d/%d, ok/drop bytes: %d/%d\n",
3154 natm_sookcnt, natm_sodropcnt, natm_sookbytes, natm_sodropbytes);
/*
 * en_dump_mregs: dump the midway chip registers and compare the
 * chip's DMA queue pointers with the driver's own copies (ddb
 * helper).  NOTE(review): excerpt view — some lines are elided.
 */
3160 en_dump_mregs(struct en_softc *sc)
3165 printf("resid = 0x%x\n", en_read(sc, MID_RESID));
3166 printf("interrupt status = 0x%b\n",
3167 (int)en_read(sc, MID_INTSTAT), MID_INTBITS);
3168 printf("interrupt enable = 0x%b\n",
3169 (int)en_read(sc, MID_INTENA), MID_INTBITS);
3170 printf("mcsr = 0x%b\n", (int)en_read(sc, MID_MAST_CSR), MID_MCSRBITS);
3171 printf("serv_write = [chip=%u] [us=%u]\n", en_read(sc, MID_SERV_WRITE),
3172 MID_SL_A2REG(sc->hwslistp));
3173 printf("dma addr = 0x%x\n", en_read(sc, MID_DMA_ADDR));
/* receive (DRQ) and transmit (DTQ) queues: chip view vs. driver view */
3174 printf("DRQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
3175 MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX)),
3176 MID_DRQ_REG2A(en_read(sc, MID_DMA_WRRX)), sc->drq_chip, sc->drq_us);
3177 printf("DTQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
3178 MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX)),
3179 MID_DTQ_REG2A(en_read(sc, MID_DMA_WRTX)), sc->dtq_chip, sc->dtq_us);
/* NOTE(review): "unusal" is a typo ("unusual") in the console string */
3181 printf(" unusal txspeeds:");
/*
 * NOTE(review): entries of sc->vccs[] can be NULL (en_destroy checks
 * for exactly that) yet these loops dereference sc->vccs[cnt] without
 * a guard — confirm against the elided lines; looks like a potential
 * NULL dereference when a VC has never been opened.
 */
3182 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3183 if (sc->vccs[cnt]->txspeed)
3184 printf(" vci%d=0x%x", cnt, sc->vccs[cnt]->txspeed);
3187 printf(" rxvc slot mappings:");
3188 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3189 if (sc->vccs[cnt]->rxslot != NULL)
3190 printf(" %d->%td", cnt,
3191 sc->vccs[cnt]->rxslot - sc->rxslot);
/*
 * en_dump_tx: dump the software and hardware state of every transmit
 * channel (ddb helper).  NOTE(review): excerpt view — a format-string
 * continuation line is elided.
 */
3196 en_dump_tx(struct en_softc *sc)
3201 for (slot = 0 ; slot < EN_NTX; slot++) {
/* software view: buffer window, write position, and fill in words */
3202 printf("tx%d: start/stop/cur=0x%x/0x%x/0x%x [%d] ", slot,
3203 sc->txslot[slot].start, sc->txslot[slot].stop,
3204 sc->txslot[slot].cur,
3205 (sc->txslot[slot].cur - sc->txslot[slot].start) / 4);
3206 printf("mbsize=%d, bfree=%d\n", sc->txslot[slot].mbsize,
3207 sc->txslot[slot].bfree);
/* hardware view read back from the channel's midway registers */
3208 printf("txhw: base_address=0x%x, size=%u, read=%u, "
3210 (u_int)MIDX_BASE(en_read(sc, MIDX_PLACE(slot))),
3211 MIDX_SZ(en_read(sc, MIDX_PLACE(slot))),
3212 en_read(sc, MIDX_READPTR(slot)),
3213 en_read(sc, MIDX_DESCSTART(slot)));
/*
 * en_dump_rx: dump every receive slot; for a slot bound to an open
 * VCC also dump the per-VC hardware registers (ddb helper).
 * NOTE(review): excerpt view — a printf continuation line is elided.
 */
3218 en_dump_rx(struct en_softc *sc)
3220 struct en_rxslot *slot;
3222 printf(" recv slots:\n");
3223 for (slot = sc->rxslot ; slot < &sc->rxslot[sc->en_nrx]; slot++) {
3224 printf("rx%td: start/stop/cur=0x%x/0x%x/0x%x mode=0x%x ",
3225 slot - sc->rxslot, slot->start, slot->stop, slot->cur,
3227 if (slot->vcc != NULL) {
/* slot is attached to an open VCC: show its VCI and hw registers */
3228 printf("vci=%u\n", slot->vcc->vcc.vci);
3229 printf("RXHW: mode=0x%x, DST_RP=0x%x, WP_ST_CNT=0x%x\n",
3230 en_read(sc, MID_VC(slot->vcc->vcc.vci)),
3231 en_read(sc, MID_DST_RP(slot->vcc->vcc.vci)),
3232 en_read(sc, MID_WP_ST_CNT(slot->vcc->vcc.vci)));
3238 * This is only correct for non-adaptec adapters
/*
 * en_dump_dtqs: walk the outstanding DMA transmit queue entries from
 * the chip's pointer up to the driver's own and print each one (ddb
 * helper).  NOTE(review): excerpt view — declarations and the loop
 * initializer are elided.
 */
3241 en_dump_dtqs(struct en_softc *sc)
3245 printf(" dtq [need_dtqs=%d,dtq_free=%d]:\n", sc->need_dtqs,
3248 while (ptr != sc->dtq_us) {
3249 reg = en_read(sc, ptr);
3250 printf("\t0x%x=[%#x cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
3251 sc->dtq[MID_DTQ_A2REG(ptr)], reg, MID_DMA_CNT(reg),
3252 MID_DMA_TXCHAN(reg), (reg & MID_DMA_END) != 0,
3253 MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
/* advance ptr by 8 bytes (two words), wrapping via EN_WRAPADD */
3254 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, ptr, 8);
/*
 * en_dump_drqs: walk the outstanding DMA receive queue entries from
 * the chip's pointer up to the driver's own and print each one (ddb
 * helper).  NOTE(review): excerpt view — declarations and the loop
 * initializer are elided.
 */
3259 en_dump_drqs(struct en_softc *sc)
3263 printf(" drq [need_drqs=%d,drq_free=%d]:\n", sc->need_drqs,
3266 while (ptr != sc->drq_us) {
3267 reg = en_read(sc, ptr);
3268 printf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
3269 sc->drq[MID_DRQ_A2REG(ptr)], MID_DMA_CNT(reg),
3270 MID_DMA_RXVCI(reg), (reg & MID_DMA_END) != 0,
3271 MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
/* advance ptr by 8 bytes (two words), wrapping via EN_WRAPADD */
3272 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, ptr, 8);
3276 /* Do not staticize - meant for calling from DDB! */
/*
 * en_dump: dump driver state for one unit (or every unit when unit
 * is -1) at the verbosity selected by the END_* bits in 'level'.
 * Do not staticize — meant for calling from DDB.  NOTE(review):
 * excerpt view — several lines (continue/return paths, some level
 * dispatch calls) are elided.
 */
3278 en_dump(int unit, int level)
3280 struct en_softc *sc;
3285 dc = devclass_find("en");
3287 printf("%s: can't find devclass!\n", __func__);
3290 maxunit = devclass_get_maxunit(dc);
3291 for (lcv = 0 ; lcv < maxunit ; lcv++) {
3292 sc = devclass_get_softc(dc, lcv);
/* when a specific unit was requested, skip all the others */
3295 if (unit != -1 && unit != lcv)
3298 device_printf(sc->dev, "dumping device at level 0x%b\n",
/* dtq_us == 0 is used as the "not yet en_init'd" marker */
3301 if (sc->dtq_us == 0) {
3302 printf("<hasn't been en_init'd yet>\n");
/* dispatch on the requested END_* bits */
3306 if (level & END_STATS)
3307 en_dump_stats(&sc->stats);
3308 if (level & END_MREGS)
3314 if (level & END_DTQ)
3316 if (level & END_DRQ)
/* the software service list is a ring; walk from head to tail */
3319 if (level & END_SWSL) {
3320 printf(" swslist [size=%d]: ", sc->swsl_size);
3321 for (cnt = sc->swsl_head ; cnt != sc->swsl_tail ;
3322 cnt = (cnt + 1) % MID_SL_N)
3323 printf("0x%x ", sc->swslist[cnt]);
3331 * en_dumpmem: dump the memory
3333 * Do not staticize - meant for calling from DDB!
3336 en_dumpmem(int unit, int addr, int len)
3338 struct en_softc *sc;
3342 dc = devclass_find("en");
3344 printf("%s: can't find devclass\n", __func__);
3347 sc = devclass_get_softc(dc, unit);
3349 printf("%s: invalid unit number: %d\n", __func__, unit);
3354 if (addr < MID_RAMOFF || addr + len * 4 > MID_MAXOFF || len <= 0) {
3355 printf("invalid addr/len number: %d, %d\n", addr, len);
3358 printf("dumping %d words starting at offset 0x%x\n", len, addr);
3360 reg = en_read(sc, addr);
3361 printf("mem[0x%x] = 0x%x\n", addr, reg);