2 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com>
3 * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski <ppk@semihalf.com>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/endian.h>
38 #include <sys/kernel.h>
39 #include <sys/module.h>
40 #include <sys/socket.h>
41 #include <sys/sockio.h>
42 #include <sys/sysctl.h>
45 #include <net/ethernet.h>
47 #include <net/if_arp.h>
48 #include <net/if_dl.h>
49 #include <net/if_media.h>
50 #include <net/if_types.h>
51 #include <net/if_vlan_var.h>
53 #include <machine/bus.h>
55 #include <dev/mii/mii.h>
56 #include <dev/mii/miivar.h>
58 #include <dev/tsec/if_tsec.h>
59 #include <dev/tsec/if_tsecreg.h>
/*
 * Forward declarations for the driver's internal (static) helpers,
 * followed by driver/module glue: a global softc pointer (used for MII
 * management register access via TSEC0), the devclass, and the module
 * dependencies on ether and miibus.
 */
61 static int tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag,
62 bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr,
64 static void tsec_dma_ctl(struct tsec_softc *sc, int state);
65 static int tsec_encap(struct tsec_softc *sc, struct mbuf *m_head);
66 static void tsec_free_dma(struct tsec_softc *sc);
67 static void tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr);
68 static int tsec_ifmedia_upd(struct ifnet *ifp);
69 static void tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
70 static int tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
71 struct mbuf **mbufp, uint32_t *paddr);
72 static void tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs,
74 static void tsec_intrs_ctl(struct tsec_softc *sc, int state);
75 static void tsec_init(void *xsc);
76 static void tsec_init_locked(struct tsec_softc *sc);
77 static int tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
78 static void tsec_reset_mac(struct tsec_softc *sc);
79 static void tsec_setfilter(struct tsec_softc *sc);
80 static void tsec_set_mac_address(struct tsec_softc *sc);
81 static void tsec_start(struct ifnet *ifp);
82 static void tsec_start_locked(struct ifnet *ifp);
83 static void tsec_stop(struct tsec_softc *sc);
84 static void tsec_tick(void *arg);
85 static void tsec_watchdog(struct tsec_softc *sc);
/* Global handle to the first TSEC instance; the MII management code below
 * always goes through tsec0_sc rather than the per-unit softc. */
87 struct tsec_softc *tsec0_sc = NULL; /* XXX ugly hack! */
89 devclass_t tsec_devclass;
/* Attach miibus as a child driver and record module dependencies. */
90 DRIVER_MODULE(miibus, tsec, miibus_driver, miibus_devclass, 0, 0);
91 MODULE_DEPEND(tsec, ether, 1, 1, 1);
92 MODULE_DEPEND(tsec, miibus, 1, 1, 1);
/*
 * Device attach: reset driver ring counters, quiesce the DMA engine and
 * interrupts, allocate busdma tags/maps for the TX and RX descriptor
 * rings and for TX/RX mbufs, pre-load one mbuf per RX descriptor, create
 * the network interface, probe the PHY via MII and attach to ethernet.
 * NOTE(review): error-handling/unwind lines of this function are not
 * visible in this extract.
 */
95 tsec_attach(struct tsec_softc *sc)
97 uint8_t hwaddr[ETHER_ADDR_LEN];
99 bus_dmamap_t *map_ptr;
100 bus_dmamap_t **map_pptr;
104 /* Reset all TSEC counters */
105 TSEC_TX_RX_COUNTERS_INIT(sc);
107 /* Stop DMA engine if enabled by firmware */
113 /* Disable interrupts for now */
114 tsec_intrs_ctl(sc, 0);
116 /* Allocate a busdma tag and DMA safe memory for TX descriptors. */
117 error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag, &sc->tsec_tx_dmap,
118 sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC,
119 (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX");
125 /* Allocate a busdma tag and DMA safe memory for RX descriptors. */
126 error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag, &sc->tsec_rx_dmap,
127 sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC,
128 (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX");
134 /* Allocate a busdma tag for TX mbufs. */
135 error = bus_dma_tag_create(NULL, /* parent */
136 TSEC_TXBUFFER_ALIGNMENT, 0, /* alignment, boundary */
137 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
138 BUS_SPACE_MAXADDR, /* highaddr */
139 NULL, NULL, /* filtfunc, filtfuncarg */
140 MCLBYTES * (TSEC_TX_NUM_DESC - 1),/* maxsize */
141 TSEC_TX_NUM_DESC - 1, /* nsegments */
142 MCLBYTES, 0, /* maxsegsz, flags */
143 NULL, NULL, /* lockfunc, lockfuncarg */
144 &sc->tsec_tx_mtag); /* dmat */
146 device_printf(sc->dev, "failed to allocate busdma tag(tx mbufs)\n");
151 /* Allocate a busdma tag for RX mbufs. */
152 error = bus_dma_tag_create(NULL, /* parent */
153 TSEC_RXBUFFER_ALIGNMENT, 0, /* alignment, boundary */
154 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
155 BUS_SPACE_MAXADDR, /* highaddr */
156 NULL, NULL, /* filtfunc, filtfuncarg */
157 MCLBYTES, /* maxsize */
159 MCLBYTES, 0, /* maxsegsz, flags */
160 NULL, NULL, /* lockfunc, lockfuncarg */
161 &sc->tsec_rx_mtag); /* dmat */
163 device_printf(sc->dev, "failed to allocate busdma tag(rx mbufs)\n");
168 /* Create TX busdma maps */
169 map_ptr = sc->tx_map_data;
170 map_pptr = sc->tx_map_unused_data;
/* Seed the "unused map" list with pointers into the map array. */
172 for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
173 map_pptr[i] = &map_ptr[i];
174 error = bus_dmamap_create(sc->tsec_tx_mtag, 0, map_pptr[i]);
176 device_printf(sc->dev, "failed to init TX ring\n");
182 /* Create RX busdma maps and zero mbuf handlers */
183 for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
184 error = bus_dmamap_create(sc->tsec_rx_mtag, 0, &sc->rx_data[i].map);
186 device_printf(sc->dev, "failed to init RX ring\n");
190 sc->rx_data[i].mbuf = NULL;
193 /* Create mbufs for RX buffers */
194 for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
195 error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map,
196 &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr);
198 device_printf(sc->dev, "can't load rx DMA map %d, error = "
205 /* Create network interface for upper layers */
206 ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
208 device_printf(sc->dev, "if_alloc() failed\n");
214 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
215 ifp->if_mtu = ETHERMTU;
216 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST;
217 ifp->if_init = tsec_init;
218 ifp->if_start = tsec_start;
219 ifp->if_ioctl = tsec_ioctl;
/* Send queue sized to the TX ring (one descriptor kept in reserve). */
221 IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1);
222 ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1;
223 IFQ_SET_READY(&ifp->if_snd);
225 /* XXX No special features of TSEC are supported currently */
226 ifp->if_capabilities = 0;
227 ifp->if_capenable = ifp->if_capabilities;
230 error = mii_phy_probe(sc->dev, &sc->tsec_miibus, tsec_ifmedia_upd,
233 device_printf(sc->dev, "MII failed to find PHY!\n");
239 sc->tsec_mii = device_get_softc(sc->tsec_miibus);
241 /* Set MAC address */
242 tsec_get_hwaddr(sc, hwaddr);
243 ether_ifattach(ifp, hwaddr);
/*
 * Device detach: shut the controller down (only if resources and the
 * ifnet were actually attached), detach and free the network interface,
 * then release DMA resources.
 */
249 tsec_detach(struct tsec_softc *sc)
252 /* Stop TSEC controller and free TX queue */
253 if (sc->sc_rres && sc->tsec_ifp)
254 tsec_shutdown(sc->dev);
256 /* Detach network interface */
258 ether_ifdetach(sc->tsec_ifp);
259 if_free(sc->tsec_ifp);
263 /* Free DMA resources */
/* Shutdown method: stop the controller while holding the global lock. */
270 tsec_shutdown(device_t dev)
272 struct tsec_softc *sc;
274 sc = device_get_softc(dev);
276 TSEC_GLOBAL_LOCK(sc);
278 TSEC_GLOBAL_UNLOCK(sc);
/* Bus suspend method — placeholder, no power management implemented. */
282 tsec_suspend(device_t dev)
285 /* TODO not implemented! */
/* Bus resume method — placeholder, no power management implemented. */
290 tsec_resume(device_t dev)
293 /* TODO not implemented! */
/* ifnet if_init entry point: run tsec_init_locked() under the global
 * lock. NOTE(review): the function header line is not visible in this
 * extract. */
300 struct tsec_softc *sc = xsc;
302 TSEC_GLOBAL_LOCK(sc);
303 tsec_init_locked(sc);
304 TSEC_GLOBAL_UNLOCK(sc);
/*
 * Full hardware (re)initialization, following the MPC8555E PowerQUICC III
 * reference manual section 14.7 — the 24 steps are annotated inline.
 * Caller must hold the global lock (asserted below).
 */
308 tsec_init_locked(struct tsec_softc *sc)
310 struct tsec_desc *tx_desc = sc->tsec_tx_vaddr;
311 struct tsec_desc *rx_desc = sc->tsec_rx_vaddr;
312 struct ifnet *ifp = sc->tsec_ifp;
317 TSEC_GLOBAL_LOCK_ASSERT(sc);
321 * These steps are according to the MPC8555E PowerQUICCIII RM:
322 * 14.7 Initialization/Application Information
325 /* Step 1: soft reset MAC */
328 /* Step 2: Initialize MACCFG2 */
329 TSEC_WRITE(sc, TSEC_REG_MACCFG2,
330 TSEC_MACCFG2_FULLDUPLEX | /* Full Duplex = 1 */
331 TSEC_MACCFG2_PADCRC | /* PAD/CRC append */
332 TSEC_MACCFG2_GMII | /* I/F Mode bit */
333 TSEC_MACCFG2_PRECNT /* Preamble count = 7 */
336 /* Step 3: Initialize ECNTRL
337 * While the documentation states that R100M is ignored if RPM is
338 * not set, it does seem to be needed to get the orange boxes to
339 * work (which have a Marvell 88E1111 PHY). Go figure.
343 * XXX kludge - use circumstancial evidence to program ECNTRL
344 * correctly. Ideally we need some board information to guide
/* Board type is inferred from the controller's ID2 register. */
347 i = TSEC_READ(sc, TSEC_REG_ID2);
349 ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM) /* Sumatra */
350 : TSEC_ECNTRL_R100M; /* Orange + CDS */
351 TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val);
353 /* Step 4: Initialize MAC station address */
354 tsec_set_mac_address(sc);
357 * Step 5: Assign a Physical address to the TBI so as to not conflict
358 * with the external PHY physical address
360 TSEC_WRITE(sc, TSEC_REG_TBIPA, 5);
/* NOTE(review): steps 6-8 use tsec0_sc, not sc — the MII management
 * registers are apparently shared and accessed through TSEC0 only. */
362 /* Step 6: Reset the management interface */
363 TSEC_WRITE(tsec0_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT);
365 /* Step 7: Setup the MII Mgmt clock speed */
366 TSEC_WRITE(tsec0_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28);
368 /* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */
369 timeout = TSEC_READ_RETRY;
370 while (--timeout && (TSEC_READ(tsec0_sc, TSEC_REG_MIIMIND) &
372 DELAY(TSEC_READ_DELAY);
374 if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n");
378 /* Step 9: Setup the MII Mgmt */
379 mii_mediachg(sc->tsec_mii);
381 /* Step 10: Clear IEVENT register */
382 TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff);
384 /* Step 11: Initialize IMASK */
385 tsec_intrs_ctl(sc, 1);
387 /* Step 12: Initialize IADDRn */
388 TSEC_WRITE(sc, TSEC_REG_IADDR0, 0);
389 TSEC_WRITE(sc, TSEC_REG_IADDR1, 0);
390 TSEC_WRITE(sc, TSEC_REG_IADDR2, 0);
391 TSEC_WRITE(sc, TSEC_REG_IADDR3, 0);
392 TSEC_WRITE(sc, TSEC_REG_IADDR4, 0);
393 TSEC_WRITE(sc, TSEC_REG_IADDR5, 0);
394 TSEC_WRITE(sc, TSEC_REG_IADDR6, 0);
395 TSEC_WRITE(sc, TSEC_REG_IADDR7, 0);
397 /* Step 13: Initialize GADDRn */
398 TSEC_WRITE(sc, TSEC_REG_GADDR0, 0);
399 TSEC_WRITE(sc, TSEC_REG_GADDR1, 0);
400 TSEC_WRITE(sc, TSEC_REG_GADDR2, 0);
401 TSEC_WRITE(sc, TSEC_REG_GADDR3, 0);
402 TSEC_WRITE(sc, TSEC_REG_GADDR4, 0);
403 TSEC_WRITE(sc, TSEC_REG_GADDR5, 0);
404 TSEC_WRITE(sc, TSEC_REG_GADDR6, 0);
405 TSEC_WRITE(sc, TSEC_REG_GADDR7, 0);
407 /* Step 14: Initialize RCTRL */
408 TSEC_WRITE(sc, TSEC_REG_RCTRL, 0);
410 /* Step 15: Initialize DMACTRL */
413 /* Step 16: Initialize FIFO_PAUSE_CTRL */
414 TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN);
417 * Step 17: Initialize transmit/receive descriptor rings.
418 * Initialize TBASE and RBASE.
420 TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr);
421 TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr);
/* TX ring: all descriptors empty; last one gets the wrap bit. */
423 for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
424 tx_desc[i].bufptr = 0;
425 tx_desc[i].length = 0;
426 tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ? TSEC_TXBD_W : 0);
428 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
429 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* RX ring: point each descriptor at its pre-loaded mbuf, mark it
 * empty (E) and interrupt-on-receive (I); last one wraps. */
431 for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
432 rx_desc[i].bufptr = sc->rx_data[i].paddr;
433 rx_desc[i].length = 0;
434 rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
435 ((i == TSEC_RX_NUM_DESC - 1) ? TSEC_RXBD_W : 0);
437 bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_PREREAD |
438 BUS_DMASYNC_PREWRITE);
440 /* Step 18: Initialize the maximum and minimum receive buffer length */
441 TSEC_WRITE(sc, TSEC_REG_MRBLR, TSEC_DEFAULT_MAX_RX_BUFFER_SIZE);
442 TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_DEFAULT_MIN_RX_BUFFER_SIZE);
444 /* Step 19: Enable Rx and RxBD sdata snooping */
445 TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN);
446 TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0);
448 /* Step 20: Reset collision counters in hardware */
449 TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
450 TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
451 TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
452 TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
453 TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);
455 /* Step 21: Mask all CAM interrupts */
456 TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff);
457 TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff);
459 /* Step 22: Enable Rx and Tx */
460 val = TSEC_READ(sc, TSEC_REG_MACCFG1);
461 val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
462 TSEC_WRITE(sc, TSEC_REG_MACCFG1, val);
464 /* Step 23: Reset TSEC counters for Tx and Rx rings */
465 TSEC_TX_RX_COUNTERS_INIT(sc);
467 /* Step 24: Activate network interface */
468 ifp->if_drv_flags |= IFF_DRV_RUNNING;
469 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
470 sc->tsec_if_flags = ifp->if_flags;
471 sc->tsec_watchdog = 0;
473 /* Schedule watchdog timeout */
474 callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);
/*
 * Program the MAC station address registers from the interface's link
 * layer address. The bytes are reversed into macbuf first because the
 * MACSTNADDR registers expect them in the opposite order.
 * NOTE(review): the KASSERT message string has an unbalanced paren and
 * uses %d for a sizeof() value — cosmetic only.
 */
478 tsec_set_mac_address(struct tsec_softc *sc)
480 uint32_t macbuf[2] = { 0, 0 };
485 TSEC_GLOBAL_LOCK_ASSERT(sc);
487 KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)),
488 ("tsec_set_mac_address: (%d <= %d", ETHER_ADDR_LEN, sizeof(macbuf)));
490 macbufp = (char *)macbuf;
491 curmac = (char *)IF_LLADDR(sc->tsec_ifp);
493 /* Correct order of MAC address bytes */
494 for (i = 1; i <= ETHER_ADDR_LEN; i++)
495 macbufp[ETHER_ADDR_LEN-i] = curmac[i-1];
497 /* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */
498 TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]);
499 TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]);
503 * DMA control function, if argument state is:
504 * 0 - DMA engine will be disabled
505 * 1 - DMA engine will be enabled
/* NOTE(review): the function also calls itself with state == 1000 to
 * temporarily clear the graceful-stop bits before a full stop; the
 * switch arm handling that value is not visible in this extract. */
508 tsec_dma_ctl(struct tsec_softc *sc, int state)
516 dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL);
520 /* Temporarily clear stop graceful stop bits. */
521 tsec_dma_ctl(sc, 1000);
/* state 0: request graceful RX (GRS) and TX (GTS) stop. */
524 dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
528 /* Set write with response (WWR), wait (WOP) and snoop bits */
529 dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN |
530 DMACTRL_WWR | DMACTRL_WOP);
532 /* Clear graceful stop bits */
533 dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
536 device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n",
540 TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags);
544 /* Wait for DMA stop */
545 timeout = TSEC_READ_RETRY;
546 while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) &
547 (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC))))
548 DELAY(TSEC_READ_DELAY);
551 device_printf(dev, "tsec_dma_ctl(): timeout!\n");
554 /* Restart transmission function */
555 TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
560 * Interrupts control function, if argument state is:
561 * 0 - all TSEC interrupts will be masked
562 * 1 - all TSEC interrupts will be unmasked
565 tsec_intrs_ctl(struct tsec_softc *sc, int state)
/* state 0: mask everything by clearing IMASK. */
573 TSEC_WRITE(sc, TSEC_REG_IMASK, 0);
/* state 1: enable the error, RX and TX interrupt sources the
 * driver actually services. */
576 TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN | TSEC_IMASK_RXCEN |
577 TSEC_IMASK_BSYEN | TSEC_IMASK_EBERREN | TSEC_IMASK_BTEN |
578 TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN | TSEC_IMASK_TXFEN |
579 TSEC_IMASK_XFUNEN | TSEC_IMASK_RXFEN);
582 device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n",
/* Pulse the MACCFG1 soft-reset bit: set it, then clear it again. */
588 tsec_reset_mac(struct tsec_softc *sc)
590 uint32_t maccfg1_flags;
592 /* Set soft reset bit */
593 maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
594 maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET;
595 TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
597 /* Clear soft reset bit */
598 maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
599 maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET;
600 TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
/*
 * TX watchdog, driven from tsec_tick(): returns early while the counter
 * is disarmed (0) or still counting down; on expiry it reports the
 * timeout and reinitializes the controller. Global lock required.
 */
604 tsec_watchdog(struct tsec_softc *sc)
608 TSEC_GLOBAL_LOCK_ASSERT(sc);
610 if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0)
615 if_printf(ifp, "watchdog timeout\n");
618 tsec_init_locked(sc);
/* ifnet if_start entry point: run tsec_start_locked() under the TX lock. */
622 tsec_start(struct ifnet *ifp)
624 struct tsec_softc *sc = ifp->if_softc;
626 TSEC_TRANSMIT_LOCK(sc);
627 tsec_start_locked(ifp);
628 TSEC_TRANSMIT_UNLOCK(sc);
/*
 * Drain the interface send queue into the TX descriptor ring. Bails out
 * if the interface is not RUNNING / is OACTIVE or the link is down.
 * Packets that fail tsec_encap() are re-queued and OACTIVE is set until
 * the transmit-complete interrupt frees descriptors. TX lock required.
 */
632 tsec_start_locked(struct ifnet *ifp)
634 struct tsec_softc *sc;
637 unsigned int queued = 0;
641 TSEC_TRANSMIT_LOCK_ASSERT(sc);
643 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
647 if (sc->tsec_link == 0)
650 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_POSTREAD |
651 BUS_DMASYNC_POSTWRITE);
654 /* Get packet from the queue */
655 IF_DEQUEUE(&ifp->if_snd, m0);
/* NOTE(review): m_defrag here appears conditional on something in the
 * missing lines (presumably a fragment-count check) — confirm against
 * the full source. */
659 mtmp = m_defrag(m0, M_DONTWAIT);
663 if (tsec_encap(sc, m0)) {
664 IF_PREPEND(&ifp->if_snd, m0);
665 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
671 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_PREREAD |
672 BUS_DMASYNC_PREWRITE);
675 /* Enable transmitter and watchdog timer */
676 TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
677 sc->tsec_watchdog = 5;
/*
 * Map one mbuf chain into TX descriptors. Returns non-zero (error or -1)
 * when no descriptors are free or the DMA load fails, in which case the
 * caller re-queues the packet. On success the mbuf and its DMA map are
 * stashed for release in the transmit-complete interrupt. TX lock held.
 */
682 tsec_encap(struct tsec_softc *sc, struct mbuf *m0)
684 struct tsec_desc *tx_desc = NULL;
686 bus_dma_segment_t segs[TSEC_TX_NUM_DESC];
691 TSEC_TRANSMIT_LOCK_ASSERT(sc);
695 if (TSEC_FREE_TX_DESC(sc) == 0) {
696 /* No free descriptors */
700 /* Fetch unused map */
701 mapp = TSEC_ALLOC_TX_MAP(sc);
703 /* Create mapping in DMA memory */
704 error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag,
705 *mapp, m0, segs, &nsegs, BUS_DMA_NOWAIT);
/* Undo the load and give the map back on any failure, including the
 * chain needing more segments than there are free descriptors. */
706 if (error != 0 || nsegs > TSEC_FREE_TX_DESC(sc) || nsegs <= 0) {
707 bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);
708 TSEC_FREE_TX_MAP(sc, mapp);
709 return ((error != 0) ? error : -1);
711 bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_PREWRITE);
713 if ((ifp->if_flags & IFF_DEBUG) && (nsegs > 1))
714 if_printf(ifp, "TX buffer has %d segments\n", nsegs);
716 /* Everything is ok, now we can send buffers */
717 for (seg = 0; seg < nsegs; seg++) {
718 tx_desc = TSEC_GET_CUR_TX_DESC(sc);
720 tx_desc->length = segs[seg].ds_len;
721 tx_desc->bufptr = segs[seg].ds_addr;
/* Preserve the ring's wrap bit while setting ready/interrupt/CRC
 * flags; only the final segment gets the last-in-frame bit. */
724 (tx_desc->flags & TSEC_TXBD_W) | /* wrap */
725 TSEC_TXBD_I | /* interrupt */
726 TSEC_TXBD_R | /* ready to send */
727 TSEC_TXBD_TC | /* transmit the CRC sequence
728 * after the last data byte */
729 ((seg == nsegs-1) ? TSEC_TXBD_L : 0);/* last in frame */
732 /* Save mbuf and DMA mapping for release at later stage */
733 TSEC_PUT_TX_MBUF(sc, m0);
734 TSEC_PUT_TX_MAP(sc, mapp);
/* Update the RX filter: only the promiscuous bit of RCTRL is managed. */
740 tsec_setfilter(struct tsec_softc *sc)
746 flags = TSEC_READ(sc, TSEC_REG_RCTRL);
748 /* Promiscuous mode */
749 if (ifp->if_flags & IFF_PROMISC)
750 flags |= TSEC_RCTRL_PROM;
752 flags &= ~TSEC_RCTRL_PROM;
754 TSEC_WRITE(sc, TSEC_REG_RCTRL, flags);
/*
 * ifnet ioctl handler. Visible cases: interface flag changes (re-filter
 * on a PROMISC toggle, init on up, stop on down), media ioctls forwarded
 * to ifmedia, and everything else to ether_ioctl().
 * NOTE(review): the switch statement and several case labels fall in
 * lines missing from this extract.
 */
758 tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
760 struct tsec_softc *sc = ifp->if_softc;
761 struct ifreq *ifr = (struct ifreq *)data;
769 TSEC_GLOBAL_LOCK(sc);
770 if (ifp->if_flags & IFF_UP) {
771 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Only re-program the filter when PROMISC actually changed. */
772 if ((sc->tsec_if_flags ^ ifp->if_flags) & IFF_PROMISC)
775 tsec_init_locked(sc);
776 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
779 sc->tsec_if_flags = ifp->if_flags;
780 TSEC_GLOBAL_UNLOCK(sc);
784 error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media, command);
787 error = ether_ioctl(ifp, command, data);
790 /* Flush buffers if not empty */
791 if (ifp->if_flags & IFF_UP)
/* ifmedia change callback: delegate to the MII layer under the TX lock. */
797 tsec_ifmedia_upd(struct ifnet *ifp)
799 struct tsec_softc *sc = ifp->if_softc;
800 struct mii_data *mii;
802 TSEC_TRANSMIT_LOCK(sc);
807 TSEC_TRANSMIT_UNLOCK(sc);
/* ifmedia status callback: report active media and status from the MII
 * layer, under the TX lock. */
812 tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
814 struct tsec_softc *sc = ifp->if_softc;
815 struct mii_data *mii;
817 TSEC_TRANSMIT_LOCK(sc);
822 ifmr->ifm_active = mii->mii_media_active;
823 ifmr->ifm_status = mii->mii_media_status;
825 TSEC_TRANSMIT_UNLOCK(sc);
/*
 * Allocate a fresh RX cluster mbuf, load it into the given DMA map and
 * hand back the mbuf and its bus address. Any previously-loaded buffer
 * is synced and unloaded first. The single-segment load is asserted
 * (and panics) — RX buffers must be physically contiguous and aligned.
 */
829 tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
832 struct mbuf *new_mbuf;
833 bus_dma_segment_t seg[1];
837 KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
839 new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
840 if (new_mbuf == NULL)
/* Use the whole cluster as the receive buffer. */
842 new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
845 bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
846 bus_dmamap_unload(tag, map);
849 error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
851 KASSERT(nsegs == 1, ("Too many segments returned!"));
852 if (nsegs != 1 || error)
853 panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
857 printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n",
865 KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0,
866 ("Wrong alignment of RX buffer!"));
868 bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
871 (*paddr) = seg->ds_addr;
/* busdma load callback: store the (single) segment's bus address into
 * the caller-supplied pointer passed as arg. */
876 tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
880 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
882 *paddr = segs->ds_addr;
/*
 * Allocate a page-aligned, 32-bit-addressable, single-segment DMA region
 * for a descriptor ring: create the tag, allocate zeroed DMA-safe memory
 * (virtual address returned via vaddr) and load it to obtain the bus
 * address (returned via raddr through the tsec_map_dma_addr callback).
 * Cleans up already-acquired resources on each failure path; dname is
 * used only for diagnostics ("TX"/"RX").
 */
886 tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
887 bus_size_t dsize, void **vaddr, void *raddr, const char *dname)
891 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
892 error = bus_dma_tag_create(NULL, /* parent */
893 PAGE_SIZE, 0, /* alignment, boundary */
894 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
895 BUS_SPACE_MAXADDR, /* highaddr */
896 NULL, NULL, /* filtfunc, filtfuncarg */
897 dsize, 1, /* maxsize, nsegments */
898 dsize, 0, /* maxsegsz, flags */
899 NULL, NULL, /* lockfunc, lockfuncarg */
903 device_printf(dev, "failed to allocate busdma %s tag\n", dname);
908 error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
911 device_printf(dev, "failed to allocate %s DMA safe memory\n",
913 bus_dma_tag_destroy(*dtag);
918 error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize, tsec_map_dma_addr,
919 raddr, BUS_DMA_NOWAIT);
921 device_printf(dev, "cannot get address of the %s descriptors\n",
923 bus_dmamem_free(*dtag, *vaddr, *dmap);
924 bus_dma_tag_destroy(*dtag);
/* Tear down one descriptor-ring DMA region: sync, unload the map, free
 * the DMA memory, then destroy the tag (reverse of tsec_alloc_dma_desc). */
933 tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr)
939 /* Unmap descriptors from DMA memory */
940 bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
941 bus_dmamap_unload(dtag, dmap);
943 /* Free descriptors memory */
944 bus_dmamem_free(dtag, vaddr, dmap);
946 /* Destroy descriptors tag */
947 bus_dma_tag_destroy(dtag);
/*
 * Release all DMA resources acquired in tsec_attach(): per-descriptor
 * TX maps and the TX mbuf tag, loaded RX mbufs with their maps and the
 * RX mbuf tag, then both descriptor rings via tsec_free_dma_desc().
 */
951 tsec_free_dma(struct tsec_softc *sc)
956 for (i = 0; i < TSEC_TX_NUM_DESC; i++)
957 if (sc->tx_map_data[i] != NULL)
958 bus_dmamap_destroy(sc->tsec_tx_mtag, sc->tx_map_data[i]);
959 /* Destroy tag for Tx mbufs */
960 bus_dma_tag_destroy(sc->tsec_tx_mtag);
962 /* Free RX mbufs and maps */
963 for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
964 if (sc->rx_data[i].mbuf) {
965 /* Unload buffer from DMA */
966 bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map,
967 BUS_DMASYNC_POSTREAD);
968 bus_dmamap_unload(sc->tsec_rx_mtag, sc->rx_data[i].map);
971 m_freem(sc->rx_data[i].mbuf);
973 /* Destroy map for this buffer */
974 if (sc->rx_data[i].map != NULL)
975 bus_dmamap_destroy(sc->tsec_rx_mtag,
978 /* Destroy tag for Rx mbufs */
979 bus_dma_tag_destroy(sc->tsec_rx_mtag);
981 /* Unload TX/RX descriptors */
982 tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
984 tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
/*
 * Stop the controller: cancel the tick callout, mark the interface down,
 * mask interrupts, halt DMA, unload and free every mbuf still queued for
 * transmit, and finally disable the MAC's RX/TX enables. Global lock held.
 */
989 tsec_stop(struct tsec_softc *sc)
996 TSEC_GLOBAL_LOCK_ASSERT(sc);
1000 /* Stop tick engine */
1001 callout_stop(&sc->tsec_callout);
1003 /* Disable interface and watchdog timer */
1004 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1005 sc->tsec_watchdog = 0;
1007 /* Disable all interrupts and stop DMA */
1008 tsec_intrs_ctl(sc, 0);
1009 tsec_dma_ctl(sc, 0);
1011 /* Remove pending data from TX queue */
1012 while (!TSEC_EMPTYQ_TX_MBUF(sc)) {
1013 m0 = TSEC_GET_TX_MBUF(sc);
1014 mapp = TSEC_GET_TX_MAP(sc);
1016 bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_POSTWRITE);
1017 bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);
1019 TSEC_FREE_TX_MAP(sc, mapp);
1023 /* Disable Rx and Tx */
1024 tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1);
1025 tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
1026 TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval);
/*
 * RX interrupt handler: ack the interrupt, then walk the RX ring under
 * the receive lock, harvesting completed descriptors. Error frames are
 * recycled in place; good frames have their mbuf replaced via
 * tsec_new_rxbuf() and are collected into rcv_mbufs[], which is pushed
 * to if_input() only after the receive lock is dropped.
 */
1031 tsec_receive_intr(void *arg)
1033 struct mbuf *rcv_mbufs[TSEC_RX_NUM_DESC];
1034 struct tsec_softc *sc = arg;
1035 struct tsec_desc *rx_desc;
1037 struct rx_data_type *rx_data;
1048 rx_data = sc->rx_data;
1051 /* Confirm the interrupt was received by driver */
1052 TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
1054 TSEC_RECEIVE_LOCK(sc);
1056 bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_POSTREAD |
1057 BUS_DMASYNC_POSTWRITE);
/* Bounded ring walk: the termination check lives inside the loop. */
1059 for (count = 0; /* count < TSEC_RX_NUM_DESC */; count++) {
1060 rx_desc = TSEC_GET_CUR_RX_DESC(sc);
1061 flags = rx_desc->flags;
1063 /* Check if there is anything to receive */
1064 if ((flags & TSEC_RXBD_E) || (count >= TSEC_RX_NUM_DESC)) {
1066 * Avoid generating another interrupt
1068 if (flags & TSEC_RXBD_E)
1069 TSEC_WRITE(sc, TSEC_REG_IEVENT,
1070 TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
1072 * We didn't consume current descriptor and have to
1073 * return it to the queue
1075 TSEC_BACK_CUR_RX_DESC(sc);
/* Descriptor carried an RX error (too long/short, non-octet,
 * bad CRC, overrun or truncated): recycle it without passing
 * anything up. */
1079 if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO |
1080 TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) {
1082 rx_desc->length = 0;
1083 rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) |
1084 TSEC_RXBD_E | TSEC_RXBD_I;
1088 if ((flags & TSEC_RXBD_L) == 0)
1089 device_printf(dev, "buf is not the last in frame!\n");
1091 /* Ok... process frame */
1092 length = rx_desc->length - ETHER_CRC_LEN;
1093 i = TSEC_GET_CUR_RX_DESC_CNT(sc);
1095 m = rx_data[i].mbuf;
/* Replace the ring's mbuf before handing the old one upstream;
 * on allocation failure the frame is dropped (handling in lines
 * not visible here). */
1097 if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map,
1098 &rx_data[i].mbuf, &rx_data[i].paddr)) {
1102 /* Attach new buffer to descriptor, and clear flags */
1103 rx_desc->bufptr = rx_data[i].paddr;
1104 rx_desc->length = 0;
1105 rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) |
1106 TSEC_RXBD_E | TSEC_RXBD_I;
1108 /* Prepare buffer for upper layers */
1109 m->m_pkthdr.rcvif = ifp;
1110 m->m_pkthdr.len = m->m_len = length;
1112 /* Save it for push */
1113 rcv_mbufs[c1++] = m;
1116 bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_PREREAD |
1117 BUS_DMASYNC_PREWRITE);
1119 TSEC_RECEIVE_UNLOCK(sc);
/* Deliver collected frames without holding any driver lock. */
1122 for (c2 = 0; c2 < c1; c2++)
1123 (*ifp->if_input)(ifp, rcv_mbufs[c2]);
/*
 * TX interrupt handler: ack the interrupt, fold the hardware collision
 * counter into if_collisions (then reset the monitor counters), reclaim
 * completed descriptors — unloading and freeing each packet's mbuf and
 * DMA map at its last (L) descriptor — clear OACTIVE, restart the send
 * queue and disarm the watchdog once the TX queue is empty.
 */
1127 tsec_transmit_intr(void *arg)
1129 struct tsec_softc *sc = arg;
1130 struct tsec_desc *tx_desc;
1138 /* Confirm the interrupt was received by driver */
1139 TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF);
1141 TSEC_TRANSMIT_LOCK(sc);
1143 /* Update collision statistics */
1144 ifp->if_collisions += TSEC_READ(sc, TSEC_REG_MON_TNCL);
1146 /* Reset collision counters in hardware */
1147 TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
1148 TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
1149 TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
1150 TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
1151 TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);
1153 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
1154 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1156 while (TSEC_CUR_DIFF_DIRTY_TX_DESC(sc)) {
1157 tx_desc = TSEC_GET_DIRTY_TX_DESC(sc);
/* Descriptor still owned by hardware — stop reclaiming. */
1158 if (tx_desc->flags & TSEC_TXBD_R) {
1159 TSEC_BACK_DIRTY_TX_DESC(sc);
1163 if ((tx_desc->flags & TSEC_TXBD_L) == 0)
1167 * This is the last buf in this packet, so unmap and free it.
1169 m0 = TSEC_GET_TX_MBUF(sc);
1170 mapp = TSEC_GET_TX_MAP(sc);
1172 bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_POSTWRITE);
1173 bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);
1175 TSEC_FREE_TX_MAP(sc, mapp);
1181 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_PREREAD |
1182 BUS_DMASYNC_PREWRITE);
1185 /* Now send anything that was pending */
1186 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1187 tsec_start_locked(ifp);
1189 /* Stop watchdog if all sent */
1190 if (TSEC_EMPTYQ_TX_MBUF(sc))
1191 sc->tsec_watchdog = 0;
1193 TSEC_TRANSMIT_UNLOCK(sc);
/*
 * Error interrupt handler: read and ack IEVENT, optionally log the raw
 * flags under IFF_DEBUG, report bus errors, restart the transmitter on
 * TX errors (counting late collisions), and on RX busy drain the ring
 * via tsec_receive_intr() before unhalting the receiver.
 */
1197 tsec_error_intr(void *arg)
1199 struct tsec_softc *sc = arg;
1205 eflags = TSEC_READ(sc, TSEC_REG_IEVENT);
1207 if (ifp->if_flags & IFF_DEBUG)
1208 if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n", eflags);
1210 /* Clear events bits in hardware */
1211 TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY |
1212 TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT |
1213 TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC |
1214 TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN);
1216 if (eflags & TSEC_IEVENT_EBERR)
1217 if_printf(ifp, "System bus error occurred during"
1218 " a DMA transaction (flags: 0x%x)\n", eflags);
1220 /* Check transmitter errors */
1221 if (eflags & TSEC_IEVENT_TXE) {
1224 if (eflags & TSEC_IEVENT_LC)
1225 ifp->if_collisions++;
/* Clear the transmit-halt so the transmitter resumes. */
1227 TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
1229 if (eflags & TSEC_IEVENT_BABT)
1232 /* Check receiver errors */
1233 if (eflags & TSEC_IEVENT_BSY) {
1237 /* Get data from RX buffers */
1238 tsec_receive_intr(arg);
1240 /* Make receiver again active */
1241 TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT);
1243 if (eflags & TSEC_IEVENT_BABR)
/*
 * Once-per-second callout: tick the MII state machine, kick the transmit
 * path if the link just came up while packets are queued, and reschedule
 * itself. Runs under the global lock (the watchdog is also serviced from
 * here, in lines not visible in this extract).
 */
1248 tsec_tick(void *xsc)
1250 struct tsec_softc *sc = xsc;
1254 TSEC_GLOBAL_LOCK(sc);
1259 link = sc->tsec_link;
1261 mii_tick(sc->tsec_mii);
/* Link transitioned down->up with pending packets: restart TX. */
1263 if (link == 0 && sc->tsec_link == 1 && (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)))
1264 tsec_start_locked(ifp);
1266 /* Schedule another timeout one second from now. */
1267 callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);
1269 TSEC_GLOBAL_UNLOCK(sc);
/*
 * miibus read method: program MIIMADD with (phy, reg), pulse a read
 * cycle via MIIMCOM, poll MIIMIND until neither busy nor not-valid (with
 * a bounded retry/delay loop), then return MIIMSTAT. Rejects requests
 * for a PHY address that does not match this device's unit number.
 */
1273 tsec_miibus_readreg(device_t dev, int phy, int reg)
1275 struct tsec_softc *sc;
1278 sc = device_get_softc(dev);
1280 if (device_get_unit(dev) != phy)
1285 TSEC_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
1286 TSEC_WRITE(sc, TSEC_REG_MIIMCOM, 0);
1287 TSEC_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE);
1289 timeout = TSEC_READ_RETRY;
1290 while (--timeout && TSEC_READ(sc, TSEC_REG_MIIMIND) &
1291 (TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY))
1292 DELAY(TSEC_READ_DELAY);
1295 device_printf(dev, "Timeout while reading from PHY!\n");
1297 return (TSEC_READ(sc, TSEC_REG_MIIMSTAT));
/*
 * miibus write method: program MIIMADD with (phy, reg), write the value
 * to MIIMCON, then poll MIIMIND's busy bit with a bounded retry/delay
 * loop. Warns when asked to write a PHY address that is not this
 * device's unit number.
 */
1301 tsec_miibus_writereg(device_t dev, int phy, int reg, int value)
1303 struct tsec_softc *sc;
1306 sc = device_get_softc(dev);
1308 if (device_get_unit(dev) != phy)
1309 device_printf(dev, "Trying to write to an alien PHY(%d)\n", phy);
1313 TSEC_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
1314 TSEC_WRITE(sc, TSEC_REG_MIIMCON, value);
1316 timeout = TSEC_READ_RETRY;
1317 while (--timeout && (TSEC_READ(sc, TSEC_REG_MIIMIND) & TSEC_MIIMIND_BUSY))
1318 DELAY(TSEC_READ_DELAY);
1321 device_printf(dev, "Timeout while writing to PHY!\n");
/*
 * miibus status-change method: mirror the negotiated media into MACCFG2
 * (duplex and GMII-vs-MII interface mode), record link state in
 * sc->tsec_link, and update ECNTRL's reduced-100M bit based on the
 * interface mode and the board inferred from ID2.
 */
1325 tsec_miibus_statchg(device_t dev)
1327 struct tsec_softc *sc;
1328 struct mii_data *mii;
1329 uint32_t ecntrl, id, tmp;
1332 sc = device_get_softc(dev);
1334 link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0);
1336 tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF;
1338 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
1339 tmp |= TSEC_MACCFG2_FULLDUPLEX;
1341 tmp &= ~TSEC_MACCFG2_FULLDUPLEX;
/* Select interface mode by negotiated speed; case labels for the
 * specific IFM_* subtypes fall in lines not visible here. */
1343 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1346 tmp |= TSEC_MACCFG2_GMII;
1347 sc->tsec_link = link;
1351 tmp |= TSEC_MACCFG2_MII;
1352 sc->tsec_link = link;
1356 device_printf(dev, "No speed selected but link active!\n");
1361 device_printf(dev, "Unknown speed (%d), link %s!\n",
1362 IFM_SUBTYPE(mii->mii_media_active),
1363 ((link) ? "up" : "down"));
1366 TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp);
1368 /* XXX kludge - use circumstantial evidence for reduced mode. */
1369 id = TSEC_READ(sc, TSEC_REG_ID2);
1371 ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M;
1372 ecntrl |= (tmp & TSEC_MACCFG2_MII) ? TSEC_ECNTRL_R100M : 0;
1373 TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl);