/*-
 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski
 * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/bus_dma.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/tsec/if_tsec.h>
#include <dev/tsec/if_tsecreg.h>

static int	tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag,
    bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr,
    const char *dname);
static void	tsec_dma_ctl(struct tsec_softc *sc, int state);
static void	tsec_encap(struct ifnet *ifp, struct tsec_softc *sc,
    struct mbuf *m0, uint16_t fcb_flags, int *start_tx);
static void	tsec_free_dma(struct tsec_softc *sc);
static void	tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap,
    void *vaddr);
static int	tsec_ifmedia_upd(struct ifnet *ifp);
static void	tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, uint32_t *paddr);
static void	tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static void	tsec_intrs_ctl(struct tsec_softc *sc, int state);
static void	tsec_init(void *xsc);
static void	tsec_init_locked(struct tsec_softc *sc);
static int	tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void	tsec_reset_mac(struct tsec_softc *sc);
static void	tsec_setfilter(struct tsec_softc *sc);
static void	tsec_set_mac_address(struct tsec_softc *sc);
static void	tsec_start(struct ifnet *ifp);
static void	tsec_start_locked(struct ifnet *ifp);
static void	tsec_stop(struct tsec_softc *sc);
static void	tsec_tick(void *arg);
static void	tsec_watchdog(struct tsec_softc *sc);
static void	tsec_add_sysctls(struct tsec_softc *sc);
static int	tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS);
static int	tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS);
static void	tsec_set_rxic(struct tsec_softc *sc);
static void	tsec_set_txic(struct tsec_softc *sc);
static int	tsec_receive_intr_locked(struct tsec_softc *sc, int count);
static void	tsec_transmit_intr_locked(struct tsec_softc *sc);
static void	tsec_error_intr_locked(struct tsec_softc *sc, int count);
static void	tsec_offload_setup(struct tsec_softc *sc);
static void	tsec_offload_process_frame(struct tsec_softc *sc,
    struct mbuf *m);
static void	tsec_setup_multicast(struct tsec_softc *sc);
static int	tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu);

devclass_t tsec_devclass;
DRIVER_MODULE(miibus, tsec, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(tsec, ether, 1, 1, 1);
MODULE_DEPEND(tsec, miibus, 1, 1, 1);

struct mtx tsec_phy_mtx;

int
tsec_attach(struct tsec_softc *sc)
{
	uint8_t hwaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	int error = 0;
	int i;

	/* Initialize global (because potentially shared) MII lock */
	if (!mtx_initialized(&tsec_phy_mtx))
		mtx_init(&tsec_phy_mtx, "tsec mii", NULL, MTX_DEF);

	/* Reset all TSEC counters */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Stop DMA engine if enabled by firmware */
	tsec_dma_ctl(sc, 0);

	/* Reset MAC */
	tsec_reset_mac(sc);

	/* Disable interrupts for now */
	tsec_intrs_ctl(sc, 0);

	/* Configure defaults for interrupt coalescing */
	sc->rx_ic_time = 768;
	sc->rx_ic_count = 16;
	sc->tx_ic_time = 768;
	sc->tx_ic_count = 16;
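
	/*
	 * Note (illustrative): with the IC time unit being 64 interface
	 * clocks (see the interrupt coalescing discussion above
	 * tsec_sysctl_ic_time()), these defaults raise an interrupt after
	 * at most 16 frames, or after 768 * 64 clocks (~393 us at 1 Gbps),
	 * whichever comes first.
	 */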

	tsec_add_sysctls(sc);

	/* Allocate a busdma tag and DMA safe memory for TX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag,
	    &sc->tsec_tx_dmap, sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC,
	    (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX");
	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag,
	    &sc->tsec_rx_dmap, sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC,
	    (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX");
	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag for TX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_TXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES * (TSEC_TX_NUM_DESC - 1),	/* maxsize */
	    TSEC_TX_MAX_DMA_SEGS,		/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_tx_mtag);			/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag "
		    "(tx mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag for RX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_RXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES,				/* maxsize */
	    1,					/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_rx_mtag);			/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag "
		    "(rx mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Create TX busdma maps */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		error = bus_dmamap_create(sc->tsec_tx_mtag, 0,
		    &sc->tx_bufmap[i].map);
		if (error) {
			device_printf(sc->dev, "failed to init TX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
		sc->tx_bufmap[i].map_initialized = 1;
	}

	/* Create RX busdma maps and zero mbuf handlers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = bus_dmamap_create(sc->tsec_rx_mtag, 0,
		    &sc->rx_data[i].map);
		if (error) {
			device_printf(sc->dev, "failed to init RX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
		sc->rx_data[i].mbuf = NULL;
	}

	/* Create mbufs for RX buffers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map,
		    &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr);
		if (error) {
			device_printf(sc->dev, "can't load rx DMA map %d, "
			    "error = %d\n", i, error);
			tsec_detach(sc);
			return (error);
		}
	}

	/* Create network interface for upper layers */
	ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->dev, "if_alloc() failed\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_init = tsec_init;
	ifp->if_start = tsec_start;
	ifp->if_ioctl = tsec_ioctl;

	IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1);
	ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->is_etsec)
		ifp->if_capabilities |= IFCAP_HWCSUM;

	ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Attach PHY(s) */
	error = mii_attach(sc->dev, &sc->tsec_miibus, ifp, tsec_ifmedia_upd,
	    tsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->phyaddr, MII_OFFSET_ANY,
	    0);
	if (error) {
		device_printf(sc->dev, "attaching PHYs failed\n");
		if_free(ifp);
		sc->tsec_ifp = NULL;
		return (error);
	}
	sc->tsec_mii = device_get_softc(sc->tsec_miibus);

	/* Set MAC address */
	tsec_get_hwaddr(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);

	return (0);
}

int
tsec_detach(struct tsec_softc *sc)
{

	if (sc->tsec_ifp != NULL) {
#ifdef DEVICE_POLLING
		if (sc->tsec_ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(sc->tsec_ifp);
#endif

		/* Stop TSEC controller and free TX queue */
		tsec_shutdown(sc->dev);

		/* Detach network interface */
		ether_ifdetach(sc->tsec_ifp);
		if_free(sc->tsec_ifp);
		sc->tsec_ifp = NULL;
	}

	/* Free DMA resources */
	tsec_free_dma(sc);

	return (0);
}

int
tsec_shutdown(device_t dev)
{
	struct tsec_softc *sc;

	sc = device_get_softc(dev);

	TSEC_GLOBAL_LOCK(sc);
	tsec_stop(sc);
	TSEC_GLOBAL_UNLOCK(sc);
	return (0);
}

int
tsec_suspend(device_t dev)
{

	/* TODO not implemented! */
	return (0);
}

int
tsec_resume(device_t dev)
{

	/* TODO not implemented! */
	return (0);
}

static void
tsec_init(void *xsc)
{
	struct tsec_softc *sc = xsc;

	TSEC_GLOBAL_LOCK(sc);
	tsec_init_locked(sc);
	TSEC_GLOBAL_UNLOCK(sc);
}

static int
tsec_mii_wait(struct tsec_softc *sc, uint32_t flags)
{
	uint32_t timeout;

	/*
	 * The status indicators are not set immediately after a command.
	 * Discard the first value.
	 */
	TSEC_PHY_READ(sc, TSEC_REG_MIIMIND);

	timeout = TSEC_READ_RETRY;
	while ((TSEC_PHY_READ(sc, TSEC_REG_MIIMIND) & flags) && --timeout)
		DELAY(TSEC_READ_DELAY);

	return (timeout == 0);
}
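
/*
 * Note: a nonzero return value means the wait timed out.  Callers such as
 * tsec_init_locked() and tsec_miibus_readreg() only use it to report the
 * timeout; the register access itself is still performed.
 */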

static void
tsec_init_locked(struct tsec_softc *sc)
{
	struct tsec_desc *tx_desc = sc->tsec_tx_vaddr;
	struct tsec_desc *rx_desc = sc->tsec_rx_vaddr;
	struct ifnet *ifp = sc->tsec_ifp;
	uint32_t timeout, val, i;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	TSEC_GLOBAL_LOCK_ASSERT(sc);
	tsec_stop(sc);

	/*
	 * These steps are according to the MPC8555E PowerQUICCIII RM:
	 * 14.7 Initialization/Application Information
	 */

	/* Step 1: soft reset MAC */
	tsec_reset_mac(sc);

	/* Step 2: Initialize MACCFG2 */
	TSEC_WRITE(sc, TSEC_REG_MACCFG2,
	    TSEC_MACCFG2_FULLDUPLEX |	/* Full Duplex = 1 */
	    TSEC_MACCFG2_PADCRC |	/* PAD/CRC append */
	    TSEC_MACCFG2_GMII |		/* I/F Mode bit */
	    TSEC_MACCFG2_PRECNT		/* Preamble count = 7 */
	);

	/*
	 * Step 3: Initialize ECNTRL.
	 * While the documentation states that R100M is ignored if RPM is
	 * not set, it does seem to be needed to get the orange boxes to
	 * work (which have a Marvell 88E1111 PHY). Go figure.
	 */

	/*
	 * XXX kludge - use circumstantial evidence to program ECNTRL
	 * correctly. Ideally we need some board information to guide
	 * us.
	 */
	i = TSEC_READ(sc, TSEC_REG_ID2);
	val = (i & 0xffff)
	    ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM)	/* Sumatra */
	    : TSEC_ECNTRL_R100M;			/* Orange + CDS */
	TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val);

	/* Step 4: Initialize MAC station address */
	tsec_set_mac_address(sc);

	/*
	 * Step 5: Assign a Physical address to the TBI so as to not conflict
	 * with the external PHY physical address
	 */
	TSEC_WRITE(sc, TSEC_REG_TBIPA, 5);

	TSEC_PHY_LOCK(sc);

	/* Step 6: Reset the management interface */
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT);

	/* Step 7: Setup the MII Mgmt clock speed */
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28);

	/* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */
	timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY);

	TSEC_PHY_UNLOCK(sc);
	if (timeout) {
		if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n");
		return;
	}

	/* Step 9: Setup the MII Mgmt */
	mii_mediachg(sc->tsec_mii);

	/* Step 10: Clear IEVENT register */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff);

	/* Step 11: Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on. Disable interrupts explicitly
	 * if polling is enabled.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		tsec_intrs_ctl(sc, 0);
	else
#endif /* DEVICE_POLLING */
		tsec_intrs_ctl(sc, 1);

	/* Step 12: Initialize IADDRn */
	TSEC_WRITE(sc, TSEC_REG_IADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR7, 0);

	/* Step 13: Initialize GADDRn */
	TSEC_WRITE(sc, TSEC_REG_GADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR7, 0);

	/* Step 14: Initialize RCTRL */
	TSEC_WRITE(sc, TSEC_REG_RCTRL, 0);

	/* Step 15: Initialize DMACTRL */
	tsec_dma_ctl(sc, 1);

	/* Step 16: Initialize FIFO_PAUSE_CTRL */
	TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN);

	/*
	 * Step 17: Initialize transmit/receive descriptor rings.
	 * Initialize TBASE and RBASE.
	 */
	TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr);
	TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr);

	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		tx_desc[i].bufptr = 0;
		tx_desc[i].length = 0;
		tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ?
		    TSEC_TXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		rx_desc[i].bufptr = sc->rx_data[i].paddr;
		rx_desc[i].length = 0;
		rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
		    ((i == TSEC_RX_NUM_DESC - 1) ? TSEC_RXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
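
	/*
	 * Both rings are now circular: the wrap (W) bit set on the last
	 * descriptor above sends the controller back to TBASE/RBASE, which
	 * is why ring indices elsewhere in this file can be advanced with a
	 * simple power-of-two mask, e.g. (idx + 1) & (TSEC_TX_NUM_DESC - 1).
	 */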

	/* Step 18: Initialize the maximum receive buffer length */
	TSEC_WRITE(sc, TSEC_REG_MRBLR, MCLBYTES);

	/* Step 19: Configure ethernet frame sizes */
	TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_MIN_FRAME_SIZE);
	tsec_set_mtu(sc, ifp->if_mtu);

	/* Step 20: Enable Rx and RxBD data snooping */
	TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN);
	TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0);

	/* Step 21: Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	/* Step 22: Mask all CAM interrupts */
	TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff);
	TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff);

	/* Step 23: Enable Rx and Tx */
	val = TSEC_READ(sc, TSEC_REG_MACCFG1);
	val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, val);

	/* Step 24: Reset TSEC counters for Tx and Rx rings */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Step 25: Setup TCP/IP Off-Load engine */
	if (sc->is_etsec)
		tsec_offload_setup(sc);

	/* Step 26: Setup multicast filters */
	tsec_setup_multicast(sc);

	/* Step 27: Activate network interface */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->tsec_if_flags = ifp->if_flags;
	sc->tsec_watchdog = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);
}

static void
tsec_set_mac_address(struct tsec_softc *sc)
{
	uint32_t macbuf[2] = { 0, 0 };
	char *macbufp, *curmac;
	int i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)),
	    ("tsec_set_mac_address: ETHER_ADDR_LEN (%d) exceeds "
	    "macbuf size (%zd)", ETHER_ADDR_LEN, sizeof(macbuf)));

	macbufp = (char *)macbuf;
	curmac = (char *)IF_LLADDR(sc->tsec_ifp);

	/* Correct order of MAC address bytes */
	for (i = 1; i <= ETHER_ADDR_LEN; i++)
		macbufp[ETHER_ADDR_LEN-i] = curmac[i-1];
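
	/*
	 * Illustration (address assumed): for 00:11:22:33:44:55 the loop
	 * above yields macbufp[] = { 55, 44, 33, 22, 11, 00, 0, 0 }, so on
	 * the big-endian cores this driver targets, MACSTNADDR1 receives
	 * 0x55443322 and MACSTNADDR2 0x11000000 - the byte order the MAC
	 * expects.
	 */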

	/* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */
	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]);
	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]);
}

/*
 * DMA control function, if argument state is:
 * 0 - DMA engine will be disabled
 * 1 - DMA engine will be enabled
 */
static void
tsec_dma_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;
	uint32_t dma_flags, timeout;

	dev = sc->dev;

	dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL);

	switch (state) {
	case 0:
		/* Temporarily clear the graceful stop bits. */
		tsec_dma_ctl(sc, 1000);

		/* Set them again. */
		dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	case 1000:	/* internal: enable without restarting transmission */
	case 1:
		/* Set write with response (WWR), wait (WOP) and snoop bits */
		dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN |
		    DMACTRL_WWR | DMACTRL_WOP);

		/* Clear graceful stop bits */
		dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	default:
		device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n",
		    state);
	}

	TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags);

	switch (state) {
	case 0:
		/* Wait for DMA stop */
		timeout = TSEC_READ_RETRY;
		while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) &
		    (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC))))
			DELAY(TSEC_READ_DELAY);

		if (timeout == 0)
			device_printf(dev, "tsec_dma_ctl(): timeout!\n");
		break;
	case 1:
		/* Restart transmission function */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}
}

/*
 * Interrupts control function, if argument state is:
 * 0 - all TSEC interrupts will be masked
 * 1 - all TSEC interrupts will be unmasked
 */
static void
tsec_intrs_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;

	dev = sc->dev;

	switch (state) {
	case 0:
		TSEC_WRITE(sc, TSEC_REG_IMASK, 0);
		break;
	case 1:
		TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN |
		    TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN | TSEC_IMASK_EBERREN |
		    TSEC_IMASK_BTEN | TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN |
		    TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN | TSEC_IMASK_RXFEN);
		break;
	default:
		device_printf(dev, "tsec_intrs_ctl(): unknown state value: "
		    "%d\n", state);
	}
}

static void
tsec_reset_mac(struct tsec_softc *sc)
{
	uint32_t maccfg1_flags;

	/* Set soft reset bit */
	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
	maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET;
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);

	/* Clear soft reset bit */
	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
	maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET;
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
}

static void
tsec_watchdog(struct tsec_softc *sc)
{
	struct ifnet *ifp;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0)
		return;

	ifp = sc->tsec_ifp;
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	tsec_stop(sc);
	tsec_init_locked(sc);
}

static void
tsec_start(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;

	TSEC_TRANSMIT_LOCK(sc);
	tsec_start_locked(ifp);
	TSEC_TRANSMIT_UNLOCK(sc);
}

static void
tsec_start_locked(struct ifnet *ifp)
{
	struct tsec_softc *sc;
	struct mbuf *m0;
	struct tsec_tx_fcb *tx_fcb;
	int csum_flags;
	int start_tx;
	uint16_t fcb_flags;

	sc = ifp->if_softc;
	start_tx = 0;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	if (sc->tsec_link == 0)
		return;

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {

		if (TSEC_FREE_TX_DESC(sc) < TSEC_TX_MAX_DMA_SEGS) {
			/* No free descriptors */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Get packet from the queue */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* Insert TCP/IP Off-load frame control block */
		fcb_flags = 0;
		csum_flags = m0->m_pkthdr.csum_flags;
		if (csum_flags) {
			M_PREPEND(m0, sizeof(struct tsec_tx_fcb), M_NOWAIT);
			if (m0 == NULL)
				break;

			if (csum_flags & CSUM_IP)
				fcb_flags |= TSEC_TX_FCB_IP4 |
				    TSEC_TX_FCB_CSUM_IP;

			if (csum_flags & CSUM_TCP)
				fcb_flags |= TSEC_TX_FCB_TCP |
				    TSEC_TX_FCB_CSUM_TCP_UDP;

			if (csum_flags & CSUM_UDP)
				fcb_flags |= TSEC_TX_FCB_UDP |
				    TSEC_TX_FCB_CSUM_TCP_UDP;

			tx_fcb = mtod(m0, struct tsec_tx_fcb *);
			tx_fcb->flags = fcb_flags;
			tx_fcb->l3_offset = ETHER_HDR_LEN;
			tx_fcb->l4_offset = sizeof(struct ip);
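
			/*
			 * The frame now starts with the FCB, followed by the
			 * untouched Ethernet frame, so l3_offset points past
			 * the 14-byte Ethernet header and l4_offset past a
			 * 20-byte IP header (IP options are not accounted
			 * for here).
			 */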
		}

		tsec_encap(ifp, sc, m0, fcb_flags, &start_tx);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (start_tx) {
		/* Enable transmitter and watchdog timer */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
		sc->tsec_watchdog = 5;
	}
}

static void
tsec_encap(struct ifnet *ifp, struct tsec_softc *sc, struct mbuf *m0,
    uint16_t fcb_flags, int *start_tx)
{
	bus_dma_segment_t segs[TSEC_TX_MAX_DMA_SEGS];
	int error, i, nsegs;
	struct tsec_bufmap *tx_bufmap;
	uint32_t tx_idx;
	uint16_t flags;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	tx_idx = sc->tx_idx_head;
	tx_bufmap = &sc->tx_bufmap[tx_idx];

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, tx_bufmap->map, m0,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments! Defrag and try again. */
		struct mbuf *m = m_defrag(m0, M_NOWAIT);

		if (m == NULL) {
			m_freem(m0);
			return;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag,
		    tx_bufmap->map, m0, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	if (error != 0) {
		/* Give up. */
		m_freem(m0);
		return;
	}

	bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map,
	    BUS_DMASYNC_PREWRITE);
	tx_bufmap->mbuf = m0;

	/*
	 * Fill in the TX descriptors back to front so that READY bit in the
	 * first descriptor is set last.
	 */
	tx_idx = (tx_idx + (uint32_t)nsegs) & (TSEC_TX_NUM_DESC - 1);
	sc->tx_idx_head = tx_idx;
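
	/*
	 * Worked example (values assumed): with tx_idx_head == 5 and
	 * nsegs == 3, the head advances to index 8 and the loop below
	 * writes descriptors 7, 6 and 5 in that order.  Descriptor 7
	 * carries TSEC_TXBD_L/TSEC_TXBD_I (last buffer of the frame);
	 * descriptor 5, the one the controller fetches first, has its
	 * READY bit written last.
	 */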
	flags = TSEC_TXBD_L | TSEC_TXBD_I | TSEC_TXBD_R | TSEC_TXBD_TC;
	for (i = nsegs - 1; i >= 0; i--) {
		struct tsec_desc *tx_desc;

		tx_idx = (tx_idx - 1) & (TSEC_TX_NUM_DESC - 1);
		tx_desc = &sc->tsec_tx_vaddr[tx_idx];
		tx_desc->length = segs[i].ds_len;
		tx_desc->bufptr = segs[i].ds_addr;

		if (i == 0) {
			/* Include checksum offload info in the first BD. */
			if (fcb_flags != 0)
				flags |= TSEC_TXBD_TOE;
		}

		/*
		 * Set descriptor flags; on the last buffer of the frame
		 * these also:
		 * - transmit the CRC sequence after the last data byte
		 * - interrupt after the last buffer
		 */
		tx_desc->flags = (tx_idx == (TSEC_TX_NUM_DESC - 1) ?
		    TSEC_TXBD_W : 0) | flags;

		flags &= ~(TSEC_TXBD_L | TSEC_TXBD_I);
	}

	*start_tx = 1;
}

static void
tsec_setfilter(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	uint32_t flags;

	ifp = sc->tsec_ifp;
	flags = TSEC_READ(sc, TSEC_REG_RCTRL);

	/* Promiscuous mode */
	if (ifp->if_flags & IFF_PROMISC)
		flags |= TSEC_RCTRL_PROM;
	else
		flags &= ~TSEC_RCTRL_PROM;

	TSEC_WRITE(sc, TSEC_REG_RCTRL, flags);
}

#ifdef DEVICE_POLLING
static poll_handler_t tsec_poll;

static int
tsec_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct tsec_softc *sc = ifp->if_softc;
	uint32_t ie;
	int rx_npkts;

	rx_npkts = 0;

	TSEC_GLOBAL_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		TSEC_GLOBAL_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		tsec_error_intr_locked(sc, count);

		/* Clear all events reported */
		ie = TSEC_READ(sc, TSEC_REG_IEVENT);
		TSEC_WRITE(sc, TSEC_REG_IEVENT, ie);
	}

	tsec_transmit_intr_locked(sc);

	TSEC_GLOBAL_TO_RECEIVE_LOCK(sc);

	rx_npkts = tsec_receive_intr_locked(sc, count);

	TSEC_RECEIVE_UNLOCK(sc);

	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		TSEC_GLOBAL_LOCK(sc);
		if (tsec_set_mtu(sc, ifr->ifr_mtu))
			ifp->if_mtu = ifr->ifr_mtu;
		else
			error = EINVAL;
		TSEC_GLOBAL_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		TSEC_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((sc->tsec_if_flags ^ ifp->if_flags) &
				    IFF_PROMISC)
					tsec_setfilter(sc);

				if ((sc->tsec_if_flags ^ ifp->if_flags) &
				    IFF_ALLMULTI)
					tsec_setup_multicast(sc);
			} else
				tsec_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			tsec_stop(sc);

		sc->tsec_if_flags = ifp->if_flags;
		TSEC_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			TSEC_GLOBAL_LOCK(sc);
			tsec_setup_multicast(sc);
			TSEC_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media,
		    command);
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if ((mask & IFCAP_HWCSUM) && sc->is_etsec) {
			TSEC_GLOBAL_LOCK(sc);
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			tsec_offload_setup(sc);
			TSEC_GLOBAL_UNLOCK(sc);
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(tsec_poll, ifp);
				if (error)
					return (error);

				TSEC_GLOBAL_LOCK(sc);
				/* Disable interrupts */
				tsec_intrs_ctl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				TSEC_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				TSEC_GLOBAL_LOCK(sc);
				/* Enable interrupts */
				tsec_intrs_ctl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				TSEC_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}

	/* Flush buffers if not empty */
	if (ifp->if_flags & IFF_UP)
		tsec_start(ifp);
	return (error);
}

static int
tsec_ifmedia_upd(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	TSEC_TRANSMIT_LOCK(sc);

	mii = sc->tsec_mii;
	mii_mediachg(mii);

	TSEC_TRANSMIT_UNLOCK(sc);
	return (0);
}

static void
tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	TSEC_TRANSMIT_LOCK(sc);

	mii = sc->tsec_mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	TSEC_TRANSMIT_UNLOCK(sc);
}

static int
tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    uint32_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error, nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
	if (error) {
		printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n",
		    error);
		m_freem(new_mbuf);
		return (ENOBUFS);
	}

	KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0,
	    ("Wrong alignment of RX buffer!"));
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}
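
/*
 * tsec_new_rxbuf() is used both to populate the RX ring at attach time and
 * to replace buffers in tsec_receive_intr_locked().  If it fails there, the
 * RX path drops the incoming frame and re-queues the descriptor rather than
 * leaving a hole in the ring.
 */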

static void
tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

static int
tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
    bus_size_t dsize, void **vaddr, void *raddr, const char *dname)
{
	int error;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    dsize, 1,				/* maxsize, nsegments */
	    dsize, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    dtag);				/* dmat */
	if (error) {
		device_printf(dev, "failed to allocate busdma %s tag\n",
		    dname);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    dmap);
	if (error) {
		device_printf(dev, "failed to allocate %s DMA safe memory\n",
		    dname);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize,
	    tsec_map_dma_addr, raddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "cannot get address of the %s "
		    "descriptors\n", dname);
		bus_dmamem_free(*dtag, *vaddr, *dmap);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	return (0);
}

static void
tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr)
{

	if (vaddr == NULL)
		return;

	/* Unmap descriptors from DMA memory */
	bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dtag, dmap);

	/* Free descriptors memory */
	bus_dmamem_free(dtag, vaddr, dmap);

	/* Destroy descriptors tag */
	bus_dma_tag_destroy(dtag);
}

static void
tsec_free_dma(struct tsec_softc *sc)
{
	int i;

	/* Free TX maps */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++)
		if (sc->tx_bufmap[i].map_initialized)
			bus_dmamap_destroy(sc->tsec_tx_mtag,
			    sc->tx_bufmap[i].map);
	/* Destroy tag for TX mbufs */
	bus_dma_tag_destroy(sc->tsec_tx_mtag);

	/* Free RX mbufs and maps */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		if (sc->rx_data[i].mbuf) {
			/* Unload buffer from DMA */
			bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);

			/* Free buffer */
			m_freem(sc->rx_data[i].mbuf);
		}
		/* Destroy map for this buffer */
		if (sc->rx_data[i].map != NULL)
			bus_dmamap_destroy(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);
	}
	/* Destroy tag for RX mbufs */
	bus_dma_tag_destroy(sc->tsec_rx_mtag);

	/* Unload TX/RX descriptors */
	tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    sc->tsec_tx_vaddr);
	tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    sc->tsec_rx_vaddr);
}

static void
tsec_stop(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	uint32_t tmpval;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Disable interface and watchdog timer */
	callout_stop(&sc->tsec_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->tsec_watchdog = 0;

	/* Disable all interrupts and stop DMA */
	tsec_intrs_ctl(sc, 0);
	tsec_dma_ctl(sc, 0);

	/* Remove pending data from TX queue */
	while (sc->tx_idx_tail != sc->tx_idx_head) {
		bus_dmamap_sync(sc->tsec_tx_mtag,
		    sc->tx_bufmap[sc->tx_idx_tail].map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag,
		    sc->tx_bufmap[sc->tx_idx_tail].map);
		m_freem(sc->tx_bufmap[sc->tx_idx_tail].mbuf);
		sc->tx_idx_tail = (sc->tx_idx_tail + 1)
		    & (TSEC_TX_NUM_DESC - 1);
	}

	/* Disable RX and TX */
	tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1);
	tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval);
}

static void
tsec_tick(void *arg)
{
	struct tsec_softc *sc = arg;
	struct ifnet *ifp;
	int link;

	TSEC_GLOBAL_LOCK(sc);

	tsec_watchdog(sc);

	ifp = sc->tsec_ifp;
	link = sc->tsec_link;

	mii_tick(sc->tsec_mii);

	if (link == 0 && sc->tsec_link == 1 &&
	    (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)))
		tsec_start_locked(ifp);

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);

	TSEC_GLOBAL_UNLOCK(sc);
}

/*
 * This is the core RX routine. It replenishes mbufs in the descriptor ring
 * and passes frames that have been DMA'ed into host memory to the upper
 * layer.
 *
 * Loops at most count times if count is >= 0, or until done if count < 0.
 */
static int
tsec_receive_intr_locked(struct tsec_softc *sc, int count)
{
	struct tsec_desc *rx_desc;
	struct ifnet *ifp;
	struct rx_data_type *rx_data;
	struct mbuf *m;
	uint32_t i;
	int c, rx_npkts;
	uint16_t flags;

	TSEC_RECEIVE_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;
	rx_data = sc->rx_data;
	rx_npkts = 0;

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (c = 0; ; c++) {
		if (count >= 0 && count-- == 0)
			break;

		rx_desc = TSEC_GET_CUR_RX_DESC(sc);
		flags = rx_desc->flags;

		/* Check if there is anything to receive */
		if ((flags & TSEC_RXBD_E) || (c >= TSEC_RX_NUM_DESC)) {
			/*
			 * Avoid generating another interrupt
			 */
			if (flags & TSEC_RXBD_E)
				TSEC_WRITE(sc, TSEC_REG_IEVENT,
				    TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
			/*
			 * We didn't consume current descriptor and have to
			 * return it to the queue
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO |
		    TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) {

			rx_desc->length = 0;
			rx_desc->flags = (rx_desc->flags &
			    ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I;

			if (sc->frame != NULL) {
				m_free(sc->frame);
				sc->frame = NULL;
			}

			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		/* Ok... process frame */
		i = TSEC_GET_CUR_RX_DESC_CNT(sc);
		m = rx_data[i].mbuf;
		m->m_len = rx_desc->length;

		if (sc->frame != NULL) {
			if ((flags & TSEC_RXBD_L) != 0)
				m->m_len -= m_length(sc->frame, NULL);

			m->m_flags &= ~M_PKTHDR;
			m_cat(sc->frame, m);
		} else
			sc->frame = m;

		m = NULL;

		if ((flags & TSEC_RXBD_L) != 0) {
			m = sc->frame;
			sc->frame = NULL;
		}

		if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map,
		    &rx_data[i].mbuf, &rx_data[i].paddr)) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			/*
			 * We ran out of mbufs; didn't consume current
			 * descriptor and have to return it to the queue.
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		/* Attach new buffer to descriptor and clear flags */
		rx_desc->bufptr = rx_data[i].paddr;
		rx_desc->length = 0;
		rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) |
		    TSEC_RXBD_E | TSEC_RXBD_I;

		if (m != NULL) {
			m->m_pkthdr.rcvif = ifp;

			m_fixhdr(m);
			m_adj(m, -ETHER_CRC_LEN);

			if (sc->is_etsec)
				tsec_offload_process_frame(sc, m);

			TSEC_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			TSEC_RECEIVE_LOCK(sc);
			rx_npkts++;
		}
	}

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Make sure TSEC receiver is not halted.
	 *
	 * Various conditions can stop the TSEC receiver, but not all are
	 * signaled and handled by error interrupt, so make sure the receiver
	 * is running. Writing to TSEC_REG_RSTAT restarts the receiver when
	 * halted, and is harmless if already running.
	 */
	TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT);

	return (rx_npkts);
}

void
tsec_receive_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) {
		TSEC_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
	tsec_receive_intr_locked(sc, -1);

	TSEC_RECEIVE_UNLOCK(sc);
}

static void
tsec_transmit_intr_locked(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	uint32_t tx_idx;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Update collision statistics */
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    TSEC_READ(sc, TSEC_REG_MON_TNCL));

	/* Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	tx_idx = sc->tx_idx_tail;
	while (tx_idx != sc->tx_idx_head) {
		struct tsec_desc *tx_desc;
		struct tsec_bufmap *tx_bufmap;

		tx_desc = &sc->tsec_tx_vaddr[tx_idx];
		if (tx_desc->flags & TSEC_TXBD_R) {
			/* Hardware still owns this descriptor; stop. */
			break;
		}

		tx_bufmap = &sc->tx_bufmap[tx_idx];
		tx_idx = (tx_idx + 1) & (TSEC_TX_NUM_DESC - 1);
		if (tx_bufmap->mbuf == NULL)
			continue;

		/*
		 * This is the last buf in this packet, so unmap and free it.
		 */
		bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, tx_bufmap->map);
		m_freem(tx_bufmap->mbuf);
		tx_bufmap->mbuf = NULL;

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}
	sc->tx_idx_tail = tx_idx;
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	tsec_start_locked(ifp);

	if (sc->tx_idx_tail == sc->tx_idx_head)
		sc->tsec_watchdog = 0;
}

void
tsec_transmit_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) {
		TSEC_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF);
	tsec_transmit_intr_locked(sc);

	TSEC_TRANSMIT_UNLOCK(sc);
}

static void
tsec_error_intr_locked(struct tsec_softc *sc, int count)
{
	struct ifnet *ifp;
	uint32_t eflags;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	eflags = TSEC_READ(sc, TSEC_REG_IEVENT);

	/* Clear events bits in hardware */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY |
	    TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT |
	    TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC |
	    TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN);

	/* Check transmitter errors */
	if (eflags & TSEC_IEVENT_TXE) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

		if (eflags & TSEC_IEVENT_LC)
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);

		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}

	/* Check for discarded frame due to a lack of buffers */
	if (eflags & TSEC_IEVENT_BSY) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
	}

	if (ifp->if_flags & IFF_DEBUG)
		if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n",
		    eflags);

	if (eflags & TSEC_IEVENT_EBERR) {
		if_printf(ifp, "System bus error occurred during "
		    "DMA transaction (flags: 0x%x)\n", eflags);
		tsec_init_locked(sc);
	}

	if (eflags & TSEC_IEVENT_BABT)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	if (eflags & TSEC_IEVENT_BABR)
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
}

void
tsec_error_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_GLOBAL_LOCK(sc);
	tsec_error_intr_locked(sc, -1);
	TSEC_GLOBAL_UNLOCK(sc);
}

int
tsec_miibus_readreg(device_t dev, int phy, int reg)
{
	struct tsec_softc *sc;
	int timeout;
	int rv;

	sc = device_get_softc(dev);

	TSEC_PHY_LOCK(sc);
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, 0);
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE);

	timeout = tsec_mii_wait(sc, TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY);
	rv = TSEC_PHY_READ(sc, TSEC_REG_MIIMSTAT);
	TSEC_PHY_UNLOCK(sc);

	if (timeout)
		device_printf(dev, "Timeout while reading from PHY!\n");

	return (rv);
}

int
tsec_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct tsec_softc *sc;
	int timeout;

	sc = device_get_softc(dev);

	TSEC_PHY_LOCK(sc);
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCON, value);
	timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY);
	TSEC_PHY_UNLOCK(sc);

	if (timeout)
		device_printf(dev, "Timeout while writing to PHY!\n");

	return (0);
}

void
tsec_miibus_statchg(device_t dev)
{
	struct tsec_softc *sc;
	struct mii_data *mii;
	uint32_t ecntrl, id, tmp;
	int link;

	sc = device_get_softc(dev);
	mii = sc->tsec_mii;
	link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0);

	tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		tmp |= TSEC_MACCFG2_FULLDUPLEX;
	else
		tmp &= ~TSEC_MACCFG2_FULLDUPLEX;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		tmp |= TSEC_MACCFG2_GMII;
		sc->tsec_link = link;
		break;
	case IFM_100_TX:
	case IFM_10_T:
		tmp |= TSEC_MACCFG2_MII;
		sc->tsec_link = link;
		break;
	case IFM_NONE:
		if (link)
			device_printf(dev, "No speed selected but link "
			    "active!\n");
		sc->tsec_link = 0;
		break;
	default:
		sc->tsec_link = 0;
		device_printf(dev, "Unknown speed (%d), link %s!\n",
		    IFM_SUBTYPE(mii->mii_media_active),
		    ((link) ? "up" : "down"));
		return;
	}
	TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp);

	/* XXX kludge - use circumstantial evidence for reduced mode. */
	id = TSEC_READ(sc, TSEC_REG_ID2);
	if (id & 0xffff) {
		ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M;
		ecntrl |= (tmp & TSEC_MACCFG2_MII) ? TSEC_ECNTRL_R100M : 0;
		TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl);
	}
}

static void
tsec_add_sysctls(struct tsec_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "TSEC interrupt coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_time,
	    "I", "IC RX time threshold (0-65535)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_count",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_count,
	    "I", "IC RX frame count threshold (0-255)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_time,
	    "I", "IC TX time threshold (0-65535)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_count",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_count,
	    "I", "IC TX frame count threshold (0-255)");
}

/*
 * With Interrupt Coalescing (IC) active, a transmit/receive frame
 * interrupt is raised either upon:
 *
 * - threshold-defined period of time elapsed, or
 * - threshold-defined number of frames is received/transmitted,
 *   whichever occurs first.
 *
 * The following sysctls regulate IC behaviour (for TX/RX separately):
 *
 * dev.tsec.<unit>.int_coal.rx_time
 * dev.tsec.<unit>.int_coal.rx_count
 * dev.tsec.<unit>.int_coal.tx_time
 * dev.tsec.<unit>.int_coal.tx_count
 *
 * Values:
 *
 * - 0 for either time or count disables IC on the given TX/RX path
 *
 * - count: 1-255 (expresses frame count number; note that value of 1 is
 *   effectively IC off)
 *
 * - time: 1-65535 (value corresponds to a real time period and is
 *   expressed in units equivalent to 64 TSEC interface clocks, i.e. one
 *   timer threshold unit is 26.5 us, 2.56 us, or 512 ns, corresponding to
 *   10 Mbps, 100 Mbps, or 1 Gbps, respectively. For detailed discussion
 *   consult the TSEC reference manual.
 */
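
/*
 * Example (values and unit number assumed): with the driver defaults
 * rx_ic_count = 16 and rx_ic_time = 768, tsec_set_rxic() below programs
 * RXIC = 0x80000000 | (16 << 21) | 768 = 0x82000300, i.e. an RX interrupt
 * is raised after 16 frames or after 768 * 64 interface clocks (~393 us at
 * 1 Gbps), whichever comes first.  From userland:
 *
 *	sysctl dev.tsec.0.int_coal.rx_count=16
 *	sysctl dev.tsec.0.int_coal.rx_time=768
 */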
static int
tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t time;
	struct tsec_softc *sc = (struct tsec_softc *)arg1;

	time = (arg2 == TSEC_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;

	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0)
		return (error);

	if (time > 65535)
		return (EINVAL);

	TSEC_IC_LOCK(sc);
	if (arg2 == TSEC_IC_RX) {
		sc->rx_ic_time = time;
		tsec_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		tsec_set_txic(sc);
	}
	TSEC_IC_UNLOCK(sc);

	return (0);
}

static int
tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t count;
	struct tsec_softc *sc = (struct tsec_softc *)arg1;

	count = (arg2 == TSEC_IC_RX) ? sc->rx_ic_count : sc->tx_ic_count;

	error = sysctl_handle_int(oidp, &count, 0, req);
	if (error != 0)
		return (error);

	if (count > 255)
		return (EINVAL);

	TSEC_IC_LOCK(sc);
	if (arg2 == TSEC_IC_RX) {
		sc->rx_ic_count = count;
		tsec_set_rxic(sc);
	} else {
		sc->tx_ic_count = count;
		tsec_set_txic(sc);
	}
	TSEC_IC_UNLOCK(sc);

	return (0);
}

static void
tsec_set_rxic(struct tsec_softc *sc)
{
	uint32_t rxic_val;

	if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0)
		/* Disable RX IC */
		rxic_val = 0;
	else {
		rxic_val = 0x80000000;
		rxic_val |= (sc->rx_ic_count << 21);
		rxic_val |= sc->rx_ic_time;
	}

	TSEC_WRITE(sc, TSEC_REG_RXIC, rxic_val);
}

static void
tsec_set_txic(struct tsec_softc *sc)
{
	uint32_t txic_val;

	if (sc->tx_ic_count == 0 || sc->tx_ic_time == 0)
		/* Disable TX IC */
		txic_val = 0;
	else {
		txic_val = 0x80000000;
		txic_val |= (sc->tx_ic_count << 21);
		txic_val |= sc->tx_ic_time;
	}

	TSEC_WRITE(sc, TSEC_REG_TXIC, txic_val);
}

static void
tsec_offload_setup(struct tsec_softc *sc)
{
	struct ifnet *ifp = sc->tsec_ifp;
	uint32_t reg;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	reg = TSEC_READ(sc, TSEC_REG_TCTRL);
	reg |= TSEC_TCTRL_IPCSEN | TSEC_TCTRL_TUCSEN;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = TSEC_CHECKSUM_FEATURES;
	else
		ifp->if_hwassist = 0;

	TSEC_WRITE(sc, TSEC_REG_TCTRL, reg);

	reg = TSEC_READ(sc, TSEC_REG_RCTRL);
	reg &= ~(TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | TSEC_RCTRL_PRSDEP);
	reg |= TSEC_RCTRL_PRSDEP_PARSE_L2 | TSEC_RCTRL_VLEX;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		reg |= TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN |
		    TSEC_RCTRL_PRSDEP_PARSE_L234;

	TSEC_WRITE(sc, TSEC_REG_RCTRL, reg);
}

static void
tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m)
{
	struct tsec_rx_fcb rx_fcb;
	int csum_flags = 0;
	int protocol, flags;

	TSEC_RECEIVE_LOCK_ASSERT(sc);

	m_copydata(m, 0, sizeof(struct tsec_rx_fcb), (caddr_t)(&rx_fcb));
	flags = rx_fcb.flags;
	protocol = rx_fcb.protocol;

	if (TSEC_RX_FCB_IP_CSUM_CHECKED(flags)) {
		csum_flags |= CSUM_IP_CHECKED;

		if ((flags & TSEC_RX_FCB_IP_CSUM_ERROR) == 0)
			csum_flags |= CSUM_IP_VALID;
	}

	if ((protocol == IPPROTO_TCP || protocol == IPPROTO_UDP) &&
	    TSEC_RX_FCB_TCP_UDP_CSUM_CHECKED(flags) &&
	    (flags & TSEC_RX_FCB_TCP_UDP_CSUM_ERROR) == 0) {

		csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
	}

	m->m_pkthdr.csum_flags = csum_flags;

	if (flags & TSEC_RX_FCB_VLAN) {
		m->m_pkthdr.ether_vtag = rx_fcb.vlan;
		m->m_flags |= M_VLANTAG;
	}

	m_adj(m, sizeof(struct tsec_rx_fcb));
}

static void
tsec_setup_multicast(struct tsec_softc *sc)
{
	uint32_t hashtable[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	struct ifnet *ifp = sc->tsec_ifp;
	struct ifmultiaddr *ifma;
	uint32_t h;
	int i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	if (ifp->if_flags & IFF_ALLMULTI) {
		for (i = 0; i < 8; i++)
			TSEC_WRITE(sc, TSEC_REG_GADDR(i), 0xFFFFFFFF);

		return;
	}

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = (ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 24) & 0xFF;

		hashtable[(h >> 5)] |= 1 << (0x1F - (h & 0x1F));
	}
	if_maddr_runlock(ifp);

	for (i = 0; i < 8; i++)
		TSEC_WRITE(sc, TSEC_REG_GADDR(i), hashtable[i]);
}
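
/*
 * For illustration: the upper eight CRC bits select one of 256 hash bits.
 * A value of h = 0x2a maps to hashtable[0x2a >> 5] = hashtable[1], bit
 * 1 << (0x1f - (0x2a & 0x1f)) = 1 << 21, matching the GADDRn convention
 * where bit 0 is the most significant bit of the register.
 */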

static int
tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu)
{

	mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	if (mtu >= TSEC_MIN_FRAME_SIZE && mtu <= TSEC_MAX_FRAME_SIZE) {
		TSEC_WRITE(sc, TSEC_REG_MAXFRM, mtu);
		return (mtu);
	}

	return (0);
}
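
/*
 * E.g. the default Ethernet MTU of 1500 programs MAXFRM to
 * 1500 + 14 (header) + 4 (VLAN tag) + 4 (CRC) = 1522 bytes.
 */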