/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/truephyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/et/if_etreg.h>
#include <dev/et/if_etvar.h>

#include "miibus_if.h"
MODULE_DEPEND(et, pci, 1, 1, 1);
MODULE_DEPEND(et, ether, 1, 1, 1);
MODULE_DEPEND(et, miibus, 1, 1, 1);
static int	et_probe(device_t);
static int	et_attach(device_t);
static int	et_detach(device_t);
static int	et_shutdown(device_t);

static int	et_miibus_readreg(device_t, int, int);
static int	et_miibus_writereg(device_t, int, int, int);
static void	et_miibus_statchg(device_t);

static void	et_init_locked(struct et_softc *);
static void	et_init(void *);
static int	et_ioctl(struct ifnet *, u_long, caddr_t);
static void	et_start_locked(struct ifnet *);
static void	et_start(struct ifnet *);
static void	et_watchdog(struct et_softc *);
static int	et_ifmedia_upd_locked(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	et_add_sysctls(struct et_softc *);
static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void	et_intr(void *);
static void	et_enable_intrs(struct et_softc *, uint32_t);
static void	et_disable_intrs(struct et_softc *);
static void	et_rxeof(struct et_softc *);
static void	et_txeof(struct et_softc *);

static int	et_dma_alloc(device_t);
static void	et_dma_free(device_t);
static int	et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *,
		    void **, bus_addr_t *, bus_dmamap_t *);
static void	et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static int	et_dma_mbuf_create(device_t);
static void	et_dma_mbuf_destroy(device_t, int, const int[]);
static void	et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
static void	et_dma_buf_addr(void *, bus_dma_segment_t *, int,
		    bus_size_t, int);

static int	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);

static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static int	et_enable_txrx(struct et_softc *, int);
static void	et_reset(struct et_softc *);
static int	et_bus_config(device_t);
static void	et_get_eaddr(device_t, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);
static void	et_setmedia(struct et_softc *);
static void	et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);
static const struct et_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }
};
static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	{ 0, 0 }
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

static devclass_t et_devclass;

DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);
static int	et_rx_intr_npkts = 32;
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 126;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
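/*
 * Editorial note: the four knobs above are loader(8) tunables and can be
 * overridden from /boot/loader.conf before the driver initializes.  A
 * sketch of how they would appear there (the values are only examples,
 * not recommendations):
 *
 *	hw.et.rx_intr_npkts="64"	# RX interrupt every 64 packets
 *	hw.et.rx_intr_delay="10"	# RX interrupt delay, x10 usec
 *	hw.et.tx_intr_nsegs="256"	# TX interrupt every 256 segments
 *	hw.et.timer="1000000000"	# TX done timer, in nanoseconds
 */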
static const struct et_bsize et_bufsize_std[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_2048,
	  .newbuf = et_newbuf_cluster },
};
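/*
 * Editorial note: the table above implements a split-buffer RX scheme.
 * Ring 0 is replenished with small header-sized mbufs (MHLEN) via
 * et_newbuf_hdr(), ring 1 with 2KB clusters via et_newbuf_cluster();
 * which ring and buffer a received frame landed in comes back in the
 * per-frame RX status (see et_rxeof()).
 */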
static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static int
et_attach(device_t dev)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	sc = device_get_softc(dev);

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = ET_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	error = et_bus_config(dev);
	if (error)
		goto fail;

	et_get_eaddr(dev, eaddr);

	CSR_WRITE_4(sc, ET_PM,
	    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(dev);
	if (error)
		goto fail;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_mtu = ETHERMTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
	IFQ_SET_READY(&ifp->if_snd);

	et_chip_attach(sc);

	error = mii_phy_probe(dev, &sc->sc_miibus,
	    et_ifmedia_upd, et_ifmedia_sts);
	if (error) {
		device_printf(dev, "can't probe any PHY\n");
		goto fail;
	}

	ether_ifattach(ifp, eaddr);
	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);

#if __FreeBSD_version > 700030
	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, et_intr, sc, &sc->sc_irq_handle);
#else
	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    et_intr, sc, &sc->sc_irq_handle);
#endif
	if (error) {
		device_printf(dev, "can't setup intr\n");
		goto fail;
	}

	et_add_sysctls(sc);

	return (0);
fail:
	et_detach(dev);
	return (error);
}

static int
et_detach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = sc->ifp;

		ET_LOCK(sc);
		et_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
		ET_UNLOCK(sc);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
		    sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
		    sc->sc_mem_res);
	}

	et_dma_free(dev);

	if (sc->ifp != NULL)
		if_free(sc->ifp);

	/* XXX Destroy lock here */
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static int
et_shutdown(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	ET_LOCK(sc);
	et_stop(sc);
	ET_UNLOCK(sc);
	return (0);
}

static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50
	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
		    "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}
#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return (ret);
}
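/*
 * Editorial note: __SHIFTIN()/__SHIFTOUT(), inherited from this driver's
 * DragonFly/NetBSD lineage, move a value into or out of a register field
 * that is described only by its mask.  A minimal sketch of the idea
 * (hypothetical EXAMPLE_* names, not the real definitions):
 */
#if 0
/* The lowest set bit of the mask gives the field's scale factor. */
#define EXAMPLE_LOWBIT(mask)		((mask) & -(mask))
#define EXAMPLE_SHIFTIN(val, mask)	((val) * EXAMPLE_LOWBIT(mask))
#define EXAMPLE_SHIFTOUT(reg, mask)	(((reg) & (mask)) / EXAMPLE_LOWBIT(mask))
/* e.g. EXAMPLE_SHIFTIN(0x5, 0x00f0) == 0x0050 and back again. */
#endif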
static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY	100
	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
		    "write phy %d, reg %d timed out\n", phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}
#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return (0);
}

static void
et_miibus_statchg(device_t dev)
{
	et_setmedia(device_get_softc(dev));
}

static int
et_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return (0);
}

static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	int res;

	ET_LOCK(sc);
	res = et_ifmedia_upd_locked(ifp);
	ET_UNLOCK(sc);

	return (res);
}

static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	ET_LOCK_ASSERT(sc);

	callout_stop(&sc->sc_tick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	sc->watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

static int
et_bus_config(device_t dev)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(dev, "EEPROM status error 0x%02x\n", val);
		return (ENXIO);
	}

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2);
		device_printf(dev, "ack latency %u, replay timer %u\n",
		    ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2);
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2);

	return (0);
}

static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
	uint32_t val;
	int i;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
	for (i = 0; i < 4; ++i)
		eaddr[i] = (val >> (8 * i)) & 0xff;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
	for (; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}
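/*
 * Worked example (illustrative): if ET_PCIR_MAC_ADDR0 reads as
 * 0x78563412 and ET_PCIR_MAC_ADDR1 as 0xbc9a, the loops above peel the
 * bytes off least-significant first, producing the station address
 * 12:34:56:78:9a:bc.
 */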
static void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}
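/*
 * Editorial note: ET_INTR_MASK is a disable mask, so a set bit blocks the
 * corresponding interrupt source.  Writing ~intrs in et_enable_intrs()
 * therefore unblocks exactly the sources named in `intrs', while the
 * 0xffffffff written by et_disable_intrs() blocks them all.
 */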
static int
et_dma_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create top level DMA tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT,
	    BUS_SPACE_UNRESTRICTED,
	    BUS_SPACE_MAXSIZE_32BIT,
	    0, NULL, NULL, &sc->sc_dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return (error);
	}

	/*
	 * Create TX ring DMA resources
	 */
	error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag,
	    (void **)&tx_ring->tr_desc,
	    &tx_ring->tr_paddr, &tx_ring->tr_dmap);
	if (error) {
		device_printf(dev, "can't create TX ring DMA resources\n");
		return (error);
	}

	/*
	 * Create TX status DMA resources
	 */
	error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag,
	    (void **)&txsd->txsd_status,
	    &txsd->txsd_paddr, &txsd->txsd_dmap);
	if (error) {
		device_printf(dev, "can't create TX status DMA resources\n");
		return (error);
	}

	/*
	 * Create DMA resources for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(dev, ET_RX_RING_SIZE,
		    &rx_ring->rr_dtag,
		    (void **)&rx_ring->rr_desc,
		    &rx_ring->rr_paddr, &rx_ring->rr_dmap);
		if (error) {
			device_printf(dev, "can't create DMA resources for "
			    "RX ring %d\n", i);
			return (error);
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA resources
	 */
	error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE,
	    &rxst_ring->rsr_dtag,
	    (void **)&rxst_ring->rsr_stat,
	    &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap);
	if (error) {
		device_printf(dev, "can't create RX stat ring DMA resources\n");
		return (error);
	}

	/*
	 * Create RX status DMA resources
	 */
	error = et_dma_mem_create(dev, sizeof(struct et_rxstatus),
	    &rxsd->rxsd_dtag,
	    (void **)&rxsd->rxsd_status,
	    &rxsd->rxsd_paddr, &rxsd->rxsd_dmap);
	if (error) {
		device_printf(dev, "can't create RX status DMA resources\n");
		return (error);
	}

	/*
	 * Create mbuf DMA resources
	 */
	error = et_dma_mbuf_create(dev);
	if (error)
		return (error);

	return (0);
}

static void
et_dma_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA resources
	 */
	et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
	    tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA resources
	 */
	et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
	    txsd->txsd_dmap);

	/*
	 * Destroy DMA resources for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
		    rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA resources
	 */
	et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
	    rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA resources
	 */
	et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
	    rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA resources
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);

	/*
	 * Destroy top level DMA tag
	 */
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);
}
static int
et_dma_mbuf_create(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ET_JUMBO_FRAMELEN, ET_NSEG_MAX,
	    BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_mbuf_dtag);
	if (error) {
		device_printf(dev, "can't create mbuf DMA tag\n");
		return (error);
	}

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		device_printf(dev, "can't create spare mbuf DMA map\n");
		bus_dma_tag_destroy(sc->sc_mbuf_dtag);
		sc->sc_mbuf_dtag = NULL;
		return (error);
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
			    &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				device_printf(dev, "can't create DMA map "
				    "for RX mbuf %d in RX ring %d\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(dev, 0, rx_done);
				return (error);
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
		    &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			device_printf(dev, "can't create DMA map "
			    "for TX mbuf %d\n", i);
			et_dma_mbuf_destroy(dev, i, rx_done);
			return (error);
		}
	}

	return (0);
}

static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	if (sc->sc_mbuf_dtag == NULL)
		return;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERT(rb->rb_mbuf == NULL,
			    ("RX mbuf in RX ring %d is not freed yet", i));
			bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet"));
		bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap);

	/*
	 * Destroy mbuf DMA tag
	 */
	bus_dma_tag_destroy(sc->sc_mbuf_dtag);
	sc->sc_mbuf_dtag = NULL;
}
static int
et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct et_softc *sc = device_get_softc(dev);
	int error;

	error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    size, 1, BUS_SPACE_MAXSIZE_32BIT,
	    0, NULL, NULL, dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    dmap);
	if (error) {
		device_printf(dev, "can't allocate DMA mem\n");
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return (error);
	}

	error = bus_dmamap_load(*dtag, *dmap, *addr, size,
	    et_dma_ring_addr, paddr, BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "can't load DMA mem\n");
		bus_dmamem_free(*dtag, *addr, *dmap);
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return (error);
	}
	return (0);
}

static void
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

static void
et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	KASSERT(nseg == 1, ("too many segments"));
	*((bus_addr_t *)arg) = seg->ds_addr;
}
static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}
static void
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp;
	uint32_t intrs;

	ET_LOCK(sc);
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ET_UNLOCK(sc);
		return;
	}

	et_disable_intrs(sc);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);
	ET_UNLOCK(sc);
}
static void
et_init_locked(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	const struct et_bsize *arr;
	int error, i;

	ET_LOCK_ASSERT(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	et_stop(sc);

	arr = et_bufsize_std;
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc, 1);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
back:
	if (error)
		et_stop(sc);
}

static void
et_init(void *xsc)
{
	struct et_softc *sc = xsc;

	ET_LOCK(sc);
	et_init_locked(sc);
	ET_UNLOCK(sc);
}
static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, max_framelen;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ET_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				    (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
					et_setmulti(sc);
			} else {
				et_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		ET_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ET_LOCK(sc);
			et_setmulti(sc);
			ET_UNLOCK(sc);
		}
		break;

	case SIOCSIFMTU:
		ET_LOCK(sc);
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			ET_UNLOCK(sc);
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			et_init_locked(sc);
		}
		ET_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
static void
et_start_locked(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd;

	ET_LOCK_ASSERT(sc);
	tbd = &sc->sc_tx_data;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (;;) {
		struct mbuf *m;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (et_encap(sc, &m)) {
			ifp->if_oerrors++;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		ETHER_BPF_MTAP(ifp, m);
	}

	sc->watchdog_timer = 5;
}

static void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;

	ET_LOCK(sc);
	et_start_locked(ifp);
	ET_UNLOCK(sc);
}

static void
et_watchdog(struct et_softc *sc)
{
	ET_LOCK_ASSERT(sc);

	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		return;

	if_printf(sc->ifp, "watchdog timed out\n");

	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	et_init_locked(sc);
	et_start_locked(sc->ifp);
}
static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
	    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(sc->ifp, "can't stop RX DMA engine\n");
		return (ETIMEDOUT);
	}
	return (0);
}

static int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return (0);
}

static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_PREWRITE);
}

static void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_mbuf_dtag,
				    rb->rb_dmap);
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
		    BUS_DMASYNC_PREWRITE);
	}
}
static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = 0;
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		uint32_t *hp, h;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
	}
	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
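	/*
	 * Worked example (illustrative): (h & 0x3f800000) >> 23 keeps bits
	 * 23-29 of the big-endian CRC32, a 7-bit bucket number 0-127.  For
	 * h = 0xd5c0ffee the bucket is 0x2b (43), which falls in the 32-63
	 * range and therefore sets bit 11 of hash[1] above.
	 */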
	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		rxmem_size = ET_MEM_SIZE -
		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);

	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);
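	/*
	 * Worked example (illustrative): with the default 1500-byte MTU,
	 * ET_FRAMELEN(1500) stays under 2048, so the RX queue keeps
	 * ET_MEM_RXSIZE_DEFAULT bytes of the 16KB internal memory.  A jumbo
	 * MTU past ET_RXMAC_CUT_THRU_FRMLEN instead shrinks the RX queue to
	 * ET_MEM_SIZE minus the rounded-up TX reservation computed above,
	 * and the TX queue always begins at rxq_end + 1.
	 */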
	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return (error);

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return (error);

	return (0);
}

static int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				if_printf(sc->ifp, "ring %d, buf %d: "
				    "newbuf failed: %d\n", n, i, error);
				return (error);
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsz __unused, int error)
{
	struct et_dmamap_ctx *ctx = xctx;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}

static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init RX DMA engine\n");
		return (error);
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
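	/*
	 * Editorial note: the driver defaults program 32 packets per RX
	 * interrupt and an RX interrupt delay of 20 units; the unit is
	 * x10 usec (see et_rx_intr_delay above), so the default delay is
	 * 200 usec.
	 */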
	return (0);
}

static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init TX DMA engine\n");
		return (error);
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return (0);
}

static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	    __SHIFTIN(88, ET_IPG_NONB2B_2) |
	    __SHIFTIN(80, ET_IPG_MINIFG) |
	    __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}
static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLPs
		 * that the ET1310 uses.
		 */
		val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generations)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

static void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}
static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);
	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(sc->ifp, "can't start RX DMA engine\n");
		return (ETIMEDOUT);
	}
	return (0);
}

static int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return (0);
}

static int
et_enable_txrx(struct et_softc *sc, int media_upd)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t val;
	int i, error;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if (media_upd)
		et_ifmedia_upd_locked(ifp);
	else
		et_setmedia(sc);

#define NRETRY	50
	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;
		DELAY(100);
	}
	if (i == NRETRY) {
		if_printf(ifp, "can't enable RX/TX\n");
		return (ETIMEDOUT);
	}
#undef NRETRY

	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

	/*
	 * Start TX/RX DMA engine
	 */
	error = et_start_rxdma(sc);
	if (error)
		return (error);

	error = et_start_txdma(sc);
	if (error)
		return (error);

	return (0);
}
static void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp;
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	rxsd = &sc->sc_rx_status;
	rxst_ring = &sc->sc_rxstat_ring;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
		    ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;

				m_adj(m, -ETHER_CRC_LEN);

				ifp->if_ipackets++;
				ET_UNLOCK(sc);
				ifp->if_input(ifp, m);
				ET_LOCK(sc);
			}
		} else {
			ifp->if_ierrors++;
		}
		m = NULL;	/* Catch invalid reference */

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp, "WARNING!! ring %d, "
			    "buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		MPASS(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}
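/*
 * Editorial note on the (index, wrap) pairs used above and in et_txeof():
 * the hardware and the driver each track a ring position as an index plus
 * a wrap bit that toggles on every roll-over.  Two positions are equal
 * only if index and wrap both match, which lets a completely full ring be
 * told apart from a completely empty one at no extra cost.
 */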
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	bus_dma_segment_t segs[ET_NSEG_MAX];
	struct et_dmamap_ctx ctx;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
	    ("not enough spare TX desc (%d)", maxsegs));

	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	ctx.nsegs = maxsegs;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
	    et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->sc_mbuf_dtag, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		if_printf(sc->ifp, "can't load TX mbuf, error %d\n",
		    error);
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m, M_DONTWAIT);
		if (m_new == NULL) {
			if_printf(sc->ifp, "can't defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		}
		*m0 = m = m_new;

		ctx.nsegs = maxsegs;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
		    et_dma_buf_addr, &ctx,
		    BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if (ctx.nsegs == 0) {
				bus_dmamap_unload(sc->sc_mbuf_dtag, map);
				error = EFBIG;
			}
			if_printf(sc->ifp,
			    "can't load defragged TX mbuf\n");
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += ctx.nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}
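	/*
	 * Editorial note: the quotient trick above requests a TX-done
	 * interrupt roughly every sc_tx_intr_nsegs segments.  sc_tx counts
	 * all segments ever queued, and the integer quotient
	 * sc_tx / sc_tx_intr_nsegs only changes when the counter crosses
	 * the next multiple.  E.g. with the default 126, going from 120 to
	 * 130 queued segments moves the quotient from 0 to 1, so that
	 * frame's last descriptor gets ET_TDCTRL2_INTR.
	 */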
	last_idx = -1;
	for (i = 0; i < ctx.nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr);
		td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == ctx.nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}

	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	MPASS(last_idx >= 0);
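	/*
	 * The map that was loaded above came from the first descriptor's
	 * slot, but the mbuf is bookkept at the last descriptor's slot
	 * (the index et_txeof() reaches when the frame completes), so the
	 * two slots trade DMA maps to keep each map paired with its mbuf.
	 */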
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += ctx.nsegs;
	MPASS(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_PREWRITE);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
	    ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return (error);
}

static void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp;
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	uint32_t tx_done;
	int end, wrap;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	tx_ring = &sc->sc_tx_ring;
	tbd = &sc->sc_tx_data;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		    sizeof(struct et_txdesc));
		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
		    BUS_DMASYNC_PREWRITE);

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			ifp->if_opackets++;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		MPASS(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		sc->watchdog_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	et_start_locked(ifp);
}
static void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp;
	struct mii_data *mii;

	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	mii = device_get_softc(sc->sc_miibus);

	mii_tick(mii);
	et_watchdog(sc);
	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if_printf(ifp, "Link up, enable TX/RX\n");
		if (et_enable_txrx(sc, 0) == 0)
			et_start_locked(ifp);
	}
	callout_reset(&sc->sc_tick, hz, et_tick, sc);
}

static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

static int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_dmamap_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t dmap;
	int error, len;

	MPASS(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	m = m_getl(len0, /* init ? M_WAIT :*/ M_DONTWAIT, MT_DATA, M_PKTHDR, &len);
	if (m == NULL) {
		error = ENOBUFS;

		if (init) {
			if_printf(sc->ifp,
			    "m_getl failed, size %d\n", len0);
			return (error);
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try to load the RX mbuf into the temporary DMA map
	 */
	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m,
	    et_dma_buf_addr, &ctx,
	    init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(sc->sc_mbuf_dtag,
			    sc->sc_mbuf_tmp_dmap);
			error = EFBIG;
			if_printf(sc->ifp, "too many segments?!\n");
		}
		m_freem(m);
		m = NULL;

		if (init) {
			if_printf(sc->ifp, "can't load RX mbuf\n");
			return (error);
		} else {
			goto back;
		}
	}

	if (!init) {
		bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap);
	}
	rb->rb_mbuf = m;
	rb->rb_paddr = seg.ds_addr;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return (error);
}
/*
 * Create sysctl tree
 */
static void
et_add_sysctls(struct et_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
	    "RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
	    "RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
	    "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
}
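/*
 * Editorial note: the nodes above hang off the device's sysctl tree, so
 * for unit 0 they can be read or tuned at runtime, e.g. (illustrative):
 *
 *	sysctl dev.et.0.rx_intr_npkts=64
 *	sysctl dev.et.0.rx_intr_delay=10
 */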
static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = sc->ifp;
	int error = 0, v;

	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	return (error);
}

static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = sc->ifp;
	int error = 0, v;

	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	return (error);
}

static void
et_setmedia(struct et_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

static void
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
{
	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
	struct et_rxdesc *desc;

	MPASS(buf_idx < ET_RX_NDESC);
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(paddr);
	desc->rd_addr_lo = ET_ADDR_LO(paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
}