/*-
 * Copyright (c) 2007 Sepherosa Ziehau.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/et/if_etreg.h>
#include <dev/et/if_etvar.h>

#include "miibus_if.h"

MODULE_DEPEND(et, pci, 1, 1, 1);
MODULE_DEPEND(et, ether, 1, 1, 1);
MODULE_DEPEND(et, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
TUNABLE_INT("hw.et.msi_disable", &msi_disable);

#define ET_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)

static int  et_probe(device_t);
static int  et_attach(device_t);
static int  et_detach(device_t);
static int  et_shutdown(device_t);

static int  et_miibus_readreg(device_t, int, int);
static int  et_miibus_writereg(device_t, int, int, int);
static void et_miibus_statchg(device_t);

static void et_init_locked(struct et_softc *);
static void et_init(void *);
static int  et_ioctl(struct ifnet *, u_long, caddr_t);
static void et_start_locked(struct ifnet *);
static void et_start(struct ifnet *);
static void et_watchdog(struct et_softc *);
static int  et_ifmedia_upd_locked(struct ifnet *);
static int  et_ifmedia_upd(struct ifnet *);
static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void et_add_sysctls(struct et_softc *);
static int  et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int  et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void et_intr(void *);
static void et_enable_intrs(struct et_softc *, uint32_t);
static void et_disable_intrs(struct et_softc *);
static void et_rxeof(struct et_softc *);
static void et_txeof(struct et_softc *);

static int  et_dma_alloc(device_t);
static void et_dma_free(device_t);
static int  et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *,
		void **, bus_addr_t *, bus_dmamap_t *);
static void et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static int  et_dma_mbuf_create(device_t);
static void et_dma_mbuf_destroy(device_t, int, const int[]);
static void et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
static void et_dma_buf_addr(void *, bus_dma_segment_t *, int,
		bus_size_t, int);
static int  et_init_tx_ring(struct et_softc *);
static int  et_init_rx_ring(struct et_softc *);
static void et_free_tx_ring(struct et_softc *);
static void et_free_rx_ring(struct et_softc *);
static int  et_encap(struct et_softc *, struct mbuf **);
static int  et_newbuf(struct et_rxbuf_data *, int, int, int);
static int  et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int  et_newbuf_hdr(struct et_rxbuf_data *, int, int);

static void et_stop(struct et_softc *);
static int  et_chip_init(struct et_softc *);
static void et_chip_attach(struct et_softc *);
static void et_init_mac(struct et_softc *);
static void et_init_rxmac(struct et_softc *);
static void et_init_txmac(struct et_softc *);
static int  et_init_rxdma(struct et_softc *);
static int  et_init_txdma(struct et_softc *);
static int  et_start_rxdma(struct et_softc *);
static int  et_start_txdma(struct et_softc *);
static int  et_stop_rxdma(struct et_softc *);
static int  et_stop_txdma(struct et_softc *);
static int  et_enable_txrx(struct et_softc *, int);
static void et_reset(struct et_softc *);
static int  et_bus_config(struct et_softc *);
static void et_get_eaddr(device_t, uint8_t[]);
static void et_setmulti(struct et_softc *);
static void et_tick(void *);
static void et_setmedia(struct et_softc *);
static void et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);

static const struct et_dev {
    uint16_t    vid;
    uint16_t    did;
    const char  *desc;
} et_devices[] = {
    { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
      "Agere ET1310 Gigabit Ethernet" },
    { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
      "Agere ET1310 Fast Ethernet" },
    { 0, 0, NULL }
};

static device_method_t et_methods[] = {
    DEVMETHOD(device_probe,     et_probe),
    DEVMETHOD(device_attach,    et_attach),
    DEVMETHOD(device_detach,    et_detach),
    DEVMETHOD(device_shutdown,  et_shutdown),

    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),

    DEVMETHOD(miibus_readreg,   et_miibus_readreg),
    DEVMETHOD(miibus_writereg,  et_miibus_writereg),
    DEVMETHOD(miibus_statchg,   et_miibus_statchg),

    { 0, 0 }
};

static driver_t et_driver = {
    "et",
    et_methods,
    sizeof(struct et_softc)
};

static devclass_t et_devclass;

DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);

static int et_rx_intr_npkts = 32;
static int et_rx_intr_delay = 20;       /* x10 usec */
static int et_tx_intr_nsegs = 126;
static uint32_t et_timer = 1000 * 1000 * 1000;  /* nanosec */

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
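
/*
 * Note on the RX moderation pair above: sc_rx_intr_npkts and
 * sc_rx_intr_delay are programmed into ET_RX_INTR_NPKTS and
 * ET_RX_INTR_DELAY in et_init_rxdma().  Presumably (an assumption
 * about the ET1310, not something stated in this file) the chip
 * raises an RX interrupt once either threshold is crossed: "npkts"
 * packets received, or "delay" (in 10 usec units) elapsed with
 * packets pending.  Both values are runtime-adjustable through the
 * sysctl handlers at the bottom of this file.
 */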

static const struct et_bsize et_bufsize_std[ET_RX_NRING] = {
    { .bufsize = ET_RXDMA_CTRL_RING0_128,
      .newbuf = et_newbuf_hdr },
    { .bufsize = ET_RXDMA_CTRL_RING1_2048,
      .newbuf = et_newbuf_cluster },
};

static int
et_probe(device_t dev)
{
    const struct et_dev *d;
    uint16_t did, vid;

    vid = pci_get_vendor(dev);
    did = pci_get_device(dev);

    for (d = et_devices; d->desc != NULL; ++d) {
        if (vid == d->vid && did == d->did) {
            device_set_desc(dev, d->desc);
            return (BUS_PROBE_DEFAULT);
        }
    }
    return (ENXIO);
}

static int
et_attach(device_t dev)
{
    struct et_softc *sc;
    struct ifnet *ifp;
    uint8_t eaddr[ETHER_ADDR_LEN];
    int cap, error, msic;

    sc = device_get_softc(dev);
    sc->dev = dev;
    mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
        MTX_DEF);

    ifp = sc->ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        device_printf(dev, "cannot if_alloc()\n");
        error = ENOSPC;
        goto fail;
    }

    /*
     * Initialize tunables
     */
    sc->sc_rx_intr_npkts = et_rx_intr_npkts;
    sc->sc_rx_intr_delay = et_rx_intr_delay;
    sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
    sc->sc_timer = et_timer;

    /* Enable bus mastering */
    pci_enable_busmaster(dev);

    /* Allocate IO memory */
    sc->sc_mem_rid = ET_PCIR_BAR;
    sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &sc->sc_mem_rid, RF_ACTIVE);
    if (sc->sc_mem_res == NULL) {
        device_printf(dev, "can't allocate IO memory\n");
        error = ENXIO;
        goto fail;
    }

    msic = 0;
    if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
        sc->sc_expcap = cap;
        sc->sc_flags |= ET_FLAG_PCIE;
        msic = pci_msi_count(dev);
        if (bootverbose)
            device_printf(dev, "MSI count: %d\n", msic);
    }
    if (msic > 0 && msi_disable == 0) {
        msic = 1;
        if (pci_alloc_msi(dev, &msic) == 0) {
            if (msic == 1) {
                device_printf(dev, "Using %d MSI message\n",
                    msic);
                sc->sc_flags |= ET_FLAG_MSI;
            } else
                pci_release_msi(dev);
        }
    }

    if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
        sc->sc_irq_rid = 0;
        sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
    } else {
        sc->sc_irq_rid = 1;
        sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &sc->sc_irq_rid, RF_ACTIVE);
    }
    if (sc->sc_irq_res == NULL) {
        device_printf(dev, "can't allocate irq\n");
        error = ENXIO;
        goto fail;
    }

    error = et_bus_config(sc);
    if (error)
        goto fail;

    et_get_eaddr(dev, eaddr);

    CSR_WRITE_4(sc, ET_PM,
        ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

    et_reset(sc);

    et_disable_intrs(sc);

    error = et_dma_alloc(dev);
    if (error)
        goto fail;

    ifp->if_softc = sc;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_init = et_init;
    ifp->if_ioctl = et_ioctl;
    ifp->if_start = et_start;
    ifp->if_mtu = ETHERMTU;
    ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU;
    ifp->if_capenable = ifp->if_capabilities;
    IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
    IFQ_SET_READY(&ifp->if_snd);

    error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
        et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
    if (error) {
        device_printf(dev, "attaching PHYs failed\n");
        goto fail;
    }

    ether_ifattach(ifp, eaddr);
    callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);

    error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
        NULL, et_intr, sc, &sc->sc_irq_handle);
    if (error) {
        ether_ifdetach(ifp);
        device_printf(dev, "can't setup intr\n");
        goto fail;
    }

    et_add_sysctls(sc);

    return (0);
fail:
    et_detach(dev);
    return (error);
}

static int
et_detach(device_t dev)
{
    struct et_softc *sc = device_get_softc(dev);

    if (device_is_attached(dev)) {
        struct ifnet *ifp = sc->ifp;

        ET_LOCK(sc);
        et_stop(sc);
        bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
        ET_UNLOCK(sc);

        ether_ifdetach(ifp);
    }

    if (sc->sc_miibus != NULL)
        device_delete_child(dev, sc->sc_miibus);
    bus_generic_detach(dev);

    if (sc->sc_irq_res != NULL) {
        bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
            sc->sc_irq_res);
    }
    if ((sc->sc_flags & ET_FLAG_MSI) != 0)
        pci_release_msi(dev);

    if (sc->sc_mem_res != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
            sc->sc_mem_res);
    }

    if (sc->ifp != NULL)
        if_free(sc->ifp);

    et_dma_free(dev);

    mtx_destroy(&sc->sc_mtx);

    return (0);
}

static int
et_shutdown(device_t dev)
{
    struct et_softc *sc = device_get_softc(dev);

    ET_LOCK(sc);
    et_stop(sc);
    ET_UNLOCK(sc);
    return (0);
}

static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
    struct et_softc *sc = device_get_softc(dev);
    uint32_t val;
    int i, ret;

    /* Stop any pending operations */
    CSR_WRITE_4(sc, ET_MII_CMD, 0);

    val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
    val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
    CSR_WRITE_4(sc, ET_MII_ADDR, val);

    /* Start reading */
    CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY 50

    for (i = 0; i < NRETRY; ++i) {
        val = CSR_READ_4(sc, ET_MII_IND);
        if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
            break;
        DELAY(50);
    }
    if (i == NRETRY) {
        if_printf(sc->ifp,
            "read phy %d, reg %d timed out\n", phy, reg);
        ret = 0;
        goto back;
    }

#undef NRETRY

    val = CSR_READ_4(sc, ET_MII_STAT);
    ret = val & ET_MII_STAT_VALUE_MASK;

back:
    /* Make sure that the current operation is stopped */
    CSR_WRITE_4(sc, ET_MII_CMD, 0);
    return (ret);
}

static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
    struct et_softc *sc = device_get_softc(dev);
    uint32_t val;
    int i;

    /* Stop any pending operations */
    CSR_WRITE_4(sc, ET_MII_CMD, 0);

    val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
    val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
    CSR_WRITE_4(sc, ET_MII_ADDR, val);

    /* Start writing */
    CSR_WRITE_4(sc, ET_MII_CTRL,
        (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);

#define NRETRY 100

    for (i = 0; i < NRETRY; ++i) {
        val = CSR_READ_4(sc, ET_MII_IND);
        if ((val & ET_MII_IND_BUSY) == 0)
            break;
        DELAY(50);
    }
    if (i == NRETRY) {
        if_printf(sc->ifp,
            "write phy %d, reg %d timed out\n", phy, reg);
        et_miibus_readreg(dev, phy, reg);
    }

#undef NRETRY

    /* Make sure that the current operation is stopped */
    CSR_WRITE_4(sc, ET_MII_CMD, 0);
    return (0);
}
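
/*
 * Both MII accessors above follow the same register protocol: clear
 * ET_MII_CMD to abort any in-flight transaction, program the PHY and
 * register numbers into ET_MII_ADDR, kick off the transfer (via
 * ET_MII_CMD for reads, ET_MII_CTRL for writes), then busy-wait on
 * ET_MII_IND until the BUSY (and, for reads, INVALID) bits clear.
 * The trailing write of 0 to ET_MII_CMD stops the engine again so a
 * later access starts from a clean state.
 */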

static void
et_miibus_statchg(device_t dev)
{
    et_setmedia(device_get_softc(dev));
}

static int
et_ifmedia_upd_locked(struct ifnet *ifp)
{
    struct et_softc *sc = ifp->if_softc;
    struct mii_data *mii = device_get_softc(sc->sc_miibus);
    struct mii_softc *miisc;

    LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
        mii_phy_reset(miisc);
    return (mii_mediachg(mii));
}

static int
et_ifmedia_upd(struct ifnet *ifp)
{
    struct et_softc *sc = ifp->if_softc;
    int res;

    ET_LOCK(sc);
    res = et_ifmedia_upd_locked(ifp);
    ET_UNLOCK(sc);

    return (res);
}

static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct et_softc *sc = ifp->if_softc;
    struct mii_data *mii = device_get_softc(sc->sc_miibus);

    ET_LOCK(sc);
    mii_pollstat(mii);
    ifmr->ifm_active = mii->mii_media_active;
    ifmr->ifm_status = mii->mii_media_status;
    ET_UNLOCK(sc);
}

static void
et_stop(struct et_softc *sc)
{
    struct ifnet *ifp = sc->ifp;

    ET_LOCK_ASSERT(sc);

    callout_stop(&sc->sc_tick);

    et_stop_rxdma(sc);
    et_stop_txdma(sc);

    et_disable_intrs(sc);

    et_free_tx_ring(sc);
    et_free_rx_ring(sc);

    et_reset(sc);

    sc->sc_tx = 0;
    sc->sc_tx_intr = 0;
    sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

    sc->watchdog_timer = 0;
    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

static int
et_bus_config(struct et_softc *sc)
{
    uint32_t val, max_plsz;
    uint16_t ack_latency, replay_timer;

    /*
     * Test whether EEPROM is valid
     * NOTE: Read twice to get the correct value
     */
    pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
    val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
    if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
        device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
        return (ENXIO);
    }

    if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
        return (0);

    /*
     * Configure ACK latency and replay timer according to
     * max payload size
     */
    val = pci_read_config(sc->dev,
        sc->sc_expcap + PCIR_EXPRESS_DEVICE_CAP, 4);
    max_plsz = val & PCIM_EXP_CAP_MAX_PAYLOAD;

    switch (max_plsz) {
    case ET_PCIV_DEVICE_CAPS_PLSZ_128:
        ack_latency = ET_PCIV_ACK_LATENCY_128;
        replay_timer = ET_PCIV_REPLAY_TIMER_128;
        break;

    case ET_PCIV_DEVICE_CAPS_PLSZ_256:
        ack_latency = ET_PCIV_ACK_LATENCY_256;
        replay_timer = ET_PCIV_REPLAY_TIMER_256;
        break;

    default:
        ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
        replay_timer = pci_read_config(sc->dev,
            ET_PCIR_REPLAY_TIMER, 2);
        device_printf(sc->dev, "ack latency %u, replay timer %u\n",
            ack_latency, replay_timer);
        break;
    }
    if (ack_latency != 0) {
        pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
        pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
            2);
    }

    /*
     * Set L0s and L1 latency timer to 2us
     */
    val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
    val &= ~(PCIM_LINK_CAP_L0S_EXIT | PCIM_LINK_CAP_L1_EXIT);
    /* L0s exit latency : 2us */
    /* L1 exit latency : 2us */
    pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);

    /*
     * Set max read request size to 2048 bytes
     */
    val = pci_read_config(sc->dev,
        sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
    val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
    val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
    pci_write_config(sc->dev,
        sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, val, 2);

    return (0);
}

static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
    uint32_t val;
    int i;

    val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
    for (i = 0; i < 4; ++i)
        eaddr[i] = (val >> (8 * i)) & 0xff;

    val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
    for (; i < ETHER_ADDR_LEN; ++i)
        eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}
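
/*
 * Worked example for the byte unpacking above: the MAC address is
 * stored little-endian across the two config words, so for
 * 00:11:22:33:44:55 the chip holds ET_PCIR_MAC_ADDR0 = 0x33221100
 * and the low 16 bits of ET_PCIR_MAC_ADDR1 = 0x5544; shifting by
 * 8 * i extracts eaddr[0..3] from the first word and eaddr[4..5]
 * from the second.
 */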

static void
et_reset(struct et_softc *sc)
{
    CSR_WRITE_4(sc, ET_MAC_CFG1,
        ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
        ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
        ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

    CSR_WRITE_4(sc, ET_SWRST,
        ET_SWRST_TXDMA | ET_SWRST_RXDMA |
        ET_SWRST_TXMAC | ET_SWRST_RXMAC |
        ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

    CSR_WRITE_4(sc, ET_MAC_CFG1,
        ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
        ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
    CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_disable_intrs(struct et_softc *sc)
{
    CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
    CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

static int
et_dma_alloc(device_t dev)
{
    struct et_softc *sc = device_get_softc(dev);
    struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
    struct et_txstatus_data *txsd = &sc->sc_tx_status;
    struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
    struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
    int i, error;

    /*
     * Create top level DMA tag
     */
    error = bus_dma_tag_create(NULL, 1, 0,
        BUS_SPACE_MAXADDR_32BIT,
        BUS_SPACE_MAXADDR,
        NULL, NULL,
        BUS_SPACE_MAXSIZE_32BIT,
        BUS_SPACE_UNRESTRICTED,
        BUS_SPACE_MAXSIZE_32BIT,
        0, NULL, NULL, &sc->sc_dtag);
    if (error) {
        device_printf(dev, "can't create DMA tag\n");
        return (error);
    }

    /*
     * Create TX ring DMA structures
     */
    error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag,
        (void **)&tx_ring->tr_desc,
        &tx_ring->tr_paddr, &tx_ring->tr_dmap);
    if (error) {
        device_printf(dev, "can't create TX ring DMA structures\n");
        return (error);
    }

    /*
     * Create TX status DMA structures
     */
    error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag,
        (void **)&txsd->txsd_status,
        &txsd->txsd_paddr, &txsd->txsd_dmap);
    if (error) {
        device_printf(dev, "can't create TX status DMA structures\n");
        return (error);
    }

    /*
     * Create DMA structures for RX rings
     */
    for (i = 0; i < ET_RX_NRING; ++i) {
        static const uint32_t rx_ring_posreg[ET_RX_NRING] =
        { ET_RX_RING0_POS, ET_RX_RING1_POS };

        struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

        error = et_dma_mem_create(dev, ET_RX_RING_SIZE,
            &rx_ring->rr_dtag,
            (void **)&rx_ring->rr_desc,
            &rx_ring->rr_paddr, &rx_ring->rr_dmap);
        if (error) {
            device_printf(dev, "can't create DMA structures for "
                "RX ring %d\n", i);
            return (error);
        }
        rx_ring->rr_posreg = rx_ring_posreg[i];
    }

    /*
     * Create RX stat ring DMA structures
     */
    error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE,
        &rxst_ring->rsr_dtag,
        (void **)&rxst_ring->rsr_stat,
        &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap);
    if (error) {
        device_printf(dev, "can't create RX stat ring DMA structures\n");
        return (error);
    }

    /*
     * Create RX status DMA structures
     */
    error = et_dma_mem_create(dev, sizeof(struct et_rxstatus),
        &rxsd->rxsd_dtag,
        (void **)&rxsd->rxsd_status,
        &rxsd->rxsd_paddr, &rxsd->rxsd_dmap);
    if (error) {
        device_printf(dev, "can't create RX status DMA structures\n");
        return (error);
    }

    /*
     * Create mbuf DMA structures
     */
    error = et_dma_mbuf_create(dev);
    if (error)
        return (error);

    return (0);
}

static void
et_dma_free(device_t dev)
{
    struct et_softc *sc = device_get_softc(dev);
    struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
    struct et_txstatus_data *txsd = &sc->sc_tx_status;
    struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
    struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
    int i, rx_done[ET_RX_NRING];

    /*
     * Destroy TX ring DMA structures
     */
    et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
        tx_ring->tr_dmap);

    /*
     * Destroy TX status DMA structures
     */
    et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
        txsd->txsd_dmap);

    /*
     * Destroy DMA structures for RX rings
     */
    for (i = 0; i < ET_RX_NRING; ++i) {
        struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

        et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
            rx_ring->rr_dmap);
    }

    /*
     * Destroy RX stat ring DMA structures
     */
    et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
        rxst_ring->rsr_dmap);

    /*
     * Destroy RX status DMA structures
     */
    et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
        rxsd->rxsd_dmap);

    /*
     * Destroy mbuf DMA structures
     */
    for (i = 0; i < ET_RX_NRING; ++i)
        rx_done[i] = ET_RX_NDESC;
    et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);

    /*
     * Destroy top level DMA tag
     */
    if (sc->sc_dtag != NULL)
        bus_dma_tag_destroy(sc->sc_dtag);
}

static int
et_dma_mbuf_create(device_t dev)
{
    struct et_softc *sc = device_get_softc(dev);
    struct et_txbuf_data *tbd = &sc->sc_tx_data;
    int i, error, rx_done[ET_RX_NRING];

    /*
     * Create mbuf DMA tag
     */
    error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
        NULL, NULL,
        ET_JUMBO_FRAMELEN, ET_NSEG_MAX,
        BUS_SPACE_MAXSIZE_32BIT,
        BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_mbuf_dtag);
    if (error) {
        device_printf(dev, "can't create mbuf DMA tag\n");
        return (error);
    }

    /*
     * Create spare DMA map for RX mbufs
     */
    error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap);
    if (error) {
        device_printf(dev, "can't create spare mbuf DMA map\n");
        bus_dma_tag_destroy(sc->sc_mbuf_dtag);
        sc->sc_mbuf_dtag = NULL;
        return (error);
    }

    /*
     * Create DMA maps for RX mbufs
     */
    bzero(rx_done, sizeof(rx_done));
    for (i = 0; i < ET_RX_NRING; ++i) {
        struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
        int j;

        for (j = 0; j < ET_RX_NDESC; ++j) {
            error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
                &rbd->rbd_buf[j].rb_dmap);
            if (error) {
                device_printf(dev, "can't create RX mbuf "
                    "DMA map %d for RX ring %d\n", j, i);
                rx_done[i] = j;
                et_dma_mbuf_destroy(dev, 0, rx_done);
                return (error);
            }
        }
        rx_done[i] = ET_RX_NDESC;

        rbd->rbd_softc = sc;
        rbd->rbd_ring = &sc->sc_rx_ring[i];
    }

    /*
     * Create DMA maps for TX mbufs
     */
    for (i = 0; i < ET_TX_NDESC; ++i) {
        error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
            &tbd->tbd_buf[i].tb_dmap);
        if (error) {
            device_printf(dev, "can't create TX mbuf "
                "DMA map %d\n", i);
            et_dma_mbuf_destroy(dev, i, rx_done);
            return (error);
        }
    }

    return (0);
}

static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
    struct et_softc *sc = device_get_softc(dev);
    struct et_txbuf_data *tbd = &sc->sc_tx_data;
    int i;

    if (sc->sc_mbuf_dtag == NULL)
        return;

    /*
     * Destroy DMA maps for RX mbufs
     */
    for (i = 0; i < ET_RX_NRING; ++i) {
        struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
        int j;

        for (j = 0; j < rx_done[i]; ++j) {
            struct et_rxbuf *rb = &rbd->rbd_buf[j];

            KASSERT(rb->rb_mbuf == NULL,
                ("RX mbuf in RX ring %d is not freed yet\n", i));
            bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap);
        }
    }

    /*
     * Destroy DMA maps for TX mbufs
     */
    for (i = 0; i < tx_done; ++i) {
        struct et_txbuf *tb = &tbd->tbd_buf[i];

        KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
        bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap);
    }

    /*
     * Destroy spare mbuf DMA map
     */
    bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap);

    /*
     * Destroy mbuf DMA tag
     */
    bus_dma_tag_destroy(sc->sc_mbuf_dtag);
    sc->sc_mbuf_dtag = NULL;
}

static int
et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
    struct et_softc *sc = device_get_softc(dev);
    int error;

    error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
        NULL, NULL,
        size, 1, BUS_SPACE_MAXSIZE_32BIT,
        0, NULL, NULL, dtag);
    if (error) {
        device_printf(dev, "can't create DMA tag\n");
        return (error);
    }

    error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
        dmap);
    if (error) {
        device_printf(dev, "can't allocate DMA mem\n");
        bus_dma_tag_destroy(*dtag);
        *dtag = NULL;
        return (error);
    }

    error = bus_dmamap_load(*dtag, *dmap, *addr, size,
        et_dma_ring_addr, paddr, BUS_DMA_WAITOK);
    if (error) {
        device_printf(dev, "can't load DMA mem\n");
        bus_dmamem_free(*dtag, *addr, *dmap);
        bus_dma_tag_destroy(*dtag);
        *dtag = NULL;
        return (error);
    }

    return (0);
}

static void
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
    if (dtag != NULL) {
        bus_dmamap_unload(dtag, dmap);
        bus_dmamem_free(dtag, addr, dmap);
        bus_dma_tag_destroy(dtag);
    }
}

static void
et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
    KASSERT(nseg == 1, ("too many segments\n"));
    *((bus_addr_t *)arg) = seg->ds_addr;
}
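
/*
 * et_dma_ring_addr() is the usual busdma "capture the physical
 * address" callback: the ring tags above are created with
 * nsegments == 1, so the callback simply stores the lone segment's
 * start address through the bus_addr_t pointer passed as the
 * callback argument (the KASSERT guards the single-segment
 * assumption).
 */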

static void
et_chip_attach(struct et_softc *sc)
{
    uint32_t val;

    /*
     * Perform minimal initialization
     */

    /* Disable loopback */
    CSR_WRITE_4(sc, ET_LOOPBACK, 0);

    /* Reset MAC */
    CSR_WRITE_4(sc, ET_MAC_CFG1,
        ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
        ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
        ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

    /*
     * Setup half duplex mode
     */
    val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
        (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
        (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
        ET_MAC_HDX_EXC_DEFER;
    CSR_WRITE_4(sc, ET_MAC_HDX, val);

    /* Clear MAC control */
    CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

    /* Reset MII */
    CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

    /* Bring MAC out of reset state */
    CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

    /* Enable memory controllers */
    CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

static void
et_intr(void *xsc)
{
    struct et_softc *sc = xsc;
    struct ifnet *ifp = sc->ifp;
    uint32_t intrs;

    ET_LOCK(sc);
    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
        ET_UNLOCK(sc);
        return;
    }

    et_disable_intrs(sc);

    intrs = CSR_READ_4(sc, ET_INTR_STATUS);
    intrs &= ET_INTRS;
    if (intrs == 0) /* Not interested */
        goto back;

    if (intrs & ET_INTR_RXEOF)
        et_rxeof(sc);
    if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
        et_txeof(sc);
    if (intrs & ET_INTR_TIMER)
        CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
    et_enable_intrs(sc, ET_INTRS);
    ET_UNLOCK(sc);
}
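
/*
 * Interrupt handling order above: the mask is closed first so the
 * status read and the cleanups run without the chip re-raising
 * events, RX is drained before TX completion is processed, and a
 * timer event rearms ET_TIMER with sc_timer before the mask is
 * finally reopened with ET_INTRS.
 */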

static void
et_init_locked(struct et_softc *sc)
{
    struct ifnet *ifp = sc->ifp;
    const struct et_bsize *arr;
    int error, i;

    ET_LOCK_ASSERT(sc);

    if (ifp->if_drv_flags & IFF_DRV_RUNNING)
        return;

    et_stop(sc);

    arr = et_bufsize_std;
    for (i = 0; i < ET_RX_NRING; ++i) {
        sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
        sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
    }

    error = et_init_tx_ring(sc);
    if (error)
        goto back;

    error = et_init_rx_ring(sc);
    if (error)
        goto back;

    error = et_chip_init(sc);
    if (error)
        goto back;

    error = et_enable_txrx(sc, 1);
    if (error)
        goto back;

    et_enable_intrs(sc, ET_INTRS);

    callout_reset(&sc->sc_tick, hz, et_tick, sc);

    CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
back:
    if (error)
        et_stop(sc);
}

static void
et_init(void *xsc)
{
    struct et_softc *sc = xsc;

    ET_LOCK(sc);
    et_init_locked(sc);
    ET_UNLOCK(sc);
}

static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    struct et_softc *sc = ifp->if_softc;
    struct mii_data *mii = device_get_softc(sc->sc_miibus);
    struct ifreq *ifr = (struct ifreq *)data;
    int error = 0, mask, max_framelen;

    switch (cmd) {
    case SIOCSIFFLAGS:
        ET_LOCK(sc);
        if (ifp->if_flags & IFF_UP) {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                if ((ifp->if_flags ^ sc->sc_if_flags) &
                    (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
                    et_setmulti(sc);
            } else {
                et_init_locked(sc);
            }
        } else {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                et_stop(sc);
        }
        sc->sc_if_flags = ifp->if_flags;
        ET_UNLOCK(sc);
        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
        break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            ET_LOCK(sc);
            et_setmulti(sc);
            ET_UNLOCK(sc);
        }
        break;

    case SIOCSIFMTU:
        ET_LOCK(sc);
#if 0
        if (sc->sc_flags & ET_FLAG_JUMBO)
            max_framelen = ET_JUMBO_FRAMELEN;
        else
#endif
            max_framelen = MCLBYTES - 1;

        if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
            error = EOPNOTSUPP;
            ET_UNLOCK(sc);
            break;
        }

        if (ifp->if_mtu != ifr->ifr_mtu) {
            ifp->if_mtu = ifr->ifr_mtu;
            ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
            et_init_locked(sc);
        }
        ET_UNLOCK(sc);
        break;

    case SIOCSIFCAP:
        ET_LOCK(sc);
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        if ((mask & IFCAP_TXCSUM) != 0 &&
            (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
                ifp->if_hwassist |= ET_CSUM_FEATURES;
            else
                ifp->if_hwassist &= ~ET_CSUM_FEATURES;
        }
        ET_UNLOCK(sc);
        break;

    default:
        error = ether_ioctl(ifp, cmd, data);
        break;
    }
    return (error);
}

static void
et_start_locked(struct ifnet *ifp)
{
    struct et_softc *sc = ifp->if_softc;
    struct et_txbuf_data *tbd;
    struct mbuf *m;
    int trans;

    ET_LOCK_ASSERT(sc);
    tbd = &sc->sc_tx_data;

    if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
        return;

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return;

    trans = 0;
    for (;;) {
        if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }

        IFQ_DEQUEUE(&ifp->if_snd, m);
        if (m == NULL)
            break;

        if (et_encap(sc, &m)) {
            ifp->if_oerrors++;
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }
        trans = 1;

        ETHER_BPF_MTAP(ifp, m);
    }

    if (trans)
        sc->watchdog_timer = 5;
}

static void
et_start(struct ifnet *ifp)
{
    struct et_softc *sc = ifp->if_softc;

    ET_LOCK(sc);
    et_start_locked(ifp);
    ET_UNLOCK(sc);
}

static void
et_watchdog(struct et_softc *sc)
{
    ET_LOCK_ASSERT(sc);

    if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
        return;

    if_printf(sc->ifp, "watchdog timed out\n");

    sc->ifp->if_oerrors++;
    sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    et_init_locked(sc);
    et_start_locked(sc->ifp);
}

static int
et_stop_rxdma(struct et_softc *sc)
{
    CSR_WRITE_4(sc, ET_RXDMA_CTRL,
        ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

    DELAY(5);
    if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
        if_printf(sc->ifp, "can't stop RX DMA engine\n");
        return (ETIMEDOUT);
    }
    return (0);
}

static int
et_stop_txdma(struct et_softc *sc)
{
    CSR_WRITE_4(sc, ET_TXDMA_CTRL,
        ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);

    DELAY(5);
    return (0);
}

static void
et_free_tx_ring(struct et_softc *sc)
{
    struct et_txbuf_data *tbd = &sc->sc_tx_data;
    struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
    int i;

    for (i = 0; i < ET_TX_NDESC; ++i) {
        struct et_txbuf *tb = &tbd->tbd_buf[i];

        if (tb->tb_mbuf != NULL) {
            bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
            m_freem(tb->tb_mbuf);
            tb->tb_mbuf = NULL;
        }
    }

    bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
    bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
        BUS_DMASYNC_PREWRITE);
}

static void
et_free_rx_ring(struct et_softc *sc)
{
    int n;

    for (n = 0; n < ET_RX_NRING; ++n) {
        struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
        struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
        int i;

        for (i = 0; i < ET_RX_NDESC; ++i) {
            struct et_rxbuf *rb = &rbd->rbd_buf[i];

            if (rb->rb_mbuf != NULL) {
                bus_dmamap_unload(sc->sc_mbuf_dtag,
                    rb->rb_dmap);
                m_freem(rb->rb_mbuf);
                rb->rb_mbuf = NULL;
            }
        }

        bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
        bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
            BUS_DMASYNC_PREWRITE);
    }
}

static void
et_setmulti(struct et_softc *sc)
{
    struct ifnet *ifp;
    uint32_t hash[4] = { 0, 0, 0, 0 };
    uint32_t rxmac_ctrl, pktfilt;
    struct ifmultiaddr *ifma;
    int i, count;

    ET_LOCK_ASSERT(sc);
    ifp = sc->ifp;

    pktfilt = CSR_READ_4(sc, ET_PKTFILT);
    rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

    pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
    if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
        rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
        goto back;
    }

    count = 0;
    if_maddr_rlock(ifp);
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        uint32_t *hp, h;

        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;

        h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
            ifma->ifma_addr), ETHER_ADDR_LEN);
        h = (h & 0x3f800000) >> 23;

        hp = &hash[0];
        if (h >= 32 && h < 64) {
            h -= 32;
            hp = &hash[1];
        } else if (h >= 64 && h < 96) {
            h -= 64;
            hp = &hash[2];
        } else if (h >= 96) {
            h -= 96;
            hp = &hash[3];
        }
        *hp |= (1 << h);

        ++count;
    }
    if_maddr_runlock(ifp);

    for (i = 0; i < 4; ++i)
        CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

    if (count > 0)
        pktfilt |= ET_PKTFILT_MCAST;
    rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
    CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
    CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}
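
/*
 * Hash example for the filter above: ether_crc32_be() of the
 * multicast address yields a 32-bit CRC; (h & 0x3f800000) >> 23
 * keeps bits 29..23, i.e. a 7-bit bucket number 0..127.  A CRC of
 * 0x0a000000 gives bucket 20, which sets bit 20 of hash[0]
 * (ET_MULTI_HASH + 0); bucket 77 would land in hash[2] as bit 13.
 */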

static int
et_chip_init(struct et_softc *sc)
{
    struct ifnet *ifp = sc->ifp;
    uint32_t rxq_end;
    int error, frame_len, rxmem_size;

    /*
     * Split 16Kbytes internal memory between TX and RX
     * according to frame length.
     */
    frame_len = ET_FRAMELEN(ifp->if_mtu);
    if (frame_len < 2048) {
        rxmem_size = ET_MEM_RXSIZE_DEFAULT;
    } else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
        rxmem_size = ET_MEM_SIZE / 2;
    } else {
        rxmem_size = ET_MEM_SIZE -
            roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
    }
    rxq_end = ET_QUEUE_ADDR(rxmem_size);

    CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
    CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
    CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
    CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

    /* No loopback */
    CSR_WRITE_4(sc, ET_LOOPBACK, 0);

    /* Clear MSI configure */
    if ((sc->sc_flags & ET_FLAG_MSI) == 0)
        CSR_WRITE_4(sc, ET_MSI_CFG, 0);

    /* Disable timer */
    CSR_WRITE_4(sc, ET_TIMER, 0);

    /* Initialize MAC */
    et_init_mac(sc);

    /* Enable memory controllers */
    CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

    /* Initialize RX MAC */
    et_init_rxmac(sc);

    /* Initialize TX MAC */
    et_init_txmac(sc);

    /* Initialize RX DMA engine */
    error = et_init_rxdma(sc);
    if (error)
        return (error);

    /* Initialize TX DMA engine */
    error = et_init_txdma(sc);
    if (error)
        return (error);

    return (0);
}

static int
et_init_tx_ring(struct et_softc *sc)
{
    struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
    struct et_txstatus_data *txsd = &sc->sc_tx_status;
    struct et_txbuf_data *tbd = &sc->sc_tx_data;

    bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
    bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
        BUS_DMASYNC_PREWRITE);

    tbd->tbd_start_index = 0;
    tbd->tbd_start_wrap = 0;
    tbd->tbd_used = 0;

    bzero(txsd->txsd_status, sizeof(uint32_t));
    bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
        BUS_DMASYNC_PREWRITE);
    return (0);
}

static int
et_init_rx_ring(struct et_softc *sc)
{
    struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
    struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
    int n;

    for (n = 0; n < ET_RX_NRING; ++n) {
        struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
        int i, error;

        for (i = 0; i < ET_RX_NDESC; ++i) {
            error = rbd->rbd_newbuf(rbd, i, 1);
            if (error) {
                if_printf(sc->ifp, "ring %d buf %d: "
                    "newbuf failed: %d\n", n, i, error);
                return (error);
            }
        }
    }

    bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
    bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
        BUS_DMASYNC_PREWRITE);

    bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
    bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
        BUS_DMASYNC_PREWRITE);

    return (0);
}

static void
et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsz __unused, int error)
{
    struct et_dmamap_ctx *ctx = xctx;
    int i;

    if (error)
        return;

    if (nsegs > ctx->nsegs) {
        ctx->nsegs = 0;
        return;
    }

    ctx->nsegs = nsegs;
    for (i = 0; i < nsegs; ++i)
        ctx->segs[i] = segs[i];
}

static int
et_init_rxdma(struct et_softc *sc)
{
    struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
    struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
    struct et_rxdesc_ring *rx_ring;
    int error;

    error = et_stop_rxdma(sc);
    if (error) {
        if_printf(sc->ifp, "can't init RX DMA engine\n");
        return (error);
    }

    /*
     * Install RX status
     */
    CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
    CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

    /*
     * Install RX stat ring
     */
    CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
    CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
    CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
    CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
    CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

    /* Match ET_RXSTAT_POS */
    rxst_ring->rsr_index = 0;
    rxst_ring->rsr_wrap = 0;

    /*
     * Install the 2nd RX descriptor ring
     */
    rx_ring = &sc->sc_rx_ring[1];
    CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
    CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
    CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
    CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
    CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

    /* Match ET_RX_RING1_POS */
    rx_ring->rr_index = 0;
    rx_ring->rr_wrap = 1;

    /*
     * Install the 1st RX descriptor ring
     */
    rx_ring = &sc->sc_rx_ring[0];
    CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
    CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
    CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
    CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
    CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

    /* Match ET_RX_RING0_POS */
    rx_ring->rr_index = 0;
    rx_ring->rr_wrap = 1;

    /*
     * RX intr moderation
     */
    CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
    CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

    return (0);
}
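
/*
 * The three MINCNT writes above arm low-water marks at roughly 15%
 * of each ring's size.  The exact hardware semantics are not spelled
 * out here; presumably the chip flags a "running low" condition once
 * fewer than that many free entries remain.
 */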

static int
et_init_txdma(struct et_softc *sc)
{
    struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
    struct et_txstatus_data *txsd = &sc->sc_tx_status;
    int error;

    error = et_stop_txdma(sc);
    if (error) {
        if_printf(sc->ifp, "can't init TX DMA engine\n");
        return (error);
    }

    /*
     * Install TX descriptor ring
     */
    CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
    CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
    CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

    /*
     * Install TX status
     */
    CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
    CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

    CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

    /* Match ET_TX_READY_POS */
    tx_ring->tr_ready_index = 0;
    tx_ring->tr_ready_wrap = 0;

    return (0);
}

static void
et_init_mac(struct et_softc *sc)
{
    struct ifnet *ifp = sc->ifp;
    const uint8_t *eaddr = IF_LLADDR(ifp);
    uint32_t val;

    /* Reset MAC */
    CSR_WRITE_4(sc, ET_MAC_CFG1,
        ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
        ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
        ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

    /*
     * Setup inter-packet gap
     */
    val = (56 << ET_IPG_NONB2B_1_SHIFT) |
        (88 << ET_IPG_NONB2B_2_SHIFT) |
        (80 << ET_IPG_MINIFG_SHIFT) |
        (96 << ET_IPG_B2B_SHIFT);
    CSR_WRITE_4(sc, ET_IPG, val);

    /*
     * Setup half duplex mode
     */
    val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
        (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
        (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
        ET_MAC_HDX_EXC_DEFER;
    CSR_WRITE_4(sc, ET_MAC_HDX, val);

    /* Clear MAC control */
    CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

    /* Reset MII */
    CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

    /*
     * Set MAC address
     */
    val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
    CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
    val = (eaddr[0] << 16) | (eaddr[1] << 24);
    CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

    /* Set max frame length */
    CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

    /* Bring MAC out of reset state */
    CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_init_rxmac(struct et_softc *sc)
{
    struct ifnet *ifp = sc->ifp;
    const uint8_t *eaddr = IF_LLADDR(ifp);
    uint32_t val;
    int i;

    /* Disable RX MAC and WOL */
    CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

    /*
     * Clear all WOL related registers
     */
    for (i = 0; i < 3; ++i)
        CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
    for (i = 0; i < 20; ++i)
        CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

    /*
     * Set WOL source address.  XXX is this necessary?
     */
    val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
    CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
    val = (eaddr[0] << 8) | eaddr[1];
    CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

    /* Clear packet filters */
    CSR_WRITE_4(sc, ET_PKTFILT, 0);

    /* No ucast filtering */
    CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
    CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
    CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

    if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
        /*
         * In order to transmit jumbo packets greater than
         * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
         * RX MAC and RX DMA needs to be reduced in size to
         * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
         * order to implement this, we must use "cut through"
         * mode in the RX MAC, which chops packets down into
         * segments.  In this case we selected 256 bytes,
         * since this is the size of the PCI-Express TLP's
         * that the ET1310 uses.
         */
        val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
            ET_RXMAC_MC_SEGSZ_ENABLE;
    } else {
        val = 0;
    }
    CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

    CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

    /* Initialize RX MAC management register */
    CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

    CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

    CSR_WRITE_4(sc, ET_RXMAC_MGT,
        ET_RXMAC_MGT_PASS_ECRC |
        ET_RXMAC_MGT_PASS_ELEN |
        ET_RXMAC_MGT_PASS_ETRUNC |
        ET_RXMAC_MGT_CHECK_PKT);

    /*
     * Configure runt filtering (may not work on certain chip generations)
     */
    val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
        ET_PKTFILT_MINLEN_MASK;
    val |= ET_PKTFILT_FRAG;
    CSR_WRITE_4(sc, ET_PKTFILT, val);

    /* Enable RX MAC but leave WOL disabled */
    CSR_WRITE_4(sc, ET_RXMAC_CTRL,
        ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

    /*
     * Setup multicast hash and allmulti/promisc mode
     */
    et_setmulti(sc);
}

static void
et_init_txmac(struct et_softc *sc)
{
    /* Disable TX MAC and FC(?) */
    CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

    /* No flow control yet */
    CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

    /* Enable TX MAC but leave FC(?) disabled */
    CSR_WRITE_4(sc, ET_TXMAC_CTRL,
        ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

static int
et_start_rxdma(struct et_softc *sc)
{
    uint32_t val = 0;

    val |= (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
        ET_RXDMA_CTRL_RING0_ENABLE;
    val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
        ET_RXDMA_CTRL_RING1_ENABLE;

    CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

    DELAY(5);

    if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
        if_printf(sc->ifp, "can't start RX DMA engine\n");
        return (ETIMEDOUT);
    }
    return (0);
}

static int
et_start_txdma(struct et_softc *sc)
{
    CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
    return (0);
}

static int
et_enable_txrx(struct et_softc *sc, int media_upd)
{
    struct ifnet *ifp = sc->ifp;
    uint32_t val;
    int i, error;

    val = CSR_READ_4(sc, ET_MAC_CFG1);
    val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
    val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
        ET_MAC_CFG1_LOOPBACK);
    CSR_WRITE_4(sc, ET_MAC_CFG1, val);

    if (media_upd)
        et_ifmedia_upd_locked(ifp);
    else
        et_setmedia(sc);

#define NRETRY 50

    for (i = 0; i < NRETRY; ++i) {
        val = CSR_READ_4(sc, ET_MAC_CFG1);
        if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
            (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
            break;

        DELAY(100);
    }
    if (i == NRETRY) {
        if_printf(ifp, "can't enable RX/TX\n");
        return (0);
    }
    sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY

    /*
     * Start TX/RX DMA engine
     */
    error = et_start_rxdma(sc);
    if (error)
        return (error);

    error = et_start_txdma(sc);
    if (error)
        return (error);

    return (0);
}
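
/*
 * Note the ordering in et_enable_txrx(): the MAC TX/RX enables are
 * requested first and then polled via the SYNC_TXEN/SYNC_RXEN status
 * bits in ET_MAC_CFG1 until the MAC reports both paths enabled; only
 * after that handshake succeeds are the RX and TX DMA engines
 * started, so descriptors never flow before the MAC is ready.  If
 * the handshake fails (typically link still down), the function
 * returns 0 without setting ET_FLAG_TXRX_ENABLED and et_tick()
 * retries once the link comes up.
 */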

static void
et_rxeof(struct et_softc *sc)
{
    struct ifnet *ifp;
    struct et_rxstatus_data *rxsd;
    struct et_rxstat_ring *rxst_ring;
    uint32_t rxs_stat_ring, rxst_info2;
    int rxst_wrap, rxst_index;

    ET_LOCK_ASSERT(sc);

    ifp = sc->ifp;
    rxsd = &sc->sc_rx_status;
    rxst_ring = &sc->sc_rxstat_ring;

    if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
        return;

    bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
        BUS_DMASYNC_POSTREAD);
    bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
        BUS_DMASYNC_POSTREAD);

    rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
    rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
    rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
        ET_RXS_STATRING_INDEX_SHIFT;

    while (rxst_index != rxst_ring->rsr_index ||
        rxst_wrap != rxst_ring->rsr_wrap) {
        struct et_rxbuf_data *rbd;
        struct et_rxdesc_ring *rx_ring;
        struct et_rxstat *st;
        struct mbuf *m;
        int buflen, buf_idx, ring_idx;
        uint32_t rxstat_pos, rxring_pos;

        MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
        st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
        rxst_info2 = le32toh(st->rxst_info2);
        buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
            ET_RXST_INFO2_LEN_SHIFT;
        buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
            ET_RXST_INFO2_BUFIDX_SHIFT;
        ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
            ET_RXST_INFO2_RINGIDX_SHIFT;

        if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
            rxst_ring->rsr_index = 0;
            rxst_ring->rsr_wrap ^= 1;
        }
        rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
        if (rxst_ring->rsr_wrap)
            rxstat_pos |= ET_RXSTAT_POS_WRAP;
        CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

        if (ring_idx >= ET_RX_NRING) {
            ifp->if_ierrors++;
            if_printf(ifp, "invalid ring index %d\n", ring_idx);
            continue;
        }
        if (buf_idx >= ET_RX_NDESC) {
            ifp->if_ierrors++;
            if_printf(ifp, "invalid buf index %d\n", buf_idx);
            continue;
        }

        rbd = &sc->sc_rx_data[ring_idx];
        m = rbd->rbd_buf[buf_idx].rb_mbuf;

        if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
            if (buflen < ETHER_CRC_LEN) {
                m_freem(m);
                ifp->if_ierrors++;
            } else {
                m->m_pkthdr.len = m->m_len =
                    buflen - ETHER_CRC_LEN;
                m->m_pkthdr.rcvif = ifp;
                ET_UNLOCK(sc);
                ifp->if_input(ifp, m);
                ET_LOCK(sc);
            }
        } else {
            ifp->if_ierrors++;
        }
        m = NULL;   /* Catch invalid reference */

        rx_ring = &sc->sc_rx_ring[ring_idx];

        if (buf_idx != rx_ring->rr_index) {
            if_printf(ifp, "WARNING!! ring %d, "
                "buf_idx %d, rr_idx %d\n",
                ring_idx, buf_idx, rx_ring->rr_index);
        }

        MPASS(rx_ring->rr_index < ET_RX_NDESC);
        if (++rx_ring->rr_index == ET_RX_NDESC) {
            rx_ring->rr_index = 0;
            rx_ring->rr_wrap ^= 1;
        }
        rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
        if (rx_ring->rr_wrap)
            rxring_pos |= ET_RX_RING_POS_WRAP;
        CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
    }
}
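
/*
 * The index/wrap pairs used above are how producer and consumer
 * positions are compared without an ambiguous full-vs-empty case:
 * each side carries its ring index plus a wrap bit that toggles on
 * every wrap-around, so equal indices with different wrap bits mean
 * "one full lap apart" rather than "empty".  The same convention is
 * used for the TX ring in et_encap() and et_txeof() below.
 */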

static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
    struct mbuf *m = *m0;
    bus_dma_segment_t segs[ET_NSEG_MAX];
    struct et_dmamap_ctx ctx;
    struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
    struct et_txbuf_data *tbd = &sc->sc_tx_data;
    struct et_txdesc *td;
    bus_dmamap_t map;
    int error, maxsegs, first_idx, last_idx, i;
    uint32_t csum_flags, tx_ready_pos, last_td_ctrl2;

    maxsegs = ET_TX_NDESC - tbd->tbd_used;
    if (maxsegs > ET_NSEG_MAX)
        maxsegs = ET_NSEG_MAX;
    KASSERT(maxsegs >= ET_NSEG_SPARE,
        ("not enough spare TX desc (%d)\n", maxsegs));

    MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
    first_idx = tx_ring->tr_ready_index;
    map = tbd->tbd_buf[first_idx].tb_dmap;

    ctx.nsegs = maxsegs;
    ctx.segs = segs;
    error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
        et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT);
    if (!error && ctx.nsegs == 0) {
        bus_dmamap_unload(sc->sc_mbuf_dtag, map);
        error = EFBIG;
    }
    if (error && error != EFBIG) {
        if_printf(sc->ifp, "can't load TX mbuf, error %d\n",
            error);
        goto back;
    }
    if (error) {    /* error == EFBIG */
        struct mbuf *m_new;

        m_new = m_defrag(m, M_DONTWAIT);
        if (m_new == NULL) {
            if_printf(sc->ifp, "can't defrag TX mbuf\n");
            error = ENOBUFS;
            goto back;
        } else {
            *m0 = m = m_new;
        }

        ctx.nsegs = maxsegs;
        ctx.segs = segs;
        error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
            et_dma_buf_addr, &ctx,
            BUS_DMA_NOWAIT);
        if (error || ctx.nsegs == 0) {
            if (ctx.nsegs == 0) {
                bus_dmamap_unload(sc->sc_mbuf_dtag, map);
                error = EFBIG;
            }
            if_printf(sc->ifp,
                "can't load defragmented TX mbuf\n");
            goto back;
        }
    }

    bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE);

    last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
    sc->sc_tx += ctx.nsegs;
    if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
        sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
        last_td_ctrl2 |= ET_TDCTRL2_INTR;
    }

    csum_flags = 0;
    if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
        if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
            csum_flags |= ET_TDCTRL2_CSUM_IP;
        if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
            csum_flags |= ET_TDCTRL2_CSUM_UDP;
        else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
            csum_flags |= ET_TDCTRL2_CSUM_TCP;
    }

    last_idx = -1;
    for (i = 0; i < ctx.nsegs; ++i) {
        int idx;

        idx = (first_idx + i) % ET_TX_NDESC;
        td = &tx_ring->tr_desc[idx];
        td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
        td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
        td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
        if (i == ctx.nsegs - 1) {   /* Last frag */
            td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
            last_idx = idx;
        } else
            td->td_ctrl2 = htole32(csum_flags);

        MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
        if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
            tx_ring->tr_ready_index = 0;
            tx_ring->tr_ready_wrap ^= 1;
        }
    }
    td = &tx_ring->tr_desc[first_idx];
    td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG); /* First frag */

    MPASS(last_idx >= 0);
    tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
    tbd->tbd_buf[last_idx].tb_dmap = map;
    tbd->tbd_buf[last_idx].tb_mbuf = m;

    tbd->tbd_used += ctx.nsegs;
    MPASS(tbd->tbd_used <= ET_TX_NDESC);

    bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
        BUS_DMASYNC_PREWRITE);

    tx_ready_pos = tx_ring->tr_ready_index & ET_TX_READY_POS_INDEX_MASK;
    if (tx_ring->tr_ready_wrap)
        tx_ready_pos |= ET_TX_READY_POS_WRAP;
    CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

    error = 0;
back:
    if (error) {
        m_freem(m);
        *m0 = NULL;
    }
    return (error);
}
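
/*
 * TX interrupt moderation above: sc_tx counts every DMA segment ever
 * queued, and ET_TDCTRL2_INTR is requested only on the descriptor
 * that crosses the next multiple of sc_tx_intr_nsegs, so the chip
 * posts roughly one TX completion interrupt per sc_tx_intr_nsegs
 * segments instead of one per packet.
 */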

static void
et_txeof(struct et_softc *sc)
{
    struct ifnet *ifp;
    struct et_txdesc_ring *tx_ring;
    struct et_txbuf_data *tbd;
    uint32_t tx_done;
    int end, wrap;

    ET_LOCK_ASSERT(sc);

    ifp = sc->ifp;
    tx_ring = &sc->sc_tx_ring;
    tbd = &sc->sc_tx_data;

    if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
        return;

    if (tbd->tbd_used == 0)
        return;

    tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
    end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
    wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

    while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
        struct et_txbuf *tb;

        MPASS(tbd->tbd_start_index < ET_TX_NDESC);
        tb = &tbd->tbd_buf[tbd->tbd_start_index];

        bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
            sizeof(struct et_txdesc));
        bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
            BUS_DMASYNC_PREWRITE);

        if (tb->tb_mbuf != NULL) {
            bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
            m_freem(tb->tb_mbuf);
            tb->tb_mbuf = NULL;
            ifp->if_opackets++;
        }

        if (++tbd->tbd_start_index == ET_TX_NDESC) {
            tbd->tbd_start_index = 0;
            tbd->tbd_start_wrap ^= 1;
        }

        MPASS(tbd->tbd_used > 0);
        tbd->tbd_used--;
    }

    if (tbd->tbd_used == 0)
        sc->watchdog_timer = 0;
    if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    et_start_locked(ifp);
}

static void
et_tick(void *xsc)
{
    struct et_softc *sc = xsc;
    struct ifnet *ifp;
    struct mii_data *mii;

    ET_LOCK_ASSERT(sc);
    ifp = sc->ifp;
    mii = device_get_softc(sc->sc_miibus);

    mii_tick(mii);
    if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
        (mii->mii_media_status & IFM_ACTIVE) &&
        IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
        if_printf(ifp, "Link up, enable TX/RX\n");
        if (et_enable_txrx(sc, 0) == 0)
            et_start_locked(ifp);
    }
    et_watchdog(sc);
    callout_reset(&sc->sc_tick, hz, et_tick, sc);
}

static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
    return (et_newbuf(rbd, buf_idx, init, MCLBYTES));
}

static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
    return (et_newbuf(rbd, buf_idx, init, MHLEN));
}

static int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
    struct et_softc *sc = rbd->rbd_softc;
    struct et_rxbuf *rb;
    struct mbuf *m;
    struct et_dmamap_ctx ctx;
    bus_dma_segment_t seg;
    bus_dmamap_t dmap;
    int error, len;

    MPASS(buf_idx < ET_RX_NDESC);
    rb = &rbd->rbd_buf[buf_idx];

    m = m_getl(len0, /* init ? M_WAIT :*/ M_DONTWAIT, MT_DATA, M_PKTHDR,
        &len);
    if (m == NULL) {
        error = ENOBUFS;

        if (init) {
            if_printf(sc->ifp,
                "m_getl failed, size %d\n", len0);
            return (error);
        } else {
            goto back;
        }
    }
    m->m_len = m->m_pkthdr.len = len;

    /*
     * Try load RX mbuf into temporary DMA map
     */
    ctx.nsegs = 1;
    ctx.segs = &seg;
    error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m,
        et_dma_buf_addr, &ctx,
        init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
    if (error || ctx.nsegs == 0) {
        if (!error) {
            bus_dmamap_unload(sc->sc_mbuf_dtag,
                sc->sc_mbuf_tmp_dmap);
            error = EFBIG;
            if_printf(sc->ifp, "too many segments?!\n");
        }
        m_freem(m);

        if (init) {
            if_printf(sc->ifp, "can't load RX mbuf\n");
            return (error);
        } else {
            goto back;
        }
    }

    if (!init) {
        bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap);
    }
    rb->rb_mbuf = m;
    rb->rb_paddr = seg.ds_addr;

    /*
     * Swap RX buf's DMA map with the loaded temporary one
     */
    dmap = rb->rb_dmap;
    rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
    sc->sc_mbuf_tmp_dmap = dmap;

    error = 0;
back:
    et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
    return (error);
}
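
/*
 * The spare-map dance above is deliberate: the replacement mbuf is
 * loaded into sc_mbuf_tmp_dmap first, so if allocation or the DMA
 * load fails outside of initialization the old mbuf stays mapped and
 * its descriptor is simply rewritten with the old rb_paddr; only on
 * success are the slot's map and the spare map swapped.
 */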

/*
 * Create sysctl tree
 */
static void
et_add_sysctls(struct et_softc *sc)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;

    ctx = device_get_sysctl_ctx(sc->dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
        CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
        "RX IM, # packets per RX interrupt");
    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
        CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
        "RX IM, RX interrupt delay (x10 usec)");
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
        CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
        "TX IM, # segments per TX interrupt");
    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
        CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
}
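
/*
 * The nodes above hang off the device's sysctl tree, so at runtime
 * they appear under dev.et.<unit>; for example (assuming unit 0):
 *
 *      sysctl dev.et.0.rx_intr_npkts=64
 *      sysctl dev.et.0.tx_intr_nsegs=128
 *
 * The PROC handlers below validate the new RX values and push them
 * to the hardware when the interface is running.
 */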

static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
    struct et_softc *sc = arg1;
    struct ifnet *ifp = sc->ifp;
    int error = 0, v;

    v = sc->sc_rx_intr_npkts;
    error = sysctl_handle_int(oidp, &v, 0, req);
    if (error || req->newptr == NULL)
        goto back;
    if (v <= 0) {
        error = EINVAL;
        goto back;
    }

    if (sc->sc_rx_intr_npkts != v) {
        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
            CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
        sc->sc_rx_intr_npkts = v;
    }
back:
    return (error);
}

static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
    struct et_softc *sc = arg1;
    struct ifnet *ifp = sc->ifp;
    int error = 0, v;

    v = sc->sc_rx_intr_delay;
    error = sysctl_handle_int(oidp, &v, 0, req);
    if (error || req->newptr == NULL)
        goto back;
    if (v <= 0) {
        error = EINVAL;
        goto back;
    }

    if (sc->sc_rx_intr_delay != v) {
        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
            CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
        sc->sc_rx_intr_delay = v;
    }
back:
    return (error);
}

static void
et_setmedia(struct et_softc *sc)
{
    struct mii_data *mii = device_get_softc(sc->sc_miibus);
    uint32_t cfg2, ctrl;

    cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
    cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
        ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
    cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
        ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
        ET_MAC_CFG2_PREAMBLE_LEN_MASK);

    ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
    ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

    if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
        cfg2 |= ET_MAC_CFG2_MODE_GMII;
    } else {
        cfg2 |= ET_MAC_CFG2_MODE_MII;
        ctrl |= ET_MAC_CTRL_MODE_MII;
    }

    if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
        cfg2 |= ET_MAC_CFG2_FDX;
    else
        ctrl |= ET_MAC_CTRL_GHDX;

    CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
    CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

static void
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
{
    struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
    struct et_rxdesc *desc;

    MPASS(buf_idx < ET_RX_NDESC);
    desc = &rx_ring->rr_desc[buf_idx];

    desc->rd_addr_hi = htole32(ET_ADDR_HI(paddr));
    desc->rd_addr_lo = htole32(ET_ADDR_LO(paddr));
    desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);

    bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
        BUS_DMASYNC_PREWRITE);
}