/*-
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <machine/bus.h>

#include <dev/mii/miivar.h>
#include <dev/mii/truephyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/et/if_etreg.h>
#include <dev/et/if_etvar.h>

#include "miibus_if.h"
MODULE_DEPEND(et, pci, 1, 1, 1);
MODULE_DEPEND(et, ether, 1, 1, 1);
MODULE_DEPEND(et, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
TUNABLE_INT("hw.et.msi_disable", &msi_disable);
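/*
 * Example loader.conf(5) usage for the tunable registered above (a sketch;
 * the value shown is hypothetical):
 *
 *   hw.et.msi_disable="1"        # fall back to INTx even if MSI is available
 */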
#define ET_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
static int  et_probe(device_t);
static int  et_attach(device_t);
static int  et_detach(device_t);
static int  et_shutdown(device_t);

static int  et_miibus_readreg(device_t, int, int);
static int  et_miibus_writereg(device_t, int, int, int);
static void et_miibus_statchg(device_t);

static void et_init_locked(struct et_softc *);
static void et_init(void *);
static int  et_ioctl(struct ifnet *, u_long, caddr_t);
static void et_start_locked(struct ifnet *);
static void et_start(struct ifnet *);
static void et_watchdog(struct et_softc *);
static int  et_ifmedia_upd_locked(struct ifnet *);
static int  et_ifmedia_upd(struct ifnet *);
static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void et_add_sysctls(struct et_softc *);
static int  et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int  et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void et_intr(void *);
static void et_enable_intrs(struct et_softc *, uint32_t);
static void et_disable_intrs(struct et_softc *);
static void et_rxeof(struct et_softc *);
static void et_txeof(struct et_softc *);

static int  et_dma_alloc(device_t);
static void et_dma_free(device_t);
static int  et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *,
        void **, bus_addr_t *, bus_dmamap_t *);
static void et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static int  et_dma_mbuf_create(device_t);
static void et_dma_mbuf_destroy(device_t, int, const int[]);
static void et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
static void et_dma_buf_addr(void *, bus_dma_segment_t *, int,
        bus_size_t, int);
static int  et_init_tx_ring(struct et_softc *);
static int  et_init_rx_ring(struct et_softc *);
static void et_free_tx_ring(struct et_softc *);
static void et_free_rx_ring(struct et_softc *);
static int  et_encap(struct et_softc *, struct mbuf **);
static int  et_newbuf(struct et_rxbuf_data *, int, int, int);
static int  et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int  et_newbuf_hdr(struct et_rxbuf_data *, int, int);

static void et_stop(struct et_softc *);
static int  et_chip_init(struct et_softc *);
static void et_chip_attach(struct et_softc *);
static void et_init_mac(struct et_softc *);
static void et_init_rxmac(struct et_softc *);
static void et_init_txmac(struct et_softc *);
static int  et_init_rxdma(struct et_softc *);
static int  et_init_txdma(struct et_softc *);
static int  et_start_rxdma(struct et_softc *);
static int  et_start_txdma(struct et_softc *);
static int  et_stop_rxdma(struct et_softc *);
static int  et_stop_txdma(struct et_softc *);
static int  et_enable_txrx(struct et_softc *, int);
static void et_reset(struct et_softc *);
static int  et_bus_config(struct et_softc *);
static void et_get_eaddr(device_t, uint8_t[]);
static void et_setmulti(struct et_softc *);
static void et_tick(void *);
static void et_setmedia(struct et_softc *);
static void et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);
static const struct et_dev {
    uint16_t    vid;
    uint16_t    did;
    const char  *desc;
} et_devices[] = {
    { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
      "Agere ET1310 Gigabit Ethernet" },
    { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
      "Agere ET1310 Fast Ethernet" },
    { 0, 0, NULL }
};
static device_method_t et_methods[] = {
    DEVMETHOD(device_probe,     et_probe),
    DEVMETHOD(device_attach,    et_attach),
    DEVMETHOD(device_detach,    et_detach),
    DEVMETHOD(device_shutdown,  et_shutdown),

    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),

    DEVMETHOD(miibus_readreg,   et_miibus_readreg),
    DEVMETHOD(miibus_writereg,  et_miibus_writereg),
    DEVMETHOD(miibus_statchg,   et_miibus_statchg),

    { 0, 0 }
};
static driver_t et_driver = {
    "et",
    et_methods,
    sizeof(struct et_softc)
};

static devclass_t et_devclass;

DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);
static int      et_rx_intr_npkts = 32;
static int      et_rx_intr_delay = 20;      /* x10 usec */
static int      et_tx_intr_nsegs = 126;
static uint32_t et_timer = 1000 * 1000 * 1000;  /* nanosec */

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
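/*
 * Notes on the interrupt-moderation defaults above (a summary of how the
 * values are consumed later in this file, not chip documentation):
 * - et_rx_intr_npkts/et_rx_intr_delay are programmed into
 *   ET_RX_INTR_NPKTS/ET_RX_INTR_DELAY by et_init_rxdma(); per the sysctl
 *   descriptions below they bound RX interrupts to roughly one per 32
 *   packets, with the delay expressed in units of 10 usec (20 -> 200us).
 * - et_tx_intr_nsegs is consumed by et_encap(), which requests a TX
 *   interrupt (ET_TDCTRL2_INTR) about once every 126 TX segments.
 * - et_timer is the interval written to the ET_TIMER register by
 *   et_init_locked() and rearmed in et_intr().
 */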
struct et_bsize {
    int     bufsize;
    et_newbuf_t newbuf;
};

static const struct et_bsize et_bufsize_std[ET_RX_NRING] = {
    { .bufsize = ET_RXDMA_CTRL_RING0_128,
      .newbuf = et_newbuf_hdr },
    { .bufsize = ET_RXDMA_CTRL_RING1_2048,
      .newbuf = et_newbuf_cluster },
};
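/*
 * The two entries above give each RX ring its own refill strategy: ring 0
 * is refilled with small header mbufs (ET_RXDMA_CTRL_RING0_128) via
 * et_newbuf_hdr(), ring 1 with 2K clusters (ET_RXDMA_CTRL_RING1_2048) via
 * et_newbuf_cluster().  Which ring a received frame landed in is reported
 * back per packet; see the ring_idx handling in et_rxeof().
 */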
static int
et_probe(device_t dev)
{
    const struct et_dev *d;
    uint16_t did, vid;

    vid = pci_get_vendor(dev);
    did = pci_get_device(dev);

    for (d = et_devices; d->desc != NULL; ++d) {
        if (vid == d->vid && did == d->did) {
            device_set_desc(dev, d->desc);
            return (0);
        }
    }
    return (ENXIO);
}
static int
et_attach(device_t dev)
{
    struct et_softc *sc;
    struct ifnet *ifp;
    uint8_t eaddr[ETHER_ADDR_LEN];
    int cap, error, msic;

    sc = device_get_softc(dev);
    sc->dev = dev;
    mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
        MTX_DEF);

    ifp = sc->ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        device_printf(dev, "can not if_alloc()\n");
        error = ENOSPC;
        goto fail;
    }

    /*
     * Initialize tunables
     */
    sc->sc_rx_intr_npkts = et_rx_intr_npkts;
    sc->sc_rx_intr_delay = et_rx_intr_delay;
    sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
    sc->sc_timer = et_timer;

    /* Enable bus mastering */
    pci_enable_busmaster(dev);

    /*
     * Allocate IO memory
     */
    sc->sc_mem_rid = ET_PCIR_BAR;
    sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &sc->sc_mem_rid, RF_ACTIVE);
    if (sc->sc_mem_res == NULL) {
        device_printf(dev, "can't allocate IO memory\n");
        return (ENXIO);
    }

    msic = 0;
    if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) == 0) {
        sc->sc_expcap = cap;
        sc->sc_flags |= ET_FLAG_PCIE;
        msic = pci_msi_count(dev);
        if (bootverbose)
            device_printf(dev, "MSI count: %d\n", msic);
    }
    if (msic > 0 && msi_disable == 0) {
        msic = 1;
        if (pci_alloc_msi(dev, &msic) == 0) {
            if (msic == 1) {
                device_printf(dev, "Using %d MSI message\n",
                    msic);
                sc->sc_flags |= ET_FLAG_MSI;
            } else
                pci_release_msi(dev);
        }
    }

    /*
     * Allocate IRQ
     */
    if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
        sc->sc_irq_rid = 0;
        sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
    } else {
        sc->sc_irq_rid = 1;
        sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &sc->sc_irq_rid, RF_ACTIVE);
    }
    if (sc->sc_irq_res == NULL) {
        device_printf(dev, "can't allocate irq\n");
        error = ENXIO;
        goto fail;
    }

    error = et_bus_config(sc);
    if (error)
        goto fail;

    et_get_eaddr(dev, eaddr);

    CSR_WRITE_4(sc, ET_PM,
        ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

    et_reset(sc);

    et_disable_intrs(sc);

    error = et_dma_alloc(dev);
    if (error)
        goto fail;

    ifp->if_softc = sc;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_init = et_init;
    ifp->if_ioctl = et_ioctl;
    ifp->if_start = et_start;
    ifp->if_mtu = ETHERMTU;
    ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU;
    ifp->if_capenable = ifp->if_capabilities;
    IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
    IFQ_SET_READY(&ifp->if_snd);

    et_chip_attach(sc);

    error = mii_phy_probe(dev, &sc->sc_miibus,
        et_ifmedia_upd, et_ifmedia_sts);
    if (error) {
        device_printf(dev, "can't probe any PHY\n");
        goto fail;
    }

    ether_ifattach(ifp, eaddr);
    callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);

    error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
        NULL, et_intr, sc, &sc->sc_irq_handle);
    if (error) {
        ether_ifdetach(ifp);
        device_printf(dev, "can't setup intr\n");
        goto fail;
    }

    et_add_sysctls(sc);

    return (0);
fail:
    et_detach(dev);
    return (error);
}
static int
et_detach(device_t dev)
{
    struct et_softc *sc = device_get_softc(dev);

    if (device_is_attached(dev)) {
        struct ifnet *ifp = sc->ifp;

        ET_LOCK(sc);
        et_stop(sc);
        bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
        ET_UNLOCK(sc);

        ether_ifdetach(ifp);
    }

    if (sc->sc_miibus != NULL)
        device_delete_child(dev, sc->sc_miibus);
    bus_generic_detach(dev);

    if (sc->sc_irq_res != NULL) {
        bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
            sc->sc_irq_res);
    }
    if ((sc->sc_flags & ET_FLAG_MSI) != 0)
        pci_release_msi(dev);

    if (sc->sc_mem_res != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
            sc->sc_mem_res);
    }

    et_dma_free(dev);

    if (sc->ifp != NULL)
        if_free(sc->ifp);

    mtx_destroy(&sc->sc_mtx);

    return (0);
}
static int
et_shutdown(device_t dev)
{
    struct et_softc *sc = device_get_softc(dev);

    ET_LOCK(sc);
    et_stop(sc);
    ET_UNLOCK(sc);
    return (0);
}
static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
    struct et_softc *sc = device_get_softc(dev);
    uint32_t val;
    int i, ret;

    /* Stop any pending operations */
    CSR_WRITE_4(sc, ET_MII_CMD, 0);

    val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
    val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
    CSR_WRITE_4(sc, ET_MII_ADDR, val);

    /* Start reading */
    CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY 50

    for (i = 0; i < NRETRY; ++i) {
        val = CSR_READ_4(sc, ET_MII_IND);
        if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
            break;
        DELAY(50);
    }
    if (i == NRETRY) {
        if_printf(sc->ifp,
            "read phy %d, reg %d timed out\n", phy, reg);
        ret = 0;
        goto back;
    }

#undef NRETRY

    val = CSR_READ_4(sc, ET_MII_STAT);
    ret = val & ET_MII_STAT_VALUE_MASK;

back:
    /* Make sure that the current operation is stopped */
    CSR_WRITE_4(sc, ET_MII_CMD, 0);

    return (ret);
}
static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
    struct et_softc *sc = device_get_softc(dev);
    uint32_t val;
    int i;

    /* Stop any pending operations */
    CSR_WRITE_4(sc, ET_MII_CMD, 0);

    val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
    val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
    CSR_WRITE_4(sc, ET_MII_ADDR, val);

    /* Start writing */
    CSR_WRITE_4(sc, ET_MII_CTRL,
        (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);

#define NRETRY 100

    for (i = 0; i < NRETRY; ++i) {
        val = CSR_READ_4(sc, ET_MII_IND);
        if ((val & ET_MII_IND_BUSY) == 0)
            break;
        DELAY(50);
    }
    if (i == NRETRY) {
        if_printf(sc->ifp,
            "write phy %d, reg %d timed out\n", phy, reg);
        et_miibus_readreg(dev, phy, reg);
    }

#undef NRETRY

    /* Make sure that the current operation is stopped */
    CSR_WRITE_4(sc, ET_MII_CMD, 0);

    return (0);
}
static void
et_miibus_statchg(device_t dev)
{
    et_setmedia(device_get_softc(dev));
}
static int
et_ifmedia_upd_locked(struct ifnet *ifp)
{
    struct et_softc *sc = ifp->if_softc;
    struct mii_data *mii = device_get_softc(sc->sc_miibus);

    if (mii->mii_instance != 0) {
        struct mii_softc *miisc;

        LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
            mii_phy_reset(miisc);
    }
    mii_mediachg(mii);

    return (0);
}
static int
et_ifmedia_upd(struct ifnet *ifp)
{
    struct et_softc *sc = ifp->if_softc;
    int res;

    ET_LOCK(sc);
    res = et_ifmedia_upd_locked(ifp);
    ET_UNLOCK(sc);

    return (res);
}
static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct et_softc *sc = ifp->if_softc;
    struct mii_data *mii = device_get_softc(sc->sc_miibus);

    mii_pollstat(mii);
    ifmr->ifm_active = mii->mii_media_active;
    ifmr->ifm_status = mii->mii_media_status;
}
static void
et_stop(struct et_softc *sc)
{
    struct ifnet *ifp = sc->ifp;

    ET_LOCK_ASSERT(sc);

    callout_stop(&sc->sc_tick);

    et_stop_rxdma(sc);
    et_stop_txdma(sc);

    et_disable_intrs(sc);

    et_free_tx_ring(sc);
    et_free_rx_ring(sc);

    et_reset(sc);

    sc->sc_tx = 0;
    sc->sc_tx_intr = 0;
    sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

    sc->watchdog_timer = 0;
    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
static int
et_bus_config(struct et_softc *sc)
{
    uint32_t val, max_plsz;
    uint16_t ack_latency, replay_timer;

    /*
     * Test whether EEPROM is valid
     * NOTE: Read twice to get the correct value
     */
    pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
    val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
    if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
        device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
        return (ENXIO);
    }

    /* TODO: LED */

    if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
        return (0);

    /*
     * Configure ACK latency and replay timer according to
     * max payload size
     */
    val = pci_read_config(sc->dev,
        sc->sc_expcap + PCIR_EXPRESS_DEVICE_CAP, 4);
    max_plsz = val & PCIM_EXP_CAP_MAX_PAYLOAD;

    switch (max_plsz) {
    case ET_PCIV_DEVICE_CAPS_PLSZ_128:
        ack_latency = ET_PCIV_ACK_LATENCY_128;
        replay_timer = ET_PCIV_REPLAY_TIMER_128;
        break;

    case ET_PCIV_DEVICE_CAPS_PLSZ_256:
        ack_latency = ET_PCIV_ACK_LATENCY_256;
        replay_timer = ET_PCIV_REPLAY_TIMER_256;
        break;

    default:
        ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
        replay_timer = pci_read_config(sc->dev,
            ET_PCIR_REPLAY_TIMER, 2);
        device_printf(sc->dev, "ack latency %u, replay timer %u\n",
            ack_latency, replay_timer);
        break;
    }
    if (ack_latency != 0) {
        pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
        pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
            2);
    }

    /*
     * Set L0s and L1 latency timer to 2us
     * (the exact encoded constants below are an assumption based on the
     * PCIe link-capabilities exit-latency fields cleared above)
     */
    val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
    val &= ~(PCIM_LINK_CAP_L0S_EXIT | PCIM_LINK_CAP_L1_EXIT);
    /* L0s exit latency : 2us */
    val |= 0x00005000;
    /* L1 exit latency : 2us */
    val |= 0x00028000;
    pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);

    /*
     * Set max read request size to 2048 bytes
     */
    val = pci_read_config(sc->dev,
        sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
    val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
    val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
    pci_write_config(sc->dev,
        sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, val, 2);

    return (0);
}
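/*
 * The MAC address lives in PCI configuration space: the unpacking loops in
 * et_get_eaddr() below read ET_PCIR_MAC_ADDR0 as the four low bytes (least
 * significant byte first) and ET_PCIR_MAC_ADDR1 as the remaining two.
 */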
static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
    uint32_t val;
    int i;

    val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
    for (i = 0; i < 4; ++i)
        eaddr[i] = (val >> (8 * i)) & 0xff;

    val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
    for (; i < ETHER_ADDR_LEN; ++i)
        eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}
static void
et_reset(struct et_softc *sc)
{
    CSR_WRITE_4(sc, ET_MAC_CFG1,
        ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
        ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
        ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

    CSR_WRITE_4(sc, ET_SWRST,
        ET_SWRST_TXDMA | ET_SWRST_RXDMA |
        ET_SWRST_TXMAC | ET_SWRST_RXMAC |
        ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

    CSR_WRITE_4(sc, ET_MAC_CFG1,
        ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
        ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
    CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}
static void
et_disable_intrs(struct et_softc *sc)
{
    CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
    CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}
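/*
 * DMA resources created below: one parent tag (sc_dtag) and, under it,
 * coherent memory for the TX descriptor ring, the single 32-bit TX status
 * word, ET_RX_NRING RX descriptor rings, the RX stat ring and the RX
 * status block, plus per-mbuf maps created by et_dma_mbuf_create().
 */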
static int
et_dma_alloc(device_t dev)
{
    struct et_softc *sc = device_get_softc(dev);
    struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
    struct et_txstatus_data *txsd = &sc->sc_tx_status;
    struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
    struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
    int i, error;

    /*
     * Create top level DMA tag
     */
    error = bus_dma_tag_create(NULL, 1, 0,
                   BUS_SPACE_MAXADDR_32BIT,
                   BUS_SPACE_MAXADDR,
                   NULL, NULL,
                   MAXBSIZE,
                   BUS_SPACE_UNRESTRICTED,
                   BUS_SPACE_MAXSIZE_32BIT,
                   0, NULL, NULL, &sc->sc_dtag);
    if (error) {
        device_printf(dev, "can't create DMA tag\n");
        return (error);
    }

    /*
     * Create TX ring DMA stuffs
     */
    error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag,
                  (void **)&tx_ring->tr_desc,
                  &tx_ring->tr_paddr, &tx_ring->tr_dmap);
    if (error) {
        device_printf(dev, "can't create TX ring DMA stuffs\n");
        return (error);
    }

    /*
     * Create TX status DMA stuffs
     */
    error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag,
                  (void **)&txsd->txsd_status,
                  &txsd->txsd_paddr, &txsd->txsd_dmap);
    if (error) {
        device_printf(dev, "can't create TX status DMA stuffs\n");
        return (error);
    }

    /*
     * Create DMA stuffs for RX rings
     */
    for (i = 0; i < ET_RX_NRING; ++i) {
        static const uint32_t rx_ring_posreg[ET_RX_NRING] =
        { ET_RX_RING0_POS, ET_RX_RING1_POS };

        struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

        error = et_dma_mem_create(dev, ET_RX_RING_SIZE,
                      &rx_ring->rr_dtag,
                      (void **)&rx_ring->rr_desc,
                      &rx_ring->rr_paddr, &rx_ring->rr_dmap);
        if (error) {
            device_printf(dev, "can't create DMA stuffs for "
                      "the %d RX ring\n", i);
            return (error);
        }
        rx_ring->rr_posreg = rx_ring_posreg[i];
    }

    /*
     * Create RX stat ring DMA stuffs
     */
    error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE,
                  &rxst_ring->rsr_dtag,
                  (void **)&rxst_ring->rsr_stat,
                  &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap);
    if (error) {
        device_printf(dev, "can't create RX stat ring DMA stuffs\n");
        return (error);
    }

    /*
     * Create RX status DMA stuffs
     */
    error = et_dma_mem_create(dev, sizeof(struct et_rxstatus),
                  &rxsd->rxsd_dtag,
                  (void **)&rxsd->rxsd_status,
                  &rxsd->rxsd_paddr, &rxsd->rxsd_dmap);
    if (error) {
        device_printf(dev, "can't create RX status DMA stuffs\n");
        return (error);
    }

    /*
     * Create mbuf DMA stuffs
     */
    error = et_dma_mbuf_create(dev);
    if (error)
        return (error);

    return (0);
}
static void
et_dma_free(device_t dev)
{
    struct et_softc *sc = device_get_softc(dev);
    struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
    struct et_txstatus_data *txsd = &sc->sc_tx_status;
    struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
    struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
    int i, rx_done[ET_RX_NRING];

    /*
     * Destroy TX ring DMA stuffs
     */
    et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
               tx_ring->tr_dmap);

    /*
     * Destroy TX status DMA stuffs
     */
    et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
               txsd->txsd_dmap);

    /*
     * Destroy DMA stuffs for RX rings
     */
    for (i = 0; i < ET_RX_NRING; ++i) {
        struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

        et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
                   rx_ring->rr_dmap);
    }

    /*
     * Destroy RX stat ring DMA stuffs
     */
    et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
               rxst_ring->rsr_dmap);

    /*
     * Destroy RX status DMA stuffs
     */
    et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
               rxsd->rxsd_dmap);

    /*
     * Destroy mbuf DMA stuffs
     */
    for (i = 0; i < ET_RX_NRING; ++i)
        rx_done[i] = ET_RX_NDESC;
    et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);

    /*
     * Destroy top level DMA tag
     */
    if (sc->sc_dtag != NULL)
        bus_dma_tag_destroy(sc->sc_dtag);
}
static int
et_dma_mbuf_create(device_t dev)
{
    struct et_softc *sc = device_get_softc(dev);
    struct et_txbuf_data *tbd = &sc->sc_tx_data;
    int i, error, rx_done[ET_RX_NRING];

    /*
     * Create mbuf DMA tag
     */
    error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
                   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
                   NULL, NULL,
                   ET_JUMBO_FRAMELEN, ET_NSEG_MAX,
                   BUS_SPACE_MAXSIZE_32BIT,
                   BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_mbuf_dtag);
    if (error) {
        device_printf(dev, "can't create mbuf DMA tag\n");
        return (error);
    }

    /*
     * Create spare DMA map for RX mbufs
     */
    error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap);
    if (error) {
        device_printf(dev, "can't create spare mbuf DMA map\n");
        bus_dma_tag_destroy(sc->sc_mbuf_dtag);
        sc->sc_mbuf_dtag = NULL;
        return (error);
    }

    /*
     * Create DMA maps for RX mbufs
     */
    bzero(rx_done, sizeof(rx_done));
    for (i = 0; i < ET_RX_NRING; ++i) {
        struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
        int j;

        for (j = 0; j < ET_RX_NDESC; ++j) {
            error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
                          &rbd->rbd_buf[j].rb_dmap);
            if (error) {
                device_printf(dev, "can't create %d RX mbuf "
                          "for %d RX ring\n", j, i);
                rx_done[i] = j;
                et_dma_mbuf_destroy(dev, 0, rx_done);
                return (error);
            }
        }
        rx_done[i] = ET_RX_NDESC;

        rbd->rbd_softc = sc;
        rbd->rbd_ring = &sc->sc_rx_ring[i];
    }

    /*
     * Create DMA maps for TX mbufs
     */
    for (i = 0; i < ET_TX_NDESC; ++i) {
        error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
                      &tbd->tbd_buf[i].tb_dmap);
        if (error) {
            device_printf(dev, "can't create %d TX mbuf "
                      "DMA map\n", i);
            et_dma_mbuf_destroy(dev, i, rx_done);
            return (error);
        }
    }

    return (0);
}
static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
    struct et_softc *sc = device_get_softc(dev);
    struct et_txbuf_data *tbd = &sc->sc_tx_data;
    int i;

    if (sc->sc_mbuf_dtag == NULL)
        return;

    /*
     * Destroy DMA maps for RX mbufs
     */
    for (i = 0; i < ET_RX_NRING; ++i) {
        struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
        int j;

        for (j = 0; j < rx_done[i]; ++j) {
            struct et_rxbuf *rb = &rbd->rbd_buf[j];

            KASSERT(rb->rb_mbuf == NULL,
                ("RX mbuf in %d RX ring is not freed yet\n", i));
            bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap);
        }
    }

    /*
     * Destroy DMA maps for TX mbufs
     */
    for (i = 0; i < tx_done; ++i) {
        struct et_txbuf *tb = &tbd->tbd_buf[i];

        KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
        bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap);
    }

    /*
     * Destroy spare mbuf DMA map
     */
    bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap);

    /*
     * Destroy mbuf DMA tag
     */
    bus_dma_tag_destroy(sc->sc_mbuf_dtag);
    sc->sc_mbuf_dtag = NULL;
}
static int
et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
          void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
    struct et_softc *sc = device_get_softc(dev);
    int error;

    error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0,
                   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
                   NULL, NULL,
                   size, 1, BUS_SPACE_MAXSIZE_32BIT,
                   0, NULL, NULL, dtag);
    if (error) {
        device_printf(dev, "can't create DMA tag\n");
        return (error);
    }

    error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
                 dmap);
    if (error) {
        device_printf(dev, "can't allocate DMA mem\n");
        bus_dma_tag_destroy(*dtag);
        *dtag = NULL;
        return (error);
    }

    error = bus_dmamap_load(*dtag, *dmap, *addr, size,
                et_dma_ring_addr, paddr, BUS_DMA_WAITOK);
    if (error) {
        device_printf(dev, "can't load DMA mem\n");
        bus_dmamem_free(*dtag, *addr, *dmap);
        bus_dma_tag_destroy(*dtag);
        *dtag = NULL;
        return (error);
    }

    return (0);
}

static void
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
    bus_dmamap_unload(dtag, dmap);
    bus_dmamem_free(dtag, addr, dmap);
    bus_dma_tag_destroy(dtag);
}
static void
et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
    KASSERT(nseg == 1, ("too many segments\n"));
    *((bus_addr_t *)arg) = seg->ds_addr;
}
static void
et_chip_attach(struct et_softc *sc)
{
    uint32_t val;

    /*
     * Perform minimal initialization
     */

    /* Disable loopback */
    CSR_WRITE_4(sc, ET_LOOPBACK, 0);

    /* Reset MAC */
    CSR_WRITE_4(sc, ET_MAC_CFG1,
        ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
        ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
        ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

    /*
     * Setup half duplex mode
     */
    val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
          (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
          (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
          ET_MAC_HDX_EXC_DEFER;
    CSR_WRITE_4(sc, ET_MAC_HDX, val);

    /* Clear MAC control */
    CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

    /* Reset MII */
    CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

    /* Bring MAC out of reset state */
    CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

    /* Enable memory controllers */
    CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}
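/*
 * Interrupt handling below follows a mask-and-poll pattern: et_intr() masks
 * everything via et_disable_intrs(), reads ET_INTR_STATUS, dispatches RX/TX
 * completions, rearms the ET_TIMER interval if it fired, and finally
 * unmasks ET_INTRS again.  Note that ET_INTR_MASK is active-low:
 * et_enable_intrs() writes the complement of the bits it wants enabled.
 */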
static void
et_intr(void *xsc)
{
    struct et_softc *sc = xsc;
    struct ifnet *ifp;
    uint32_t intrs;

    ET_LOCK(sc);
    ifp = sc->ifp;
    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
        ET_UNLOCK(sc);
        return;
    }

    et_disable_intrs(sc);

    intrs = CSR_READ_4(sc, ET_INTR_STATUS);
    intrs &= ET_INTRS;
    if (intrs == 0)    /* Not interested */
        goto back;

    if (intrs & ET_INTR_RXEOF)
        et_rxeof(sc);
    if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
        et_txeof(sc);
    if (intrs & ET_INTR_TIMER)
        CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
    et_enable_intrs(sc, ET_INTRS);
    ET_UNLOCK(sc);
}
static void
et_init_locked(struct et_softc *sc)
{
    struct ifnet *ifp = sc->ifp;
    const struct et_bsize *arr;
    int error, i;

    ET_LOCK_ASSERT(sc);

    if (ifp->if_drv_flags & IFF_DRV_RUNNING)
        return;

    et_stop(sc);

    arr = et_bufsize_std;
    for (i = 0; i < ET_RX_NRING; ++i) {
        sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
        sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
    }

    error = et_init_tx_ring(sc);
    if (error)
        goto back;

    error = et_init_rx_ring(sc);
    if (error)
        goto back;

    error = et_chip_init(sc);
    if (error)
        goto back;

    error = et_enable_txrx(sc, 1);
    if (error)
        goto back;

    et_enable_intrs(sc, ET_INTRS);

    callout_reset(&sc->sc_tick, hz, et_tick, sc);

    CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
back:
    if (error)
        et_stop(sc);
}
static void
et_init(void *xsc)
{
    struct et_softc *sc = xsc;

    ET_LOCK(sc);
    et_init_locked(sc);
    ET_UNLOCK(sc);
}
static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    struct et_softc *sc = ifp->if_softc;
    struct mii_data *mii = device_get_softc(sc->sc_miibus);
    struct ifreq *ifr = (struct ifreq *)data;
    int error = 0, mask, max_framelen;

    switch (cmd) {
    case SIOCSIFFLAGS:
        ET_LOCK(sc);
        if (ifp->if_flags & IFF_UP) {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                if ((ifp->if_flags ^ sc->sc_if_flags) &
                    (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
                    et_setmulti(sc);
            } else {
                et_init_locked(sc);
            }
        } else {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                et_stop(sc);
        }
        sc->sc_if_flags = ifp->if_flags;
        ET_UNLOCK(sc);
        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
        break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            ET_LOCK(sc);
            et_setmulti(sc);
            ET_UNLOCK(sc);
        }
        break;

    case SIOCSIFMTU:
#if 0
        if (sc->sc_flags & ET_FLAG_JUMBO)
            max_framelen = ET_JUMBO_FRAMELEN;
        else
#endif
        max_framelen = MCLBYTES - 1;

        if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
            error = EOPNOTSUPP;
            break;
        }

        if (ifp->if_mtu != ifr->ifr_mtu) {
            ifp->if_mtu = ifr->ifr_mtu;
            ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
            et_init(sc);
        }
        break;

    case SIOCSIFCAP:
        ET_LOCK(sc);
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        if ((mask & IFCAP_TXCSUM) != 0 &&
            (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
                ifp->if_hwassist |= ET_CSUM_FEATURES;
            else
                ifp->if_hwassist &= ~ET_CSUM_FEATURES;
        }
        ET_UNLOCK(sc);
        break;

    default:
        error = ether_ioctl(ifp, cmd, data);
        break;
    }
    return (error);
}
static void
et_start_locked(struct ifnet *ifp)
{
    struct et_softc *sc = ifp->if_softc;
    struct et_txbuf_data *tbd;
    int trans;

    ET_LOCK_ASSERT(sc);
    tbd = &sc->sc_tx_data;

    if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
        return;

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING)
        return;

    trans = 0;
    for (;;) {
        struct mbuf *m;

        if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }

        IFQ_DEQUEUE(&ifp->if_snd, m);
        if (m == NULL)
            break;

        if (et_encap(sc, &m)) {
            ifp->if_oerrors++;
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }
        trans = 1;

        BPF_MTAP(ifp, m);
    }

    if (trans)
        sc->watchdog_timer = 5;
}

static void
et_start(struct ifnet *ifp)
{
    struct et_softc *sc = ifp->if_softc;

    ET_LOCK(sc);
    et_start_locked(ifp);
    ET_UNLOCK(sc);
}
static void
et_watchdog(struct et_softc *sc)
{
    ET_LOCK_ASSERT(sc);

    if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
        return;

    if_printf(sc->ifp, "watchdog timed out\n");

    et_init_locked(sc);
    et_start_locked(sc->ifp);
}
static int
et_stop_rxdma(struct et_softc *sc)
{
    CSR_WRITE_4(sc, ET_RXDMA_CTRL,
        ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

    DELAY(5);
    if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
        if_printf(sc->ifp, "can't stop RX DMA engine\n");
        return (ETIMEDOUT);
    }
    return (0);
}

static int
et_stop_txdma(struct et_softc *sc)
{
    CSR_WRITE_4(sc, ET_TXDMA_CTRL,
        ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
    return (0);
}
static void
et_free_tx_ring(struct et_softc *sc)
{
    struct et_txbuf_data *tbd = &sc->sc_tx_data;
    struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
    int i;

    for (i = 0; i < ET_TX_NDESC; ++i) {
        struct et_txbuf *tb = &tbd->tbd_buf[i];

        if (tb->tb_mbuf != NULL) {
            bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
            m_freem(tb->tb_mbuf);
            tb->tb_mbuf = NULL;
        }
    }

    bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
    bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
            BUS_DMASYNC_PREWRITE);
}

static void
et_free_rx_ring(struct et_softc *sc)
{
    int n;

    for (n = 0; n < ET_RX_NRING; ++n) {
        struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
        struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
        int i;

        for (i = 0; i < ET_RX_NDESC; ++i) {
            struct et_rxbuf *rb = &rbd->rbd_buf[i];

            if (rb->rb_mbuf != NULL) {
                bus_dmamap_unload(sc->sc_mbuf_dtag,
                          rb->rb_dmap);
                m_freem(rb->rb_mbuf);
                rb->rb_mbuf = NULL;
            }
        }

        bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
        bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
                BUS_DMASYNC_PREWRITE);
    }
}
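/*
 * et_setmulti() below implements a 128-bit multicast hash: seven bits
 * (29:23) of the big-endian CRC32 of each address, extracted by
 * ((h & 0x3f800000) >> 23), select one of 128 bits spread across the four
 * 32-bit ET_MULTI_HASH registers.
 */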
static void
et_setmulti(struct et_softc *sc)
{
    struct ifnet *ifp;
    uint32_t hash[4] = { 0, 0, 0, 0 };
    uint32_t rxmac_ctrl, pktfilt;
    struct ifmultiaddr *ifma;
    int i, count;

    ET_LOCK_ASSERT(sc);
    ifp = sc->ifp;

    pktfilt = CSR_READ_4(sc, ET_PKTFILT);
    rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

    pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
    if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
        rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
        goto back;
    }

    count = 0;
    if_maddr_rlock(ifp);
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        uint32_t *hp, h;

        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;

        h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
                ifma->ifma_addr), ETHER_ADDR_LEN);
        h = (h & 0x3f800000) >> 23;

        hp = &hash[0];
        if (h >= 32 && h < 64) {
            h -= 32;
            hp = &hash[1];
        } else if (h >= 64 && h < 96) {
            h -= 64;
            hp = &hash[2];
        } else if (h >= 96) {
            h -= 96;
            hp = &hash[3];
        }
        *hp |= (1 << h);

        ++count;
    }
    if_maddr_runlock(ifp);

    for (i = 0; i < 4; ++i)
        CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

    if (count > 0)
        pktfilt |= ET_PKTFILT_MCAST;
    rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
    CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
    CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}
static int
et_chip_init(struct et_softc *sc)
{
    struct ifnet *ifp = sc->ifp;
    uint32_t rxq_end;
    int error, frame_len, rxmem_size;

    /*
     * Split 16Kbytes internal memory between TX and RX
     * according to frame length.
     */
    frame_len = ET_FRAMELEN(ifp->if_mtu);
    if (frame_len < 2048) {
        rxmem_size = ET_MEM_RXSIZE_DEFAULT;
    } else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
        rxmem_size = ET_MEM_SIZE / 2;
    } else {
        rxmem_size = ET_MEM_SIZE -
        roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
    }
    rxq_end = ET_QUEUE_ADDR(rxmem_size);

    CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
    CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
    CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
    CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

    /* No loopback */
    CSR_WRITE_4(sc, ET_LOOPBACK, 0);

    /* Clear MSI configure */
    if ((sc->sc_flags & ET_FLAG_MSI) == 0)
        CSR_WRITE_4(sc, ET_MSI_CFG, 0);

    /* Disable timer */
    CSR_WRITE_4(sc, ET_TIMER, 0);

    /* Initialize MAC */
    et_init_mac(sc);

    /* Enable memory controllers */
    CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

    /* Initialize RX MAC */
    et_init_rxmac(sc);

    /* Initialize TX MAC */
    et_init_txmac(sc);

    /* Initialize RX DMA engine */
    error = et_init_rxdma(sc);
    if (error)
        return (error);

    /* Initialize TX DMA engine */
    error = et_init_txdma(sc);
    if (error)
        return (error);

    return (0);
}
static int
et_init_tx_ring(struct et_softc *sc)
{
    struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
    struct et_txstatus_data *txsd = &sc->sc_tx_status;
    struct et_txbuf_data *tbd = &sc->sc_tx_data;

    bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
    bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
            BUS_DMASYNC_PREWRITE);

    tbd->tbd_start_index = 0;
    tbd->tbd_start_wrap = 0;
    tbd->tbd_used = 0;

    bzero(txsd->txsd_status, sizeof(uint32_t));
    bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
            BUS_DMASYNC_PREWRITE);
    return (0);
}
static int
et_init_rx_ring(struct et_softc *sc)
{
    struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
    struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
    int n;

    for (n = 0; n < ET_RX_NRING; ++n) {
        struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
        int i, error;

        for (i = 0; i < ET_RX_NDESC; ++i) {
            error = rbd->rbd_newbuf(rbd, i, 1);
            if (error) {
                if_printf(sc->ifp, "%d ring %d buf, "
                      "newbuf failed: %d\n", n, i, error);
                return (error);
            }
        }
    }

    bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
    bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
            BUS_DMASYNC_PREWRITE);

    bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
    bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
            BUS_DMASYNC_PREWRITE);

    return (0);
}
static void
et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs,
        bus_size_t mapsz __unused, int error)
{
    struct et_dmamap_ctx *ctx = xctx;
    int i;

    if (error)
        return;

    if (nsegs > ctx->nsegs) {
        ctx->nsegs = 0;
        return;
    }

    ctx->nsegs = nsegs;
    for (i = 0; i < nsegs; ++i)
        ctx->segs[i] = segs[i];
}
static int
et_init_rxdma(struct et_softc *sc)
{
    struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
    struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
    struct et_rxdesc_ring *rx_ring;
    int error;

    error = et_stop_rxdma(sc);
    if (error) {
        if_printf(sc->ifp, "can't init RX DMA engine\n");
        return (error);
    }

    /*
     * Install RX status
     */
    CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
    CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

    /*
     * Install RX stat ring
     */
    CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
    CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
    CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
    CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
    CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

    /* Match ET_RXSTAT_POS */
    rxst_ring->rsr_index = 0;
    rxst_ring->rsr_wrap = 0;

    /*
     * Install the 2nd RX descriptor ring
     */
    rx_ring = &sc->sc_rx_ring[1];
    CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
    CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
    CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
    CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
    CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

    /* Match ET_RX_RING1_POS */
    rx_ring->rr_index = 0;
    rx_ring->rr_wrap = 1;

    /*
     * Install the 1st RX descriptor ring
     */
    rx_ring = &sc->sc_rx_ring[0];
    CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
    CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
    CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
    CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
    CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

    /* Match ET_RX_RING0_POS */
    rx_ring->rr_index = 0;
    rx_ring->rr_wrap = 1;

    /*
     * RX intr moderation
     */
    CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
    CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

    return (0);
}
static int
et_init_txdma(struct et_softc *sc)
{
    struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
    struct et_txstatus_data *txsd = &sc->sc_tx_status;
    int error;

    error = et_stop_txdma(sc);
    if (error) {
        if_printf(sc->ifp, "can't init TX DMA engine\n");
        return (error);
    }

    /*
     * Install TX descriptor ring
     */
    CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
    CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
    CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

    /*
     * Install TX status
     */
    CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
    CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

    CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

    /* Match ET_TX_READY_POS */
    tx_ring->tr_ready_index = 0;
    tx_ring->tr_ready_wrap = 0;

    return (0);
}
static void
et_init_mac(struct et_softc *sc)
{
    struct ifnet *ifp = sc->ifp;
    const uint8_t *eaddr = IF_LLADDR(ifp);
    uint32_t val;

    /* Reset MAC */
    CSR_WRITE_4(sc, ET_MAC_CFG1,
        ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
        ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
        ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

    /*
     * Setup inter packet gap
     */
    val = (56 << ET_IPG_NONB2B_1_SHIFT) |
          (88 << ET_IPG_NONB2B_2_SHIFT) |
          (80 << ET_IPG_MINIFG_SHIFT) |
          (96 << ET_IPG_B2B_SHIFT);
    CSR_WRITE_4(sc, ET_IPG, val);

    /*
     * Setup half duplex mode
     */
    val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
          (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
          (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
          ET_MAC_HDX_EXC_DEFER;
    CSR_WRITE_4(sc, ET_MAC_HDX, val);

    /* Clear MAC control */
    CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

    /* Reset MII */
    CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

    /*
     * Set MAC address
     */
    val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
    CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
    val = (eaddr[0] << 16) | (eaddr[1] << 24);
    CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

    /* Set max frame length */
    CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

    /* Bring MAC out of reset state */
    CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}
static void
et_init_rxmac(struct et_softc *sc)
{
    struct ifnet *ifp = sc->ifp;
    const uint8_t *eaddr = IF_LLADDR(ifp);
    uint32_t val;
    int i;

    /* Disable RX MAC and WOL */
    CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

    /*
     * Clear all WOL related registers
     */
    for (i = 0; i < 3; ++i)
        CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
    for (i = 0; i < 20; ++i)
        CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

    /*
     * Set WOL source address.  XXX is this necessary?
     */
    val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
    CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
    val = (eaddr[0] << 8) | eaddr[1];
    CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

    /* Clear packet filters */
    CSR_WRITE_4(sc, ET_PKTFILT, 0);

    /* No ucast filtering */
    CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
    CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
    CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

    if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
        /*
         * In order to transmit jumbo packets greater than
         * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
         * RX MAC and RX DMA needs to be reduced in size to
         * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
         * order to implement this, we must use "cut through"
         * mode in the RX MAC, which chops packets down into
         * segments.  In this case we selected 256 bytes,
         * since this is the size of the PCI-Express TLP's
         * that the ET1310 uses.
         */
        val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
              ET_RXMAC_MC_SEGSZ_ENABLE;
    } else {
        val = 0;
    }
    CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

    CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

    /* Initialize RX MAC management register */
    CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

    CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

    CSR_WRITE_4(sc, ET_RXMAC_MGT,
        ET_RXMAC_MGT_PASS_ECRC |
        ET_RXMAC_MGT_PASS_ELEN |
        ET_RXMAC_MGT_PASS_ETRUNC |
        ET_RXMAC_MGT_CHECK_PKT);

    /*
     * Configure runt filtering (may not work on certain chip generation)
     */
    val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
        ET_PKTFILT_MINLEN_MASK;
    val |= ET_PKTFILT_FRAG;
    CSR_WRITE_4(sc, ET_PKTFILT, val);

    /* Enable RX MAC but leave WOL disabled */
    CSR_WRITE_4(sc, ET_RXMAC_CTRL,
        ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

    /*
     * Setup multicast hash and allmulti/promisc mode
     */
    et_setmulti(sc);
}
static void
et_init_txmac(struct et_softc *sc)
{
    /* Disable TX MAC and FC(?) */
    CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

    /* No flow control yet */
    CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

    /* Enable TX MAC but leave FC(?) disabled */
    CSR_WRITE_4(sc, ET_TXMAC_CTRL,
        ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}
static int
et_start_rxdma(struct et_softc *sc)
{
    uint32_t val = 0;

    val |= (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
           ET_RXDMA_CTRL_RING0_ENABLE;
    val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
           ET_RXDMA_CTRL_RING1_ENABLE;

    CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

    DELAY(5);

    if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
        if_printf(sc->ifp, "can't start RX DMA engine\n");
        return (ETIMEDOUT);
    }
    return (0);
}

static int
et_start_txdma(struct et_softc *sc)
{
    CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
    return (0);
}
static int
et_enable_txrx(struct et_softc *sc, int media_upd)
{
    struct ifnet *ifp = sc->ifp;
    uint32_t val;
    int i, error;

    val = CSR_READ_4(sc, ET_MAC_CFG1);
    val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
    val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
         ET_MAC_CFG1_LOOPBACK);
    CSR_WRITE_4(sc, ET_MAC_CFG1, val);

    if (media_upd)
        et_ifmedia_upd_locked(ifp);
    else
        et_setmedia(sc);

#define NRETRY 50

    for (i = 0; i < NRETRY; ++i) {
        val = CSR_READ_4(sc, ET_MAC_CFG1);
        if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
            (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
            break;
        DELAY(100);
    }
    if (i == NRETRY) {
        if_printf(ifp, "can't enable RX/TX\n");
        return (0);
    }
    sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY

    /*
     * Start TX/RX DMA engine
     */
    error = et_start_rxdma(sc);
    if (error)
        return (error);

    error = et_start_txdma(sc);
    if (error)
        return (error);

    return (0);
}
static void
et_rxeof(struct et_softc *sc)
{
    struct ifnet *ifp;
    struct et_rxstatus_data *rxsd;
    struct et_rxstat_ring *rxst_ring;
    uint32_t rxs_stat_ring, rxst_info2;
    int rxst_wrap, rxst_index;

    ET_LOCK_ASSERT(sc);
    ifp = sc->ifp;
    rxsd = &sc->sc_rx_status;
    rxst_ring = &sc->sc_rxstat_ring;

    if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
        return;

    bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
            BUS_DMASYNC_POSTREAD);
    bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
            BUS_DMASYNC_POSTREAD);

    rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
    rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
    rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
        ET_RXS_STATRING_INDEX_SHIFT;

    while (rxst_index != rxst_ring->rsr_index ||
           rxst_wrap != rxst_ring->rsr_wrap) {
        struct et_rxbuf_data *rbd;
        struct et_rxdesc_ring *rx_ring;
        struct et_rxstat *st;
        struct mbuf *m;
        int buflen, buf_idx, ring_idx;
        uint32_t rxstat_pos, rxring_pos;

        MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
        st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
        rxst_info2 = le32toh(st->rxst_info2);
        buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
            ET_RXST_INFO2_LEN_SHIFT;
        buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
            ET_RXST_INFO2_BUFIDX_SHIFT;
        ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
            ET_RXST_INFO2_RINGIDX_SHIFT;

        if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
            rxst_ring->rsr_index = 0;
            rxst_ring->rsr_wrap ^= 1;
        }
        rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
        if (rxst_ring->rsr_wrap)
            rxstat_pos |= ET_RXSTAT_POS_WRAP;
        CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

        if (ring_idx >= ET_RX_NRING) {
            ifp->if_ierrors++;
            if_printf(ifp, "invalid ring index %d\n", ring_idx);
            continue;
        }
        if (buf_idx >= ET_RX_NDESC) {
            ifp->if_ierrors++;
            if_printf(ifp, "invalid buf index %d\n", buf_idx);
            continue;
        }

        rbd = &sc->sc_rx_data[ring_idx];
        m = rbd->rbd_buf[buf_idx].rb_mbuf;

        if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
            if (buflen < ETHER_CRC_LEN) {
                m_freem(m);
                m = NULL;
                ifp->if_ierrors++;
            } else {
                m->m_pkthdr.len = m->m_len =
                    buflen - ETHER_CRC_LEN;
                m->m_pkthdr.rcvif = ifp;
                ifp->if_ipackets++;
                ET_UNLOCK(sc);
                ifp->if_input(ifp, m);
                ET_LOCK(sc);
            }
        } else {
            ifp->if_ierrors++;
        }
        m = NULL;    /* Catch invalid reference */

        rx_ring = &sc->sc_rx_ring[ring_idx];

        if (buf_idx != rx_ring->rr_index) {
            if_printf(ifp, "WARNING!! ring %d, "
                "buf_idx %d, rr_idx %d\n",
                ring_idx, buf_idx, rx_ring->rr_index);
        }

        MPASS(rx_ring->rr_index < ET_RX_NDESC);
        if (++rx_ring->rr_index == ET_RX_NDESC) {
            rx_ring->rr_index = 0;
            rx_ring->rr_wrap ^= 1;
        }
        rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
        if (rx_ring->rr_wrap)
            rxring_pos |= ET_RX_RING_POS_WRAP;
        CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
    }
}
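/*
 * TX encapsulation below: et_encap() loads the mbuf chain with the DMA map
 * belonging to the first descriptor slot, then, once the final fragment
 * index is known, swaps that map into the last slot and records the mbuf
 * there.  et_txeof() therefore finds both the mbuf and its map at the
 * descriptor that completes the frame.
 */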
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
    struct mbuf *m = *m0;
    bus_dma_segment_t segs[ET_NSEG_MAX];
    struct et_dmamap_ctx ctx;
    struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
    struct et_txbuf_data *tbd = &sc->sc_tx_data;
    struct et_txdesc *td;
    bus_dmamap_t map;
    int error, maxsegs, first_idx, last_idx, i;
    uint32_t csum_flags, tx_ready_pos, last_td_ctrl2;

    maxsegs = ET_TX_NDESC - tbd->tbd_used;
    if (maxsegs > ET_NSEG_MAX)
        maxsegs = ET_NSEG_MAX;
    KASSERT(maxsegs >= ET_NSEG_SPARE,
        ("not enough spare TX desc (%d)\n", maxsegs));

    MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
    first_idx = tx_ring->tr_ready_index;
    map = tbd->tbd_buf[first_idx].tb_dmap;

    ctx.nsegs = maxsegs;
    ctx.segs = segs;
    error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
                     et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT);
    if (!error && ctx.nsegs == 0) {
        bus_dmamap_unload(sc->sc_mbuf_dtag, map);
        error = EFBIG;
    }
    if (error && error != EFBIG) {
        if_printf(sc->ifp, "can't load TX mbuf, error %d\n",
            error);
        goto back;
    }
    if (error) {    /* error == EFBIG */
        struct mbuf *m_new;

        m_new = m_defrag(m, M_DONTWAIT);
        if (m_new == NULL) {
            if_printf(sc->ifp, "can't defrag TX mbuf\n");
            error = ENOBUFS;
            goto back;
        } else {
            *m0 = m = m_new;
        }

        ctx.nsegs = maxsegs;
        ctx.segs = segs;
        error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
                         et_dma_buf_addr, &ctx,
                         BUS_DMA_NOWAIT);
        if (error || ctx.nsegs == 0) {
            if (ctx.nsegs == 0) {
                bus_dmamap_unload(sc->sc_mbuf_dtag, map);
                error = EFBIG;
            }
            if_printf(sc->ifp,
                "can't load defraged TX mbuf\n");
            goto back;
        }
    }

    bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE);

    last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
    sc->sc_tx += ctx.nsegs;
    if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
        sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
        last_td_ctrl2 |= ET_TDCTRL2_INTR;
    }

    csum_flags = 0;
    if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
        if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
            csum_flags |= ET_TDCTRL2_CSUM_IP;
        if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
            csum_flags |= ET_TDCTRL2_CSUM_UDP;
        else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
            csum_flags |= ET_TDCTRL2_CSUM_TCP;
    }

    last_idx = -1;
    for (i = 0; i < ctx.nsegs; ++i) {
        int idx;

        idx = (first_idx + i) % ET_TX_NDESC;
        td = &tx_ring->tr_desc[idx];
        td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
        td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
        td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
        if (i == ctx.nsegs - 1) {    /* Last frag */
            td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
            last_idx = idx;
        } else
            td->td_ctrl2 = htole32(csum_flags);

        MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
        if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
            tx_ring->tr_ready_index = 0;
            tx_ring->tr_ready_wrap ^= 1;
        }
    }
    td = &tx_ring->tr_desc[first_idx];
    td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);    /* First frag */

    MPASS(last_idx >= 0);
    tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
    tbd->tbd_buf[last_idx].tb_dmap = map;
    tbd->tbd_buf[last_idx].tb_mbuf = m;

    tbd->tbd_used += ctx.nsegs;
    MPASS(tbd->tbd_used <= ET_TX_NDESC);

    bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
            BUS_DMASYNC_PREWRITE);

    tx_ready_pos = tx_ring->tr_ready_index & ET_TX_READY_POS_INDEX_MASK;
    if (tx_ring->tr_ready_wrap)
        tx_ready_pos |= ET_TX_READY_POS_WRAP;
    CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

    error = 0;
back:
    if (error) {
        m_freem(m);
        *m0 = NULL;
    }
    return (error);
}
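/*
 * TX reclamation below: ET_TX_DONE_POS supplies a consumer index plus a
 * wrap bit, and et_txeof() walks tbd_start_index/tbd_start_wrap forward
 * until both match, unloading and freeing the mbuf stored at each
 * frame-final descriptor along the way.
 */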
static void
et_txeof(struct et_softc *sc)
{
    struct ifnet *ifp;
    struct et_txdesc_ring *tx_ring;
    struct et_txbuf_data *tbd;
    uint32_t tx_done;
    int end, wrap;

    ET_LOCK_ASSERT(sc);
    ifp = sc->ifp;
    tx_ring = &sc->sc_tx_ring;
    tbd = &sc->sc_tx_data;

    if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
        return;

    if (tbd->tbd_used == 0)
        return;

    tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
    end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
    wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

    while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
        struct et_txbuf *tb;

        MPASS(tbd->tbd_start_index < ET_TX_NDESC);
        tb = &tbd->tbd_buf[tbd->tbd_start_index];

        bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
              sizeof(struct et_txdesc));
        bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
                BUS_DMASYNC_PREWRITE);

        if (tb->tb_mbuf != NULL) {
            bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
            m_freem(tb->tb_mbuf);
            tb->tb_mbuf = NULL;
            ifp->if_opackets++;
        }

        if (++tbd->tbd_start_index == ET_TX_NDESC) {
            tbd->tbd_start_index = 0;
            tbd->tbd_start_wrap ^= 1;
        }

        MPASS(tbd->tbd_used > 0);
        tbd->tbd_used--;
    }

    if (tbd->tbd_used == 0)
        sc->watchdog_timer = 0;
    if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    et_start_locked(ifp);
}
static void
et_tick(void *xsc)
{
    struct et_softc *sc = xsc;
    struct ifnet *ifp;
    struct mii_data *mii;

    ET_LOCK_ASSERT(sc);
    ifp = sc->ifp;
    mii = device_get_softc(sc->sc_miibus);

    mii_tick(mii);
    if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
        (mii->mii_media_status & IFM_ACTIVE) &&
        IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
        if_printf(ifp, "Link up, enable TX/RX\n");
        if (et_enable_txrx(sc, 0) == 0)
            et_start_locked(ifp);
    }
    et_watchdog(sc);
    callout_reset(&sc->sc_tick, hz, et_tick, sc);
}
static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
    return (et_newbuf(rbd, buf_idx, init, MCLBYTES));
}

static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
    return (et_newbuf(rbd, buf_idx, init, MHLEN));
}
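/*
 * RX buffer replacement below is transactional: et_newbuf() loads the new
 * mbuf through the spare map (sc_mbuf_tmp_dmap) first, so a failed
 * allocation or load leaves the old mbuf and its mapping untouched; only
 * on success are the buffer's map and the spare map swapped.
 */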
static int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
    struct et_softc *sc = rbd->rbd_softc;
    struct et_rxbuf *rb;
    struct mbuf *m;
    struct et_dmamap_ctx ctx;
    bus_dma_segment_t seg;
    bus_dmamap_t dmap;
    int error, len;

    MPASS(buf_idx < ET_RX_NDESC);
    rb = &rbd->rbd_buf[buf_idx];

    m = m_getl(len0, /* init ? M_WAIT :*/ M_DONTWAIT, MT_DATA, M_PKTHDR, &len);
    if (m == NULL) {
        error = ENOBUFS;

        if (init) {
            if_printf(sc->ifp,
                "m_getl failed, size %d\n", len0);
            return (error);
        } else {
            goto back;
        }
    }
    m->m_len = m->m_pkthdr.len = len;

    /*
     * Try load RX mbuf into temporary DMA tag
     */
    ctx.nsegs = 1;
    ctx.segs = &seg;
    error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m,
                     et_dma_buf_addr, &ctx,
                     init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
    if (error || ctx.nsegs == 0) {
        if (!error) {
            bus_dmamap_unload(sc->sc_mbuf_dtag,
                      sc->sc_mbuf_tmp_dmap);
            error = EFBIG;
            if_printf(sc->ifp, "too many segments?!\n");
        }
        m_freem(m);
        m = NULL;

        if (init) {
            if_printf(sc->ifp, "can't load RX mbuf\n");
            return (error);
        } else {
            goto back;
        }
    }

    if (!init) {
        bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap,
                BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap);
    }
    rb->rb_mbuf = m;
    rb->rb_paddr = seg.ds_addr;

    /*
     * Swap RX buf's DMA map with the loaded temporary one
     */
    dmap = rb->rb_dmap;
    rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
    sc->sc_mbuf_tmp_dmap = dmap;

    error = 0;
back:
    et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
    return (error);
}
/*
 * Create sysctl tree
 */
static void
et_add_sysctls(struct et_softc *sc)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;

    ctx = device_get_sysctl_ctx(sc->dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
        CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
        "RX IM, # packets per RX interrupt");
    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
        CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
        "RX IM, RX interrupt delay (x10 usec)");
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
        CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
        "TX IM, # segments per TX interrupt");
    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
        CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
}
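/*
 * Example sysctl(8) usage for the nodes registered above (a sketch; the
 * unit number and values are hypothetical -- the nodes live under the
 * device's dev.et.<unit> tree):
 *
 *   sysctl dev.et.0.rx_intr_npkts=64
 *   sysctl dev.et.0.rx_intr_delay=10
 */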
static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
    struct et_softc *sc = arg1;
    struct ifnet *ifp = sc->ifp;
    int error = 0, v;

    v = sc->sc_rx_intr_npkts;
    error = sysctl_handle_int(oidp, &v, 0, req);
    if (error || req->newptr == NULL)
        goto back;
    if (v <= 0) {
        error = EINVAL;
        goto back;
    }

    if (sc->sc_rx_intr_npkts != v) {
        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
            CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
        sc->sc_rx_intr_npkts = v;
    }
back:
    return (error);
}

static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
    struct et_softc *sc = arg1;
    struct ifnet *ifp = sc->ifp;
    int error = 0, v;

    v = sc->sc_rx_intr_delay;
    error = sysctl_handle_int(oidp, &v, 0, req);
    if (error || req->newptr == NULL)
        goto back;
    if (v <= 0) {
        error = EINVAL;
        goto back;
    }

    if (sc->sc_rx_intr_delay != v) {
        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
            CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
        sc->sc_rx_intr_delay = v;
    }
back:
    return (error);
}
static void
et_setmedia(struct et_softc *sc)
{
    struct mii_data *mii = device_get_softc(sc->sc_miibus);
    uint32_t cfg2, ctrl;

    cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
    cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
          ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
    cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
        ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
         ET_MAC_CFG2_PREAMBLE_LEN_MASK);

    ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
    ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

    if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
        cfg2 |= ET_MAC_CFG2_MODE_GMII;
    } else {
        cfg2 |= ET_MAC_CFG2_MODE_MII;
        ctrl |= ET_MAC_CTRL_MODE_MII;
    }

    if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
        cfg2 |= ET_MAC_CFG2_FDX;
    else
        ctrl |= ET_MAC_CTRL_GHDX;

    CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
    CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}
static void
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
{
    struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
    struct et_rxdesc *desc;

    MPASS(buf_idx < ET_RX_NDESC);
    desc = &rx_ring->rr_desc[buf_idx];

    desc->rd_addr_hi = htole32(ET_ADDR_HI(paddr));
    desc->rd_addr_lo = htole32(ET_ADDR_LO(paddr));
    desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);

    bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
            BUS_DMASYNC_PREWRITE);
}