/*-
 * Copyright (c) 2007 Sepherosa Ziehau.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/et/if_etreg.h>
#include <dev/et/if_etvar.h>

#include "miibus_if.h"

MODULE_DEPEND(et, pci, 1, 1, 1);
MODULE_DEPEND(et, ether, 1, 1, 1);
MODULE_DEPEND(et, miibus, 1, 1, 1);

static int	msi_disable = 0;
TUNABLE_INT("hw.et.msi_disable", &msi_disable);

#define	ET_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
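
/*
 * Checksum offload features advertised via if_hwassist when
 * IFCAP_TXCSUM is enabled; et_encap() maps these mbuf flags onto the
 * corresponding TX descriptor control bits.
 */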

static int	et_probe(device_t);
static int	et_attach(device_t);
static int	et_detach(device_t);
static int	et_shutdown(device_t);
static int	et_suspend(device_t);
static int	et_resume(device_t);

static int	et_miibus_readreg(device_t, int, int);
static int	et_miibus_writereg(device_t, int, int, int);
static void	et_miibus_statchg(device_t);

static void	et_init_locked(struct et_softc *);
static void	et_init(void *);
static int	et_ioctl(struct ifnet *, u_long, caddr_t);
static void	et_start_locked(struct ifnet *);
static void	et_start(struct ifnet *);
static int	et_watchdog(struct et_softc *);
static int	et_ifmedia_upd_locked(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static uint64_t	et_get_counter(struct ifnet *, ift_counter);

static void	et_add_sysctls(struct et_softc *);
static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void	et_intr(void *);
static void	et_rxeof(struct et_softc *);
static void	et_txeof(struct et_softc *);

static int	et_dma_alloc(struct et_softc *);
static void	et_dma_free(struct et_softc *);
static void	et_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t,
		    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *,
		    const char *);
static void	et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **,
		    bus_dmamap_t, bus_addr_t *);
static void	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int);
static void	et_rxbuf_discard(struct et_rxbuf_data *, int);

static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static void	et_reset(struct et_softc *);
static int	et_bus_config(struct et_softc *);
static void	et_get_eaddr(device_t, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);
static void	et_stats_update(struct et_softc *);

static const struct et_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }
};

static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),
	DEVMETHOD(device_suspend,	et_suspend),
	DEVMETHOD(device_resume,	et_resume),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	DEVMETHOD_END
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

static devclass_t et_devclass;

DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);

static int	et_rx_intr_npkts = 32;
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 126;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);

static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}

static int
et_attach(device_t dev)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t pmcfg;
	int cap, error, msic;

	sc = device_get_softc(dev);
	sc->dev = dev;
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = PCIR_BAR(0);
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		error = ENXIO;
		goto fail;
	}

	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
		sc->sc_expcap = cap;
		sc->sc_flags |= ET_FLAG_PCIE;
		msic = pci_msi_count(dev);
		if (bootverbose)
			device_printf(dev, "MSI count: %d\n", msic);

		if (msic > 0 && msi_disable == 0) {
			msic = 1;
			if (pci_alloc_msi(dev, &msic) == 0) {
				if (msic == 1) {
					device_printf(dev, "Using %d MSI message\n",
					    msic);
					sc->sc_flags |= ET_FLAG_MSI;
				} else
					pci_release_msi(dev);
			}
		}
	}

	if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
		sc->sc_irq_rid = 0;
		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	} else {
		sc->sc_irq_rid = 1;
		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->sc_irq_rid, RF_ACTIVE);
	}
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	if (pci_get_device(dev) == PCI_PRODUCT_LUCENT_ET1310_FAST)
		sc->sc_flags |= ET_FLAG_FASTETHER;

	error = et_bus_config(sc);
	if (error)
		goto fail;

	et_get_eaddr(dev, eaddr);

	/* Take PHY out of COMA and enable clocks. */
	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
		pmcfg |= EM_PM_GIGEPHY_ENB;
	CSR_WRITE_4(sc, ET_PM, pmcfg);

	et_reset(sc);

	error = et_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_get_counter = et_get_counter;
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_snd.ifq_drv_maxlen = ET_TX_NDESC - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	et_chip_attach(sc);

	error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
	    et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, eaddr);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, et_intr, sc, &sc->sc_irq_handle);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "can't setup intr\n");
		goto fail;
	}

	et_add_sysctls(sc);

	return (0);
fail:
	et_detach(dev);
	return (error);
}

static int
et_detach(device_t dev)
{
	struct et_softc *sc;

	sc = device_get_softc(dev);
	if (device_is_attached(dev)) {
		ether_ifdetach(sc->ifp);
		ET_LOCK(sc);
		et_stop(sc);
		ET_UNLOCK(sc);
		callout_drain(&sc->sc_tick);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_handle != NULL)
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
	if (sc->sc_irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
	if ((sc->sc_flags & ET_FLAG_MSI) != 0)
		pci_release_msi(dev);
	if (sc->sc_mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem_res), sc->sc_mem_res);

	if (sc->ifp != NULL)
		if_free(sc->ifp);

	et_dma_free(sc);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static int
et_shutdown(device_t dev)
{
	struct et_softc *sc;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	et_stop(sc);
	ET_UNLOCK(sc);
	return (0);
}

static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc;
	uint32_t val;
	int i, ret;

	sc = device_get_softc(dev);
	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define	NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
		    "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = val & ET_MII_STAT_VALUE_MASK;

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return (ret);
}

static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL,
	    (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);

#define	NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
		    "write phy %d, reg %d timed out\n", phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return (0);
}

static void
et_miibus_statchg(device_t dev)
{
	struct et_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t cfg1, cfg2, ctrl;
	int i;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->sc_miibus);
	ifp = sc->ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->sc_flags &= ~ET_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->sc_flags |= ET_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
				sc->sc_flags |= ET_FLAG_LINK;
			break;
		}
	}

	/* XXX Stop TX/RX MAC? */
	if ((sc->sc_flags & ET_FLAG_LINK) == 0)
		return;

	/* Program MACs with resolved speed/duplex/flow-control. */
	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
	cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
	cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
	    ET_MAC_CFG2_PREAMBLE_LEN_MASK);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		cfg2 |= ET_MAC_CFG2_FDX;
		/*
		 * The controller lacks automatic TX pause frame
		 * generation, so it has to be handled by the driver.
		 * Although the driver can send pause frames with an
		 * arbitrary pause time, the controller provides no
		 * way to tell how many free RX buffers it has left,
		 * which makes it hard to generate XON frames in
		 * time.  So leave TX flow control disabled.
		 */
#ifdef notyet
		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
			cfg1 |= ET_MAC_CFG1_TXFLOW;
#endif
		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
			cfg1 |= ET_MAC_CFG1_RXFLOW;
	} else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
	cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);

#define	NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;
		DELAY(100);
	}
	if (i == NRETRY)
		if_printf(ifp, "can't enable RX/TX\n");
	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY
}

static int
et_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct et_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sc_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	return (mii_mediachg(mii));
}

static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc;
	int res;

	sc = ifp->if_softc;
	ET_LOCK(sc);
	res = et_ifmedia_upd_locked(ifp);
	ET_UNLOCK(sc);

	return (res);
}

static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	ET_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		ET_UNLOCK(sc);
		return;
	}

	mii = device_get_softc(sc->sc_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ET_UNLOCK(sc);
}

static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	callout_stop(&sc->sc_tick);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);

	CSR_WRITE_4(sc, ET_MAC_CFG1, CSR_READ_4(sc, ET_MAC_CFG1) & ~(
	    ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN));
	DELAY(100);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);
	et_stats_update(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	sc->watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

static int
et_bus_config(struct et_softc *sc)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
		return (ENXIO);
	}

	/* TODO: LED */

	if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
		return (0);

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_read_config(sc->dev,
	    sc->sc_expcap + PCIER_DEVICE_CAP, 4);
	max_plsz = val & PCIEM_CAP_MAX_PAYLOAD;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(sc->dev,
		    ET_PCIR_REPLAY_TIMER, 2);
		device_printf(sc->dev, "ack latency %u, replay timer %u\n",
		    ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
		    2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
	val &= ~(PCIEM_LINK_CAP_L0S_EXIT | PCIEM_LINK_CAP_L1_EXIT);
	/* L0s exit latency : 2us */
	/* L1 exit latency : 2us */
	pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);

	/*
	 * Set max read request size to 2048 bytes
	 */
	pci_set_max_read_req(sc->dev, 2048);

	return (0);
}
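
/*
 * The factory station address is kept in PCI configuration space:
 * bytes 0-3 in ET_PCIR_MAC_ADDR0 and bytes 4-5 in ET_PCIR_MAC_ADDR1,
 * little-endian, as unpacked below.
 */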
static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
	uint32_t val;
	int i;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
	for (i = 0; i < 4; ++i)
		eaddr[i] = (val >> (8 * i)) & 0xff;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
	for (; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}

static void
et_reset(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}
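
/*
 * Argument/callback pair for single-segment bus_dmamap_load(): the
 * callback just records the bus address of the lone segment so
 * et_dma_ring_alloc() can return it to its caller.
 */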
struct et_dmamap_arg {
	bus_addr_t	et_busaddr;
};

static void
et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct et_dmamap_arg *ctx;

	if (error)
		return;

	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));

	ctx = arg;
	ctx->et_busaddr = segs->ds_addr;
}
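
/*
 * Allocate one DMA'able ring: create a tag with the requested
 * alignment, allocate zeroed coherent memory, load the map and return
 * the kernel address, map and bus address through the out parameters.
 * "msg" is only used to label error messages.
 */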
static int
et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize,
    bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
    const char *msg)
{
	struct et_dmamap_arg ctx;
	int error;

	error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL,
	    tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create %s dma tag\n", msg);
		return (error);
	}
	/* Allocate DMA'able memory for ring. */
	error = bus_dmamem_alloc(*tag, (void **)ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA'able memory for %s\n", msg);
		return (error);
	}
	/* Load the address of the ring. */
	ctx.et_busaddr = 0;
	error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr,
	    &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load DMA'able memory for %s\n", msg);
		return (error);
	}
	*paddr = ctx.et_busaddr;
	return (0);
}

static void
et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
    bus_dmamap_t map, bus_addr_t *paddr)
{

	if (*paddr != 0) {
		bus_dmamap_unload(*tag, map);
		*paddr = 0;
	}
	if (*ring != NULL) {
		bus_dmamem_free(*tag, *ring, map);
		*ring = NULL;
	}
	if (*tag) {
		bus_dma_tag_destroy(*tag);
		*tag = NULL;
	}
}

static int
et_dma_alloc(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxstatus_data *rxsd;
	struct et_rxbuf_data *rbd;
	struct et_txbuf_data *tbd;
	struct et_txstatus_data *txsd;
	int i, error;

	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_dtag);
	if (error != 0) {
		device_printf(sc->dev, "could not allocate parent dma tag\n");
		return (error);
	}

	/* TX ring. */
	tx_ring = &sc->sc_tx_ring;
	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE,
	    &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
	    &tx_ring->tr_paddr, "TX ring");
	if (error)
		return (error);

	/* TX status block. */
	txsd = &sc->sc_tx_status;
	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t),
	    &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap,
	    &txsd->txsd_paddr, "TX status block");
	if (error)
		return (error);

	/* RX ring 0, used to receive small-sized frames. */
	rx_ring = &sc->sc_rx_ring[0];
	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
	    &rx_ring->rr_paddr, "RX ring 0");
	rx_ring->rr_posreg = ET_RX_RING0_POS;
	if (error)
		return (error);

	/* RX ring 1, used to store normal-sized frames. */
	rx_ring = &sc->sc_rx_ring[1];
	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
	    &rx_ring->rr_paddr, "RX ring 1");
	rx_ring->rr_posreg = ET_RX_RING1_POS;
	if (error)
		return (error);

	/* RX stat ring. */
	rxst_ring = &sc->sc_rxstat_ring;
	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE,
	    &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat,
	    &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring");
	if (error)
		return (error);

	/* RX status block. */
	rxsd = &sc->sc_rx_status;
	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN,
	    sizeof(struct et_rxstatus), &rxsd->rxsd_dtag,
	    (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap,
	    &rxsd->rxsd_paddr, "RX status block");
	if (error)
		return (error);

	/* Create parent DMA tag for mbufs. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_mbuf_dtag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate parent dma tag for mbuf\n");
		return (error);
	}

	/* Create DMA tag for mini RX mbufs to use RX ring 0. */
	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
	    MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create mini RX dma tag\n");
		return (error);
	}

	/* Create DMA tag for standard RX mbufs to use RX ring 1. */
	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
	    MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create RX dma tag\n");
		return (error);
	}

	/* Create DMA tag for TX mbufs. */
	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL,
	    &sc->sc_tx_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TX dma tag\n");
		return (error);
	}
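
	/*
	 * RX ring 0 is stocked with small header mbufs (MHLEN) and RX
	 * ring 1 with 2KB clusters; the RX DMA engine fills a ring
	 * according to received frame size, so each ring gets its own
	 * tag, per-descriptor maps and a spare map used to replace
	 * buffers without losing the old one on a load failure.
	 */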
	/* Initialize RX ring 0. */
	rbd = &sc->sc_rx_data[0];
	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128;
	rbd->rbd_newbuf = et_newbuf_hdr;
	rbd->rbd_discard = et_rxbuf_discard;
	rbd->rbd_softc = sc;
	rbd->rbd_ring = &sc->sc_rx_ring[0];
	/* Create DMA maps for mini RX buffers, ring 0. */
	for (i = 0; i < ET_RX_NDESC; i++) {
		error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
		    &rbd->rbd_buf[i].rb_dmap);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create DMA map for mini RX mbufs\n");
			return (error);
		}
	}

	/* Create a spare DMA map for mini RX buffers, ring 0. */
	error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
	    &sc->sc_rx_mini_sparemap);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create spare DMA map for mini RX mbuf\n");
		return (error);
	}

	/* Initialize RX ring 1. */
	rbd = &sc->sc_rx_data[1];
	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048;
	rbd->rbd_newbuf = et_newbuf_cluster;
	rbd->rbd_discard = et_rxbuf_discard;
	rbd->rbd_softc = sc;
	rbd->rbd_ring = &sc->sc_rx_ring[1];
	/* Create DMA maps for standard RX buffers, ring 1. */
	for (i = 0; i < ET_RX_NDESC; i++) {
		error = bus_dmamap_create(sc->sc_rx_tag, 0,
		    &rbd->rbd_buf[i].rb_dmap);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create DMA map for standard RX mbufs\n");
			return (error);
		}
	}

	/* Create a spare DMA map for standard RX buffers, ring 1. */
	error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create spare DMA map for RX mbuf\n");
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	tbd = &sc->sc_tx_data;
	for (i = 0; i < ET_TX_NDESC; i++) {
		error = bus_dmamap_create(sc->sc_tx_tag, 0,
		    &tbd->tbd_buf[i].tb_dmap);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create DMA map for TX mbufs\n");
			return (error);
		}
	}

	return (0);
}

static void
et_dma_free(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_rxdesc_ring *rx_ring;
	struct et_txstatus_data *txsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxstatus_data *rxsd;
	struct et_rxbuf_data *rbd;
	struct et_txbuf_data *tbd;
	int i;

	/* Destroy DMA maps for mini RX buffers, ring 0. */
	rbd = &sc->sc_rx_data[0];
	for (i = 0; i < ET_RX_NDESC; i++) {
		if (rbd->rbd_buf[i].rb_dmap) {
			bus_dmamap_destroy(sc->sc_rx_mini_tag,
			    rbd->rbd_buf[i].rb_dmap);
			rbd->rbd_buf[i].rb_dmap = NULL;
		}
	}
	if (sc->sc_rx_mini_sparemap) {
		bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap);
		sc->sc_rx_mini_sparemap = NULL;
	}
	if (sc->sc_rx_mini_tag) {
		bus_dma_tag_destroy(sc->sc_rx_mini_tag);
		sc->sc_rx_mini_tag = NULL;
	}

	/* Destroy DMA maps for standard RX buffers, ring 1. */
	rbd = &sc->sc_rx_data[1];
	for (i = 0; i < ET_RX_NDESC; i++) {
		if (rbd->rbd_buf[i].rb_dmap) {
			bus_dmamap_destroy(sc->sc_rx_tag,
			    rbd->rbd_buf[i].rb_dmap);
			rbd->rbd_buf[i].rb_dmap = NULL;
		}
	}
	if (sc->sc_rx_sparemap) {
		bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap);
		sc->sc_rx_sparemap = NULL;
	}
	if (sc->sc_rx_tag) {
		bus_dma_tag_destroy(sc->sc_rx_tag);
		sc->sc_rx_tag = NULL;
	}

	/* Destroy DMA maps for TX buffers. */
	tbd = &sc->sc_tx_data;
	for (i = 0; i < ET_TX_NDESC; i++) {
		if (tbd->tbd_buf[i].tb_dmap) {
			bus_dmamap_destroy(sc->sc_tx_tag,
			    tbd->tbd_buf[i].tb_dmap);
			tbd->tbd_buf[i].tb_dmap = NULL;
		}
	}
	if (sc->sc_tx_tag) {
		bus_dma_tag_destroy(sc->sc_tx_tag);
		sc->sc_tx_tag = NULL;
	}

	/* Destroy mini RX ring, ring 0. */
	rx_ring = &sc->sc_rx_ring[0];
	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
	    rx_ring->rr_dmap, &rx_ring->rr_paddr);
	/* Destroy standard RX ring, ring 1. */
	rx_ring = &sc->sc_rx_ring[1];
	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
	    rx_ring->rr_dmap, &rx_ring->rr_paddr);
	/* Destroy RX stat ring. */
	rxst_ring = &sc->sc_rxstat_ring;
	et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
	    rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr);
	/* Destroy RX status block. */
	rxsd = &sc->sc_rx_status;
	et_dma_ring_free(sc, &rxsd->rxsd_dtag, (void *)&rxsd->rxsd_status,
	    rxsd->rxsd_dmap, &rxsd->rxsd_paddr);
	/* Destroy TX ring. */
	tx_ring = &sc->sc_tx_ring;
	et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
	    tx_ring->tr_dmap, &tx_ring->tr_paddr);
	/* Destroy TX status block. */
	txsd = &sc->sc_tx_status;
	et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status,
	    txsd->txsd_dmap, &txsd->txsd_paddr);

	/* Destroy the parent tag. */
	if (sc->sc_dtag) {
		bus_dma_tag_destroy(sc->sc_dtag);
		sc->sc_dtag = NULL;
	}
}

static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

static void
et_intr(void *xsc)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = xsc;
	ET_LOCK(sc);
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	status = CSR_READ_4(sc, ET_INTR_STATUS);
	if ((status & ET_INTRS) == 0)
		goto done;

	/* Disable further interrupts. */
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);

	if (status & (ET_INTR_RXDMA_ERROR | ET_INTR_TXDMA_ERROR)) {
		device_printf(sc->dev, "DMA error(0x%08x) -- resetting\n",
		    status);
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		et_init_locked(sc);
		ET_UNLOCK(sc);
		return;
	}

	if (status & ET_INTR_RXDMA)
		et_rxeof(sc);
	if (status & (ET_INTR_TXDMA | ET_INTR_TIMER))
		et_txeof(sc);
	if (status & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			et_start_locked(ifp);
	}
done:
	ET_UNLOCK(sc);
}

static void
et_init_locked(struct et_softc *sc)
{
	struct ifnet *ifp;
	int error;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/*
	 * Init TX/RX rings
	 */
	et_init_tx_ring(sc);
	error = et_init_rx_ring(sc);
	if (error)
		return;

	error = et_chip_init(sc);
	if (error)
		return;

	/*
	 * Start TX/RX DMA engine
	 */
	error = et_start_rxdma(sc);
	if (error)
		return;
	error = et_start_txdma(sc);
	if (error)
		return;

	/* Enable interrupts. */
	CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->sc_flags &= ~ET_FLAG_LINK;
	et_ifmedia_upd_locked(ifp);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);
}

static void
et_init(void *xsc)
{
	struct et_softc *sc = xsc;

	ET_LOCK(sc);
	et_init_locked(sc);
	ET_UNLOCK(sc);
}

static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct et_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int error, mask, max_framelen;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ET_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				    (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
					et_setmulti(sc);
			} else
				et_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		ET_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ET_LOCK(sc);
			et_setmulti(sc);
			ET_UNLOCK(sc);
		}
		break;

	case SIOCSIFMTU:
		ET_LOCK(sc);
#if 0
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
#endif
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			ET_UNLOCK(sc);
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				et_init_locked(sc);
			}
		}
		ET_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		ET_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= ET_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~ET_CSUM_FEATURES;
		}
		ET_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
et_start_locked(struct ifnet *ifp)
{
	struct et_softc *sc;
	struct mbuf *m_head = NULL;
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	uint32_t tx_ready_pos;
	int enq;

	sc = ifp->if_softc;
	ET_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING ||
	    (sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
	    (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED))
		return;

	/*
	 * The driver does not request a TX completion interrupt for
	 * every queued frame, to avoid generating excessive
	 * interrupts, so it may end up waiting on a completion
	 * interrupt even though some frames have already been
	 * transmitted.  Reclaiming transmitted frames here ensures
	 * the driver sees all available descriptors.
	 */
	tbd = &sc->sc_tx_data;
	if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3)
		et_txeof(sc);

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (et_encap(sc, &m_head)) {
			if (m_head == NULL) {
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				break;
			}
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			if (tbd->tbd_used > 0)
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		enq++;
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		tx_ring = &sc->sc_tx_ring;
		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
		    BUS_DMASYNC_PREWRITE);
		tx_ready_pos = tx_ring->tr_ready_index &
		    ET_TX_READY_POS_INDEX_MASK;
		if (tx_ring->tr_ready_wrap)
			tx_ready_pos |= ET_TX_READY_POS_WRAP;
		CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
		sc->watchdog_timer = 5;
	}
}

static void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc;

	sc = ifp->if_softc;
	ET_LOCK(sc);
	et_start_locked(ifp);
	ET_UNLOCK(sc);
}

static int
et_watchdog(struct et_softc *sc)
{
	uint32_t status;

	ET_LOCK_ASSERT(sc);

	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		return (0);

	bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap,
	    BUS_DMASYNC_POSTREAD);
	status = le32toh(*(sc->sc_tx_status.txsd_status));
	if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n",
	    status);

	if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	et_init_locked(sc);
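
	/*
	 * EJUSTRETURN tells et_tick() not to re-arm the callout;
	 * et_init_locked() above already rescheduled it.
	 */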
	return (EJUSTRETURN);
}

static int
et_stop_rxdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
	    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(sc->ifp, "can't stop RX DMA engine\n");
		return (ETIMEDOUT);
	}
	return (0);
}

static int
et_stop_txdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return (0);
}

static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txbuf *tb;
	int i;

	tbd = &sc->sc_tx_data;
	tx_ring = &sc->sc_tx_ring;
	for (i = 0; i < ET_TX_NDESC; ++i) {
		tb = &tbd->tbd_buf[i];
		if (tb->tb_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}
}

static void
et_free_rx_ring(struct et_softc *sc)
{
	struct et_rxbuf_data *rbd;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxbuf *rb;
	int i;

	/* Ring 0 */
	rx_ring = &sc->sc_rx_ring[0];
	rbd = &sc->sc_rx_data[0];
	for (i = 0; i < ET_RX_NDESC; ++i) {
		rb = &rbd->rbd_buf[i];
		if (rb->rb_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rx_mini_tag, rx_ring->rr_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
			m_freem(rb->rb_mbuf);
			rb->rb_mbuf = NULL;
		}
	}

	/* Ring 1 */
	rx_ring = &sc->sc_rx_ring[1];
	rbd = &sc->sc_rx_data[1];
	for (i = 0; i < ET_RX_NDESC; ++i) {
		rb = &rbd->rbd_buf[i];
		if (rb->rb_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rx_tag, rx_ring->rr_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
			m_freem(rb->rb_mbuf);
			rb->rb_mbuf = NULL;
		}
	}
}

static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	int i, count;

	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = 0;
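
	/*
	 * Build the 128-bit multicast hash filter: bits 29:23 of the
	 * big-endian CRC32 of each address select one of 128 filter
	 * bits, kept 32 per ET_MULTI_HASH register.
	 */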
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		uint32_t *hp, h;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
	}
	if_maddr_runlock(ifp);

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	ifp = sc->ifp;

	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		rxmem_size = ET_MEM_SIZE -
		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);
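
	/*
	 * Carve the internal SRAM: the RX queue occupies
	 * [ET_QUEUE_ADDR_START, rxq_end] and the TX queue the
	 * remainder up to ET_QUEUE_ADDR_END.
	 */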
	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	if ((sc->sc_flags & ET_FLAG_MSI) == 0)
		CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return (error);

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return (error);

	return (0);
}

static void
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txstatus_data *txsd;

	tx_ring = &sc->sc_tx_ring;
	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_PREWRITE);

	tbd = &sc->sc_tx_data;
	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	txsd = &sc->sc_tx_status;
	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxbuf_data *rbd;
	int i, error, n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		rbd = &sc->sc_rx_data[n];
		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i);
			if (error) {
				if_printf(sc->ifp, "%d ring %d buf, "
				    "newbuf failed: %d\n", n, i, error);
				return (error);
			}
		}
	}

	rxsd = &sc->sc_rx_status;
	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	rxst_ring = &sc->sc_rxstat_ring;
	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init RX DMA engine\n");
		return (error);
	}

	/*
	 * Install RX status
	 */
	rxsd = &sc->sc_rx_status;
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	rxst_ring = &sc->sc_rxstat_ring;
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
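
	/*
	 * With the values programmed above the chip coalesces RX
	 * interrupts: one is raised after sc_rx_intr_npkts packets,
	 * or earlier once sc_rx_intr_delay (in 10 usec units)
	 * expires with packets pending.
	 */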

	return (0);
}

static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txstatus_data *txsd;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init TX DMA engine\n");
		return (error);
	}

	/*
	 * Install TX descriptor ring
	 */
	tx_ring = &sc->sc_tx_ring;
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	txsd = &sc->sc_tx_status;
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return (0);
}

static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp;
	const uint8_t *eaddr;
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = (56 << ET_IPG_NONB2B_1_SHIFT) |
	    (88 << ET_IPG_NONB2B_2_SHIFT) |
	    (80 << ET_IPG_MINIFG_SHIFT) |
	    (96 << ET_IPG_B2B_SHIFT);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	ifp = sc->ifp;
	eaddr = IF_LLADDR(ifp);
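	/*
	 * ET_MAC_ADDR1 takes bytes 2-5 of the station address;
	 * ET_MAC_ADDR2 takes bytes 0-1 in its upper 16 bits.
	 */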
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp;
	const uint8_t *eaddr;
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	ifp = sc->ifp;
	eaddr = IF_LLADDR(ifp);
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLP's
		 * that the ET1310 uses.
		 */
		val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else
		val = 0;

	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generation)
	 */
	val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
	    ET_PKTFILT_MINLEN_MASK;
	val |= ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

static void
et_init_txmac(struct et_softc *sc)
{

	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/*
	 * Initialize pause time.
	 * This register should be set before XON/XOFF frame is
	 * sent by driver.
	 */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0 << ET_TXMAC_FLOWCTRL_CFPT_SHIFT);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val;

	val = (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(sc->ifp, "can't start RX DMA engine\n");
		return (ETIMEDOUT);
	}
	return (0);
}

static int
et_start_txdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return (0);
}

static void
et_rxeof(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxbuf_data *rbd;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxstat *st;
	struct ifnet *ifp;
	struct mbuf *m;
	uint32_t rxstat_pos, rxring_pos;
	uint32_t rxst_info1, rxst_info2, rxs_stat_ring;
	int buflen, buf_idx, npost[2], ring_idx;
	int rxst_index, rxst_wrap;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	rxsd = &sc->sc_rx_status;
	rxst_ring = &sc->sc_rxstat_ring;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_POSTREAD);

	npost[0] = npost[1] = 0;
	rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
	    ET_RXS_STATRING_INDEX_SHIFT;

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
		rxst_info1 = le32toh(st->rxst_info1);
		rxst_info2 = le32toh(st->rxst_info2);
		buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
		    ET_RXST_INFO2_LEN_SHIFT;
		buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
		    ET_RXST_INFO2_BUFIDX_SHIFT;
		ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
		    ET_RXST_INFO2_RINGIDX_SHIFT;

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
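
		/*
		 * Ring positions are an index plus a wrap bit that
		 * flips on every pass over the ring; writing
		 * index|wrap back to the position register
		 * acknowledges the entries just consumed.
		 */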
		rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;
		if ((rxst_info1 & ET_RXST_INFO1_OK) == 0) {
			/* Discard errored frame. */
			rbd->rbd_discard(rbd, buf_idx);
		} else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) {
			/* No available mbufs, discard it. */
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			rbd->rbd_discard(rbd, buf_idx);
		} else {
			buflen -= ETHER_CRC_LEN;
			if (buflen < ETHER_HDR_LEN) {
				m_freem(m);
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;
				ET_UNLOCK(sc);
				ifp->if_input(ifp, m);
				ET_LOCK(sc);
			}
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];
		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp,
			    "WARNING!! ring %d, buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		MPASS(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_PREREAD);
}

static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txdesc *td;
	struct mbuf *m;
	bus_dma_segment_t segs[ET_NSEG_MAX];
	bus_dmamap_t map;
	uint32_t csum_flags, last_td_ctrl2;
	int error, i, idx, first_idx, last_idx, nsegs;

	tx_ring = &sc->sc_tx_ring;
	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
	tbd = &sc->sc_tx_data;
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
	    0);
	if (error == EFBIG) {
		m = m_collapse(*m0, M_NOWAIT, ET_NSEG_MAX);
		if (m == NULL) {
			m_freem(*m0);
			*m0 = NULL;
			return (ENOMEM);
		}
		*m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs,
		    &nsegs, 0);
		if (error != 0) {
			m_freem(*m0);
			*m0 = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	/* Check for descriptor overruns. */
	if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
		bus_dmamap_unload(sc->sc_tx_tag, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += nsegs;
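
	/*
	 * TX interrupt moderation: only roughly every
	 * sc_tx_intr_nsegs-th queued segment requests a completion
	 * interrupt; the quotient below changes exactly when the
	 * running segment count crosses the next multiple.
	 */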
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	m = *m0;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_IP;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_UDP;
		else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_TCP;
	}
	last_idx = -1;
	for (i = 0; i < nsegs; ++i) {
		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
		td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
		td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
		if (i == nsegs - 1) {
			/* Last frag */
			td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
			last_idx = idx;
		} else
			td->td_ctrl2 = htole32(csum_flags);

		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	/* First frag */
	td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);

	MPASS(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += nsegs;
	MPASS(tbd->tbd_used <= ET_TX_NDESC);

	return (0);
}

static void
et_txeof(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txbuf *tb;
	struct ifnet *ifp;
	uint32_t tx_done;
	int end, wrap;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	tx_ring = &sc->sc_tx_ring;
	tbd = &sc->sc_tx_data;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_POSTWRITE);

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];
		if (tb->tb_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		MPASS(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		sc->watchdog_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
et_tick(void *xsc)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;

	sc = xsc;
	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	mii = device_get_softc(sc->sc_miibus);

	mii_tick(mii);
	et_stats_update(sc);
	if (et_watchdog(sc) == EJUSTRETURN)
		return;
	callout_reset(&sc->sc_tick, hz, et_tick, sc);
}

static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_softc *sc;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dmap;
	int nsegs;

	MPASS(buf_idx < ET_RX_NDESC);
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
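	/* Reserve 2 bytes (ETHER_ALIGN) so the IP header is 4-byte aligned. */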
	m_adj(m, ETHER_ALIGN);

	sc = rbd->rbd_softc;
	rb = &rbd->rbd_buf[buf_idx];

	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m,
	    segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rb->rb_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
	}
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rx_sparemap;
	sc->sc_rx_sparemap = dmap;
	bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);

	rb->rb_mbuf = m;
	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

static void
et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_rxdesc *desc;

	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
}

static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_softc *sc;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dmap;
	int nsegs;

	MPASS(buf_idx < ET_RX_NDESC);
	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MHLEN;
	m_adj(m, ETHER_ALIGN);

	sc = rbd->rbd_softc;
	rb = &rbd->rbd_buf[buf_idx];

	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rb->rb_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
	}
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rx_mini_sparemap;
	sc->sc_rx_mini_sparemap = dmap;
	bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);

	rb->rb_mbuf = m;
	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

#define	ET_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	ET_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2439 * Create sysctl tree
2442 et_add_sysctls(struct et_softc * sc)
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children, *parent;
	struct sysctl_oid *tree;
	struct et_hw_stats *stats;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
	    "RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
	    "RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
	    "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");

	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "ET statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* TX/RX statistics. */
	stats = &sc->sc_stats;
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_64", &stats->pkts_64,
	    "0 to 64 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_65_127", &stats->pkts_65,
	    "65 to 127 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_128_255", &stats->pkts_128,
	    "128 to 255 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_256_511", &stats->pkts_256,
	    "256 to 511 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_512_1023", &stats->pkts_512,
	    "512 to 1023 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1024_1518", &stats->pkts_1024,
	    "1024 to 1518 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1519_1522", &stats->pkts_1519,
	    "1519 to 1522 byte frames");

	/* RX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "RX MAC statistics");
	children = SYSCTL_CHILDREN(tree);
	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
	    &stats->rx_bytes, "Good bytes");
	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
	    &stats->rx_frames, "Good frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
	    &stats->rx_mcast, "Multicast frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
	    &stats->rx_bcast, "Broadcast frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
	    &stats->rx_control, "Control frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
	    &stats->rx_pause, "Pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "unknown_control",
	    &stats->rx_unknown_control, "Unknown control frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "len_errs",
	    &stats->rx_lenerrs, "Frames with length mismatch");
	ET_SYSCTL_STAT_ADD32(ctx, children, "code_errs",
	    &stats->rx_codeerrs, "Frames with code errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "cs_errs",
	    &stats->rx_cserrs, "Frames with carrier sense errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "runts",
	    &stats->rx_runts, "Runt (too short) frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
	    &stats->rx_oversize, "Oversized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
	    &stats->rx_jabbers, "Frames with jabber errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
	    &stats->rx_drop, "Dropped frames");

	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "TX MAC statistics");
	children = SYSCTL_CHILDREN(tree);
	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
	    &stats->tx_bytes, "Good bytes");
	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
	    &stats->tx_frames, "Good frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
	    &stats->tx_mcast, "Multicast frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
	    &stats->tx_bcast, "Broadcast frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
	    &stats->tx_pause, "Pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "deferred",
	    &stats->tx_deferred, "Deferred frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_deferred",
	    &stats->tx_excess_deferred, "Excessively deferred frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_colls",
	    &stats->tx_excess_colls, "Excess collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "total_colls",
	    &stats->tx_total_colls, "Total collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause_honored",
	    &stats->tx_pause_honored, "Honored pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
	    &stats->tx_drop, "Dropped frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
	    &stats->tx_jabbers, "Frames with jabber errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
	    &stats->tx_crcerrs, "Frames with CRC errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
	    &stats->tx_control, "Control frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
	    &stats->tx_oversize, "Oversized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "undersize",
	    &stats->tx_undersize, "Undersized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
	    &stats->tx_fragments, "Fragmented frames");
}

#undef ET_SYSCTL_STAT_ADD32
#undef ET_SYSCTL_STAT_ADD64
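
/*
 * All of the statistics nodes created above live under the device's
 * sysctl tree and can be inspected with sysctl(8), e.g. for a
 * hypothetical unit 0:
 *
 *	# sysctl dev.et.0.stats
 *	# sysctl dev.et.0.stats.rx.frames
 */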
static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	int error, v;

	sc = (struct et_softc *)arg1;
	ifp = sc->ifp;
	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (v <= 0)
		return (EINVAL);
	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
	return (error);
}
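
/*
 * Example (hypothetical unit number 0): raise the moderation threshold
 * to 64 packets per RX interrupt:
 *
 *	# sysctl dev.et.0.rx_intr_npkts=64
 *
 * The handler programs ET_RX_INTR_NPKTS immediately only while the
 * interface is running; otherwise just the cached softc value changes.
 */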
static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	int error, v;

	sc = (struct et_softc *)arg1;
	ifp = sc->ifp;
	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (v <= 0)
		return (EINVAL);
	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
	return (error);
}
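
/*
 * The delay is expressed in units of 10 microseconds, so e.g. (unit
 * number again hypothetical):
 *
 *	# sysctl dev.et.0.rx_intr_delay=20
 *
 * requests an RX interrupt delay of roughly 200us.
 */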
static void
et_stats_update(struct et_softc *sc)
{
	struct et_hw_stats *stats;

	stats = &sc->sc_stats;

	/* TX/RX frame-size counters. */
	stats->pkts_64 += CSR_READ_4(sc, ET_STAT_PKTS_64);
	stats->pkts_65 += CSR_READ_4(sc, ET_STAT_PKTS_65_127);
	stats->pkts_128 += CSR_READ_4(sc, ET_STAT_PKTS_128_255);
	stats->pkts_256 += CSR_READ_4(sc, ET_STAT_PKTS_256_511);
	stats->pkts_512 += CSR_READ_4(sc, ET_STAT_PKTS_512_1023);
	stats->pkts_1024 += CSR_READ_4(sc, ET_STAT_PKTS_1024_1518);
	stats->pkts_1519 += CSR_READ_4(sc, ET_STAT_PKTS_1519_1522);

	/* RX MAC counters. */
	stats->rx_bytes += CSR_READ_4(sc, ET_STAT_RX_BYTES);
	stats->rx_frames += CSR_READ_4(sc, ET_STAT_RX_FRAMES);
	stats->rx_crcerrs += CSR_READ_4(sc, ET_STAT_RX_CRC_ERR);
	stats->rx_mcast += CSR_READ_4(sc, ET_STAT_RX_MCAST);
	stats->rx_bcast += CSR_READ_4(sc, ET_STAT_RX_BCAST);
	stats->rx_control += CSR_READ_4(sc, ET_STAT_RX_CTL);
	stats->rx_pause += CSR_READ_4(sc, ET_STAT_RX_PAUSE);
	stats->rx_unknown_control += CSR_READ_4(sc, ET_STAT_RX_UNKNOWN_CTL);
	stats->rx_alignerrs += CSR_READ_4(sc, ET_STAT_RX_ALIGN_ERR);
	stats->rx_lenerrs += CSR_READ_4(sc, ET_STAT_RX_LEN_ERR);
	stats->rx_codeerrs += CSR_READ_4(sc, ET_STAT_RX_CODE_ERR);
	stats->rx_cserrs += CSR_READ_4(sc, ET_STAT_RX_CS_ERR);
	stats->rx_runts += CSR_READ_4(sc, ET_STAT_RX_RUNT);
	stats->rx_oversize += CSR_READ_4(sc, ET_STAT_RX_OVERSIZE);
	stats->rx_fragments += CSR_READ_4(sc, ET_STAT_RX_FRAG);
	stats->rx_jabbers += CSR_READ_4(sc, ET_STAT_RX_JABBER);
	stats->rx_drop += CSR_READ_4(sc, ET_STAT_RX_DROP);

	/* TX MAC counters. */
	stats->tx_bytes += CSR_READ_4(sc, ET_STAT_TX_BYTES);
	stats->tx_frames += CSR_READ_4(sc, ET_STAT_TX_FRAMES);
	stats->tx_mcast += CSR_READ_4(sc, ET_STAT_TX_MCAST);
	stats->tx_bcast += CSR_READ_4(sc, ET_STAT_TX_BCAST);
	stats->tx_pause += CSR_READ_4(sc, ET_STAT_TX_PAUSE);
	stats->tx_deferred += CSR_READ_4(sc, ET_STAT_TX_DEFER);
	stats->tx_excess_deferred += CSR_READ_4(sc, ET_STAT_TX_EXCESS_DEFER);
	stats->tx_single_colls += CSR_READ_4(sc, ET_STAT_TX_SINGLE_COL);
	stats->tx_multi_colls += CSR_READ_4(sc, ET_STAT_TX_MULTI_COL);
	stats->tx_late_colls += CSR_READ_4(sc, ET_STAT_TX_LATE_COL);
	stats->tx_excess_colls += CSR_READ_4(sc, ET_STAT_TX_EXCESS_COL);
	stats->tx_total_colls += CSR_READ_4(sc, ET_STAT_TX_TOTAL_COL);
	stats->tx_pause_honored += CSR_READ_4(sc, ET_STAT_TX_PAUSE_HONOR);
	stats->tx_drop += CSR_READ_4(sc, ET_STAT_TX_DROP);
	stats->tx_jabbers += CSR_READ_4(sc, ET_STAT_TX_JABBER);
	stats->tx_crcerrs += CSR_READ_4(sc, ET_STAT_TX_CRC_ERR);
	stats->tx_control += CSR_READ_4(sc, ET_STAT_TX_CTL);
	stats->tx_oversize += CSR_READ_4(sc, ET_STAT_TX_OVERSIZE);
	stats->tx_undersize += CSR_READ_4(sc, ET_STAT_TX_UNDERSIZE);
	stats->tx_fragments += CSR_READ_4(sc, ET_STAT_TX_FRAG);
}
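
/*
 * et_stats_update() folds each hardware MAC counter into the driver's
 * software copy in sc_stats; both the sysctl statistics tree above and
 * et_get_counter() below report these accumulated software values
 * rather than the raw registers.
 */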
static uint64_t
et_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct et_softc *sc;
	struct et_hw_stats *stats;

	sc = if_getsoftc(ifp);
	stats = &sc->sc_stats;

	switch (cnt) {
	case IFCOUNTER_OPACKETS:
		return (stats->tx_frames);
	case IFCOUNTER_COLLISIONS:
		return (stats->tx_total_colls);
	case IFCOUNTER_OERRORS:
		return (stats->tx_drop + stats->tx_jabbers +
		    stats->tx_crcerrs + stats->tx_excess_deferred +
		    stats->tx_late_colls);
	case IFCOUNTER_IPACKETS:
		return (stats->rx_frames);
	case IFCOUNTER_IERRORS:
		return (stats->rx_crcerrs + stats->rx_alignerrs +
		    stats->rx_lenerrs + stats->rx_codeerrs + stats->rx_cserrs +
		    stats->rx_runts + stats->rx_jabbers + stats->rx_drop);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
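
/*
 * Counters that cannot be derived from the MAC statistics above fall
 * through to the stack's generic software counters via
 * if_get_counter_default().
 */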
static int
et_suspend(device_t dev)
{
	struct et_softc *sc;
	uint32_t pmcfg;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		et_stop(sc);
	/* Disable all clocks and put PHY into COMA. */
	pmcfg = CSR_READ_4(sc, ET_PM);
	pmcfg &= ~(EM_PM_GIGEPHY_ENB | ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE |
	    ET_PM_RXCLK_GATE);
	pmcfg |= ET_PM_PHY_SW_COMA;
	CSR_WRITE_4(sc, ET_PM, pmcfg);
	ET_UNLOCK(sc);

	return (0);
}
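
/*
 * Gating the MAC clocks and forcing the PHY into COMA mode keeps the
 * suspended chip's power consumption to a minimum; et_resume() below
 * rewrites ET_PM to undo both.
 */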
static int
et_resume(device_t dev)
{
	struct et_softc *sc;
	uint32_t pmcfg;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	/* Take PHY out of COMA and enable clocks. */
	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
		pmcfg |= EM_PM_GIGEPHY_ENB;
	CSR_WRITE_4(sc, ET_PM, pmcfg);
	if ((sc->ifp->if_flags & IFF_UP) != 0)
		et_init_locked(sc);
	ET_UNLOCK(sc);

	return (0);
}
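
/*
 * Note that the resume path only restarts the interface when it was
 * administratively up (IFF_UP); an interface that was down stays down
 * across a suspend/resume cycle.
 */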