/*
 * Copyright (c) 2007 Sepherosa Ziehau.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/et/if_etreg.h>
#include <dev/et/if_etvar.h>

#include "miibus_if.h"

MODULE_DEPEND(et, pci, 1, 1, 1);
MODULE_DEPEND(et, ether, 1, 1, 1);
MODULE_DEPEND(et, miibus, 1, 1, 1);

static int	msi_disable = 0;
TUNABLE_INT("hw.et.msi_disable", &msi_disable);
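/*
 * Boot-time tunable: setting hw.et.msi_disable="1" in /boot/loader.conf
 * makes et_attach() skip pci_alloc_msi() and fall back to a shared INTx
 * interrupt line.
 */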
#define	ET_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static int	et_probe(device_t);
static int	et_attach(device_t);
static int	et_detach(device_t);
static int	et_shutdown(device_t);
static int	et_suspend(device_t);
static int	et_resume(device_t);

static int	et_miibus_readreg(device_t, int, int);
static int	et_miibus_writereg(device_t, int, int, int);
static void	et_miibus_statchg(device_t);

static void	et_init_locked(struct et_softc *);
static void	et_init(void *);
static int	et_ioctl(struct ifnet *, u_long, caddr_t);
static void	et_start_locked(struct ifnet *);
static void	et_start(struct ifnet *);
static int	et_watchdog(struct et_softc *);
static int	et_ifmedia_upd_locked(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	et_add_sysctls(struct et_softc *);
static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void	et_intr(void *);
static void	et_rxeof(struct et_softc *);
static void	et_txeof(struct et_softc *);

static int	et_dma_alloc(struct et_softc *);
static void	et_dma_free(struct et_softc *);
static void	et_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t,
		    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *,
		    const char *);
static void	et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **,
		    bus_dmamap_t *);
static void	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int);
static void	et_rxbuf_discard(struct et_rxbuf_data *, int);

static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static void	et_reset(struct et_softc *);
static int	et_bus_config(struct et_softc *);
static void	et_get_eaddr(device_t, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);
static void	et_stats_update(struct et_softc *);

static const struct et_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }
};

static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),
	DEVMETHOD(device_suspend,	et_suspend),
	DEVMETHOD(device_resume,	et_resume),

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	{ 0, 0 }
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

static devclass_t et_devclass;

DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);

static int	et_rx_intr_npkts = 32;
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 126;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
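/*
 * The interrupt-moderation defaults above can be overridden at boot time
 * from /boot/loader.conf, for example:
 *
 *	hw.et.rx_intr_npkts="64"	# packets per RX interrupt
 *	hw.et.rx_intr_delay="10"	# RX interrupt delay, in 10us units
 *
 * rx_intr_npkts and rx_intr_delay can also be changed at runtime through
 * the per-device sysctl nodes created in et_add_sysctls().
 */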
static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}

static int
et_attach(device_t dev)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int cap, error, msic;

	sc = device_get_softc(dev);
	sc->dev = dev;
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_rid = ET_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		error = ENXIO;
		goto fail;
	}

	if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) == 0) {
		sc->sc_expcap = cap;
		sc->sc_flags |= ET_FLAG_PCIE;
		msic = pci_msi_count(dev);
		if (bootverbose)
			device_printf(dev, "MSI count: %d\n", msic);
		if (msic > 0 && msi_disable == 0) {
			msic = 1;
			if (pci_alloc_msi(dev, &msic) == 0) {
				if (msic == 1) {
					device_printf(dev, "Using %d MSI message\n",
					    msic);
					sc->sc_flags |= ET_FLAG_MSI;
				} else
					pci_release_msi(dev);
			}
		}
	}

	if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
		sc->sc_irq_rid = 0;
		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	} else {
		sc->sc_irq_rid = 1;
		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->sc_irq_rid, RF_ACTIVE);
	}
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	if (pci_get_device(dev) == PCI_PRODUCT_LUCENT_ET1310_FAST)
		sc->sc_flags |= ET_FLAG_FASTETHER;

	error = et_bus_config(sc);
	if (error)
		goto fail;

	et_get_eaddr(dev, eaddr);

	CSR_WRITE_4(sc, ET_PM,
	    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	error = et_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_snd.ifq_drv_maxlen = ET_TX_NDESC - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	et_chip_attach(sc);

	error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
	    et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, eaddr);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, et_intr, sc, &sc->sc_irq_handle);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "can't setup intr\n");
		goto fail;
	}

	et_add_sysctls(sc);

	return (0);
fail:
	et_detach(dev);
	return (error);
}

static int
et_detach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		ether_ifdetach(sc->ifp);
		ET_LOCK(sc);
		et_stop(sc);
		ET_UNLOCK(sc);
		callout_drain(&sc->sc_tick);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_handle != NULL)
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
	if (sc->sc_irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
	if ((sc->sc_flags & ET_FLAG_MSI) != 0)
		pci_release_msi(dev);
	if (sc->sc_mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem_res), sc->sc_mem_res);

	if (sc->ifp != NULL)
		if_free(sc->ifp);

	et_dma_free(sc);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static int
et_shutdown(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	ET_LOCK(sc);
	et_stop(sc);
	ET_UNLOCK(sc);
	return (0);
}

static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
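	/*
	 * Poll the MII indicator register until the pending read
	 * completes; both the BUSY and INVALID bits must clear before
	 * the value latched in ET_MII_STAT can be trusted.
	 */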
#define	NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
		    "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = val & ET_MII_STAT_VALUE_MASK;

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return (ret);
}

static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL,
	    (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);

#define	NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
		    "write phy %d, reg %d timed out\n", phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return (0);
}

static void
et_miibus_statchg(device_t dev)
{
	struct et_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t cfg1, cfg2, ctrl;
	int i;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->sc_miibus);
	ifp = sc->ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->sc_flags &= ~ET_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->sc_flags |= ET_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
				sc->sc_flags |= ET_FLAG_LINK;
			break;
		}
	}

	/* XXX Stop TX/RX MAC? */
	if ((sc->sc_flags & ET_FLAG_LINK) == 0)
		return;

	/* Program MACs with resolved speed/duplex/flow-control. */
	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
	cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
	cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
	    ET_MAC_CFG2_PREAMBLE_LEN_MASK);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		cfg2 |= ET_MAC_CFG2_FDX;
		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
			cfg1 |= ET_MAC_CFG1_TXFLOW;
		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
			cfg1 |= ET_MAC_CFG1_RXFLOW;
	} else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
	cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);
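	/*
	 * The TXEN/RXEN bits just written are synchronized into the
	 * MAC's TX/RX clock domains; wait for the SYNC_TXEN/SYNC_RXEN
	 * status bits to confirm both paths are really enabled before
	 * marking TX/RX usable.
	 */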
#define	NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;
		DELAY(100);
	}
	if (i == NRETRY)
		if_printf(ifp, "can't enable RX/TX\n");
	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY
}

static int
et_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct mii_softc *miisc;

	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		mii_phy_reset(miisc);
	return (mii_mediachg(mii));
}

static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	int res;

	ET_LOCK(sc);
	res = et_ifmedia_upd_locked(ifp);
	ET_UNLOCK(sc);

	return (res);
}

static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	ET_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		ET_UNLOCK(sc);
		return;
	}

	mii = device_get_softc(sc->sc_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ET_UNLOCK(sc);
}

static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	ET_LOCK_ASSERT(sc);

	callout_stop(&sc->sc_tick);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);

	CSR_WRITE_4(sc, ET_MAC_CFG1, CSR_READ_4(sc, ET_MAC_CFG1) & ~(
	    ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN));
	DELAY(100);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);
	et_stats_update(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	sc->watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

static int
et_bus_config(struct et_softc *sc)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
		return (ENXIO);
	}

	if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
		return (0);

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size.
	 */
	val = pci_read_config(sc->dev,
	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CAP, 4);
	max_plsz = val & PCIM_EXP_CAP_MAX_PAYLOAD;
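	/*
	 * The ACK latency and replay timer limits chosen below scale
	 * with the negotiated PCIe maximum payload size; for sizes
	 * beyond 256 bytes the values already programmed into the chip
	 * are read back and left in place.
	 */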
	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(sc->dev,
		    ET_PCIR_REPLAY_TIMER, 2);
		device_printf(sc->dev, "ack latency %u, replay timer %u\n",
		    ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
		    2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
	val &= ~(PCIM_LINK_CAP_L0S_EXIT | PCIM_LINK_CAP_L1_EXIT);
	/* L0s exit latency: 2us; L1 exit latency: 2us. */
	pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_read_config(sc->dev,
	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
	val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_write_config(sc->dev,
	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, val, 2);

	return (0);
}
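/*
 * The ET1310 exposes its factory MAC address in PCI configuration space:
 * the low four bytes live in ET_PCIR_MAC_ADDR0 and the remaining two in
 * ET_PCIR_MAC_ADDR1, least significant byte first, which is why
 * et_get_eaddr() unpacks them 8 bits at a time.
 */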
static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
	uint32_t val;
	int i;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
	for (i = 0; i < 4; ++i)
		eaddr[i] = (val >> (8 * i)) & 0xff;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
	for (; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}

static void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

struct et_dmamap_arg {
	bus_addr_t	et_busaddr;
};

static void
et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct et_dmamap_arg *ctx;

	if (error)
		return;

	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));

	ctx = arg;
	ctx->et_busaddr = segs->ds_addr;
}

static int
et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize,
    bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
    const char *msg)
{
	struct et_dmamap_arg ctx;
	int error;

	error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL,
	    tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create %s dma tag\n", msg);
		return (error);
	}
	/* Allocate DMA'able memory for ring. */
	error = bus_dmamem_alloc(*tag, (void **)ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA'able memory for %s\n", msg);
		return (error);
	}
	/* Load the address of the ring. */
	ctx.et_busaddr = 0;
	error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr,
	    &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load DMA'able memory for %s\n", msg);
		return (error);
	}
	*paddr = ctx.et_busaddr;
	return (0);
}

static void
et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
    bus_dmamap_t *map)
{

	if (*map != NULL)
		bus_dmamap_unload(*tag, *map);
	if (*map != NULL && *ring != NULL) {
		bus_dmamem_free(*tag, *ring, *map);
		*ring = NULL;
		*map = NULL;
	}
	if (*tag) {
		bus_dma_tag_destroy(*tag);
		*tag = NULL;
	}
}

static int
et_dma_alloc(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxstatus_data *rxsd;
	struct et_rxbuf_data *rbd;
	struct et_txbuf_data *tbd;
	struct et_txstatus_data *txsd;
	int i, error;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_dtag);
	if (error != 0) {
		device_printf(sc->dev, "could not allocate parent dma tag\n");
		return (error);
	}

	/* TX ring. */
	tx_ring = &sc->sc_tx_ring;
	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE,
	    &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
	    &tx_ring->tr_paddr, "TX ring");
	if (error)
		return (error);

	/* TX status block. */
	txsd = &sc->sc_tx_status;
	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t),
	    &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap,
	    &txsd->txsd_paddr, "TX status block");
	if (error)
		return (error);
	/* RX ring 0, used to receive small sized frames. */
	rx_ring = &sc->sc_rx_ring[0];
	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
	    &rx_ring->rr_paddr, "RX ring 0");
	rx_ring->rr_posreg = ET_RX_RING0_POS;
	if (error)
		return (error);
	/* RX ring 1, used to store normal sized frames. */
	rx_ring = &sc->sc_rx_ring[1];
	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
	    &rx_ring->rr_paddr, "RX ring 1");
	rx_ring->rr_posreg = ET_RX_RING1_POS;
	if (error)
		return (error);

	/* RX stat ring. */
	rxst_ring = &sc->sc_rxstat_ring;
	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE,
	    &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat,
	    &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring");
	if (error)
		return (error);

	/* RX status block. */
	rxsd = &sc->sc_rx_status;
	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN,
	    sizeof(struct et_rxstatus), &rxsd->rxsd_dtag,
	    (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap,
	    &rxsd->rxsd_paddr, "RX status block");
	if (error)
		return (error);

	/* Create parent DMA tag for mbufs. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_mbuf_dtag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate parent dma tag for mbuf\n");
		return (error);
	}

	/* Create DMA tag for mini RX mbufs to use RX ring 0. */
	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
	    MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag);
	if (error) {
		device_printf(sc->dev, "could not create mini RX dma tag\n");
		return (error);
	}

	/* Create DMA tag for standard RX mbufs to use RX ring 1. */
	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
	    MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag);
	if (error) {
		device_printf(sc->dev, "could not create RX dma tag\n");
		return (error);
	}

	/* Create DMA tag for TX mbufs. */
	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL,
	    &sc->sc_tx_tag);
	if (error) {
		device_printf(sc->dev, "could not create TX dma tag\n");
		return (error);
	}

	/* Initialize RX ring 0. */
	rbd = &sc->sc_rx_data[0];
	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128;
	rbd->rbd_newbuf = et_newbuf_hdr;
	rbd->rbd_discard = et_rxbuf_discard;
	rbd->rbd_softc = sc;
	rbd->rbd_ring = &sc->sc_rx_ring[0];
	/* Create DMA maps for mini RX buffers, ring 0. */
	for (i = 0; i < ET_RX_NDESC; i++) {
		error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
		    &rbd->rbd_buf[i].rb_dmap);
		if (error) {
			device_printf(sc->dev,
			    "could not create DMA map for mini RX mbufs\n");
			return (error);
		}
	}

	/* Create a spare DMA map for mini RX buffers, ring 0. */
	error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
	    &sc->sc_rx_mini_sparemap);
	if (error) {
		device_printf(sc->dev,
		    "could not create spare DMA map for mini RX mbuf\n");
		return (error);
	}

	/* Initialize RX ring 1. */
	rbd = &sc->sc_rx_data[1];
	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048;
	rbd->rbd_newbuf = et_newbuf_cluster;
	rbd->rbd_discard = et_rxbuf_discard;
	rbd->rbd_softc = sc;
	rbd->rbd_ring = &sc->sc_rx_ring[1];
	/* Create DMA maps for standard RX buffers, ring 1. */
	for (i = 0; i < ET_RX_NDESC; i++) {
		error = bus_dmamap_create(sc->sc_rx_tag, 0,
		    &rbd->rbd_buf[i].rb_dmap);
		if (error) {
			device_printf(sc->dev,
994 "could not create DMA map for mini RX mbufs\n");
			return (error);
		}
	}

	/* Create a spare DMA map for standard RX buffers, ring 1. */
	error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap);
	if (error) {
		device_printf(sc->dev,
		    "could not create spare DMA map for RX mbuf\n");
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	tbd = &sc->sc_tx_data;
	for (i = 0; i < ET_TX_NDESC; i++) {
		error = bus_dmamap_create(sc->sc_tx_tag, 0,
		    &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			device_printf(sc->dev,
			    "could not create DMA map for TX mbufs\n");
			return (error);
		}
	}

	return (0);
}

static void
et_dma_free(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_rxdesc_ring *rx_ring;
	struct et_txstatus_data *txsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxstatus_data *rxsd;
	struct et_rxbuf_data *rbd;
	struct et_txbuf_data *tbd;
	int i;

	/* Destroy DMA maps for mini RX buffers, ring 0. */
	rbd = &sc->sc_rx_data[0];
	for (i = 0; i < ET_RX_NDESC; i++) {
		if (rbd->rbd_buf[i].rb_dmap) {
			bus_dmamap_destroy(sc->sc_rx_mini_tag,
			    rbd->rbd_buf[i].rb_dmap);
			rbd->rbd_buf[i].rb_dmap = NULL;
		}
	}
	if (sc->sc_rx_mini_sparemap) {
		bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap);
		sc->sc_rx_mini_sparemap = NULL;
	}
	if (sc->sc_rx_mini_tag) {
		bus_dma_tag_destroy(sc->sc_rx_mini_tag);
		sc->sc_rx_mini_tag = NULL;
	}

	/* Destroy DMA maps for standard RX buffers, ring 1. */
	rbd = &sc->sc_rx_data[1];
	for (i = 0; i < ET_RX_NDESC; i++) {
		if (rbd->rbd_buf[i].rb_dmap) {
			bus_dmamap_destroy(sc->sc_rx_tag,
			    rbd->rbd_buf[i].rb_dmap);
			rbd->rbd_buf[i].rb_dmap = NULL;
		}
	}
	if (sc->sc_rx_sparemap) {
		bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap);
		sc->sc_rx_sparemap = NULL;
	}
	if (sc->sc_rx_tag) {
		bus_dma_tag_destroy(sc->sc_rx_tag);
		sc->sc_rx_tag = NULL;
	}

	/* Destroy DMA maps for TX buffers. */
	tbd = &sc->sc_tx_data;
	for (i = 0; i < ET_TX_NDESC; i++) {
		if (tbd->tbd_buf[i].tb_dmap) {
			bus_dmamap_destroy(sc->sc_tx_tag,
			    tbd->tbd_buf[i].tb_dmap);
			tbd->tbd_buf[i].tb_dmap = NULL;
		}
	}
	if (sc->sc_tx_tag) {
		bus_dma_tag_destroy(sc->sc_tx_tag);
		sc->sc_tx_tag = NULL;
	}

	/* Destroy mini RX ring, ring 0. */
	rx_ring = &sc->sc_rx_ring[0];
	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
	    &rx_ring->rr_dmap);
	/* Destroy standard RX ring, ring 1. */
	rx_ring = &sc->sc_rx_ring[1];
	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
	    &rx_ring->rr_dmap);
	/* Destroy RX stat ring. */
	rxst_ring = &sc->sc_rxstat_ring;
	et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
	    &rxst_ring->rsr_dmap);
	/* Destroy RX status block. */
	rxsd = &sc->sc_rx_status;
	et_dma_ring_free(sc, &rxsd->rxsd_dtag, (void *)&rxsd->rxsd_status,
	    &rxsd->rxsd_dmap);
	/* Destroy TX ring. */
	tx_ring = &sc->sc_tx_ring;
	et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
	    &tx_ring->tr_dmap);
	/* Destroy TX status block. */
	txsd = &sc->sc_tx_status;
	et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status,
	    &txsd->txsd_dmap);

	/* Destroy the parent tag. */
	if (sc->sc_dtag) {
		bus_dma_tag_destroy(sc->sc_dtag);
		sc->sc_dtag = NULL;
	}
}

static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

static void
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp;
	uint32_t intrs;

	ET_LOCK(sc);
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ET_UNLOCK(sc);
		return;
	}

	/* Disable further interrupts. */
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	if ((intrs & ET_INTRS) == 0)
		goto done;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
done:
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			et_start_locked(ifp);
	}
	ET_UNLOCK(sc);
}

static void
et_init_locked(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	int error;

	ET_LOCK_ASSERT(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	et_stop(sc);
	et_reset(sc);

	et_init_tx_ring(sc);
	error = et_init_rx_ring(sc);
	if (error)
		return;

	error = et_chip_init(sc);
	if (error)
		return;

	/*
	 * Start TX/RX DMA engine
	 */
	error = et_start_rxdma(sc);
	if (error)
		return;
	error = et_start_txdma(sc);
	if (error)
		return;

	/* Enable interrupts. */
	CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->sc_flags &= ~ET_FLAG_LINK;
	et_ifmedia_upd_locked(ifp);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);
}

static void
et_init(void *xsc)
{
	struct et_softc *sc = xsc;

	ET_LOCK(sc);
	et_init_locked(sc);
	ET_UNLOCK(sc);
}

static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask, max_framelen;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ET_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				    (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
					et_setmulti(sc);
			} else
				et_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		ET_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ET_LOCK(sc);
			et_setmulti(sc);
			ET_UNLOCK(sc);
		}
		break;

	case SIOCSIFMTU:
		ET_LOCK(sc);
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			ET_UNLOCK(sc);
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			et_init_locked(sc);
		}
		ET_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		ET_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= ET_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~ET_CSUM_FEATURES;
		}
		ET_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
et_start_locked(struct ifnet *ifp)
{
	struct et_softc *sc;
	struct mbuf *m_head = NULL;
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	uint32_t tx_ready_pos;
	int enq;

	sc = ifp->if_softc;
	ET_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING ||
	    (sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
	    (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED))
		return;
	/*
	 * The driver does not request a TX completion interrupt for
	 * every queued frame, to avoid generating excessive interrupts.
	 * This means the driver may wait for a TX completion interrupt
	 * even though some frames were successfully transmitted.
	 * Reclaiming transmitted frames here ensures the driver sees
	 * all available descriptors.
	 */
	tbd = &sc->sc_tx_data;
	if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3)
		et_txeof(sc);

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (et_encap(sc, &m_head)) {
			if (m_head == NULL) {
				ifp->if_oerrors++;
				break;
			}
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			if (tbd->tbd_used > 0)
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		enq++;
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		tx_ring = &sc->sc_tx_ring;
		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
		    BUS_DMASYNC_PREWRITE);
		tx_ready_pos = tx_ring->tr_ready_index &
		    ET_TX_READY_POS_INDEX_MASK;
		if (tx_ring->tr_ready_wrap)
			tx_ready_pos |= ET_TX_READY_POS_WRAP;
		CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
		sc->watchdog_timer = 5;
	}
}

static void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;

	ET_LOCK(sc);
	et_start_locked(ifp);
	ET_UNLOCK(sc);
}

static int
et_watchdog(struct et_softc *sc)
{
	uint32_t status;

	ET_LOCK_ASSERT(sc);

	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		return (0);

	bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap,
	    BUS_DMASYNC_POSTREAD);
	status = le32toh(*(sc->sc_tx_status.txsd_status));
	if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n",
	    status);

	sc->ifp->if_oerrors++;
	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	et_init_locked(sc);
	return (EJUSTRETURN);
}

static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
	    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(sc->ifp, "can't stop RX DMA engine\n");
		return (ETIMEDOUT);
	}
	return (0);
}

static int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return (0);
}

static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txbuf *tb;
	int i;

	tbd = &sc->sc_tx_data;
	tx_ring = &sc->sc_tx_ring;
	for (i = 0; i < ET_TX_NDESC; ++i) {
		tb = &tbd->tbd_buf[i];
		if (tb->tb_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}
}

static void
et_free_rx_ring(struct et_softc *sc)
{
	struct et_rxbuf_data *rbd;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxbuf *rb;
	int i;

	/* Ring 0 */
	rx_ring = &sc->sc_rx_ring[0];
	rbd = &sc->sc_rx_data[0];
	for (i = 0; i < ET_RX_NDESC; ++i) {
		rb = &rbd->rbd_buf[i];
		if (rb->rb_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
			m_freem(rb->rb_mbuf);
			rb->rb_mbuf = NULL;
		}
	}

	/* Ring 1 */
	rx_ring = &sc->sc_rx_ring[1];
	rbd = &sc->sc_rx_data[1];
	for (i = 0; i < ET_RX_NDESC; ++i) {
		rb = &rbd->rbd_buf[i];
		if (rb->rb_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
			m_freem(rb->rb_mbuf);
			rb->rb_mbuf = NULL;
		}
	}
}

static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	uint32_t *hp, h;
	int i, count;

	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	count = 0;
	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;
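		/*
		 * Bits 23-29 of the big-endian CRC pick one of 128 hash
		 * bits; the four 32-bit ET_MULTI_HASH words written
		 * below together form that 128-bit multicast hash table.
		 */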
		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
	}
	if_maddr_runlock(ifp);

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		rxmem_size = ET_MEM_SIZE -
		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);
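	/*
	 * The queue addresses programmed below partition the chip's
	 * internal packet memory into an RX FIFO spanning
	 * [ET_QUEUE_ADDR_START, rxq_end] and a TX FIFO spanning
	 * [rxq_end + 1, ET_QUEUE_ADDR_END].
	 */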
	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	if ((sc->sc_flags & ET_FLAG_MSI) == 0)
		CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return (error);

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return (error);

	return (0);
}

static void
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txstatus_data *txsd;

	tx_ring = &sc->sc_tx_ring;
	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_PREWRITE);

	tbd = &sc->sc_tx_data;
	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	txsd = &sc->sc_tx_status;
	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxbuf_data *rbd;
	int i, error, n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		rbd = &sc->sc_rx_data[n];
		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i);
			if (error) {
				if_printf(sc->ifp, "%d ring %d buf, "
				    "newbuf failed: %d\n", n, i, error);
				return (error);
			}
		}
	}

	rxsd = &sc->sc_rx_status;
	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	rxst_ring = &sc->sc_rxstat_ring;
	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init RX DMA engine\n");
		return (error);
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
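	/*
	 * ET_RXSTAT_MINCNT appears to be a low-water mark on free RX
	 * stat entries; roughly 15% of the ring is reserved here.
	 */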
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return (0);
}

static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init TX DMA engine\n");
		return (error);
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return (0);
}

static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = (56 << ET_IPG_NONB2B_1_SHIFT) |
	    (88 << ET_IPG_NONB2B_2_SHIFT) |
	    (80 << ET_IPG_MINIFG_SHIFT) |
	    (96 << ET_IPG_B2B_SHIFT);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLP's
		 * that the ET1310 uses.
		 */
		val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else
		val = 0;
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);
	/*
	 * Configure runt filtering (may not work on certain chip generations)
	 */
	val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
	    ET_PKTFILT_MINLEN_MASK;
	val |= ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

static void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);
	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);
	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(sc->ifp, "can't start RX DMA engine\n");
		return (ETIMEDOUT);
	}
	return (0);
}

static int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return (0);
}

static void
et_rxeof(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxbuf_data *rbd;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxstat *st;
	struct ifnet *ifp;
	struct mbuf *m;
	uint32_t rxstat_pos, rxring_pos;
	uint32_t rxst_info1, rxst_info2, rxs_stat_ring;
	int buflen, buf_idx, npost[2], ring_idx;
	int rxst_index, rxst_wrap;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	rxsd = &sc->sc_rx_status;
	rxst_ring = &sc->sc_rxstat_ring;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_POSTREAD);
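	/*
	 * The hardware posts one et_rxstat entry per received frame to
	 * the RX stat ring and mirrors its current index/wrap state in
	 * the RX status block; the loop below walks the stat ring until
	 * the driver's software index/wrap pair catches up with the
	 * hardware's.
	 */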
	npost[0] = npost[1] = 0;
	rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
	    ET_RXS_STATRING_INDEX_SHIFT;

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
		rxst_info1 = le32toh(st->rxst_info1);
		rxst_info2 = le32toh(st->rxst_info2);
		buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
		    ET_RXST_INFO2_LEN_SHIFT;
		buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
		    ET_RXST_INFO2_BUFIDX_SHIFT;
		ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
		    ET_RXST_INFO2_RINGIDX_SHIFT;

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;
		if ((rxst_info1 & ET_RXST_INFO1_OK) == 0) {
			/* Discard errored frame. */
			rbd->rbd_discard(rbd, buf_idx);
		} else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) {
			/* No available mbufs, discard it. */
			ifp->if_iqdrops++;
			rbd->rbd_discard(rbd, buf_idx);
		} else {
			buflen -= ETHER_CRC_LEN;
			if (buflen < ETHER_HDR_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;
				ET_UNLOCK(sc);
				ifp->if_input(ifp, m);
				ET_LOCK(sc);
			}
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];
		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp,
			    "WARNING!! ring %d, buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}
		MPASS(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_PREREAD);
}

static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txdesc *td;
	struct mbuf *m;
	bus_dma_segment_t segs[ET_NSEG_MAX];
	bus_dmamap_t map;
	uint32_t csum_flags, last_td_ctrl2;
	int error, i, idx, first_idx, last_idx, nsegs;

	tx_ring = &sc->sc_tx_ring;
	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
	tbd = &sc->sc_tx_data;
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
	    0);
	if (error == EFBIG) {
		m = m_collapse(*m0, M_DONTWAIT, ET_NSEG_MAX);
		if (m == NULL) {
			m_freem(*m0);
			*m0 = NULL;
			return (ENOMEM);
		}
		*m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs,
		    &nsegs, 0);
		if (error != 0) {
			m_freem(*m0);
			*m0 = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	/* Check for descriptor overruns. */
	if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
		bus_dmamap_unload(sc->sc_tx_tag, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
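	/*
	 * TX interrupt moderation: a completion interrupt is requested
	 * on the last descriptor only once every sc_tx_intr_nsegs
	 * queued segments, which is why et_txeof() may run well after
	 * individual frames have been transmitted.
	 */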
	sc->sc_tx += nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	m = *m0;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_IP;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_UDP;
		else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_TCP;
	}
	last_idx = -1;
	for (i = 0; i < nsegs; ++i) {
		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
		td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
		td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
		if (i == nsegs - 1) {
			/* Last frag */
			td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
			last_idx = idx;
		} else
			td->td_ctrl2 = htole32(csum_flags);

		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	/* First frag */
	td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);

	MPASS(last_idx >= 0);
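	/*
	 * The DMA map loaded for this frame started life on the first
	 * descriptor's slot; swap it with the last slot's map so the
	 * mbuf and its map are stored together on the last descriptor,
	 * where et_txeof() will unload and free them.
	 */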
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += nsegs;
	MPASS(tbd->tbd_used <= ET_TX_NDESC);

	return (0);
}

static void
et_txeof(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txbuf *tb;
	struct ifnet *ifp;
	uint32_t tx_done;
	int end, wrap;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	tx_ring = &sc->sc_tx_ring;
	tbd = &sc->sc_tx_data;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_POSTWRITE);

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];
		if (tb->tb_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		MPASS(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		sc->watchdog_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	struct mii_data *mii;

	ET_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->sc_miibus);

	mii_tick(mii);
	et_stats_update(sc);
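	/*
	 * On a TX timeout et_watchdog() resets and re-initializes the
	 * chip and returns EJUSTRETURN; the tick callout is not rearmed
	 * here in that case, presumably because the re-init path has
	 * already restarted it.
	 */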
	if (et_watchdog(sc) == EJUSTRETURN)
		return;
	callout_reset(&sc->sc_tick, hz, et_tick, sc);
}

static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_softc *sc;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dmap;
	int nsegs;

	MPASS(buf_idx < ET_RX_NDESC);
	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	sc = rbd->rbd_softc;
	rb = &rbd->rbd_buf[buf_idx];
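	/*
	 * Load the new mbuf into the preallocated spare map first; if
	 * the load fails, the old mbuf, its map and its descriptor are
	 * left untouched, so the ring never loses a buffer.
	 */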
	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m,
	    segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rb->rb_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
	}
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rx_sparemap;
	sc->sc_rx_sparemap = dmap;
	bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);

	rb->rb_mbuf = m;
	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

static void
et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_rxdesc *desc;

	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
}

static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_softc *sc;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dmap;
	int nsegs;

	MPASS(buf_idx < ET_RX_NDESC);
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MHLEN;
	m_adj(m, ETHER_ALIGN);

	sc = rbd->rbd_softc;
	rb = &rbd->rbd_buf[buf_idx];

	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rb->rb_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
	}
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rx_mini_sparemap;
	sc->sc_rx_mini_sparemap = dmap;
	bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);

	rb->rb_mbuf = m;
	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

#define	ET_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	ET_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

/*
 * Create sysctl tree
 */
static void
et_add_sysctls(struct et_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children, *parent;
	struct sysctl_oid *tree;
	struct et_hw_stats *stats;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
	    "RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
	    "RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
	    "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");

	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "ET statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* TX/RX statistics. */
	stats = &sc->sc_stats;
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_64", &stats->pkts_64,
	    "0 to 64 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_65_127", &stats->pkts_65,
	    "65 to 127 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_128_255", &stats->pkts_128,
	    "128 to 255 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_256_511", &stats->pkts_256,
	    "256 to 511 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_512_1023", &stats->pkts_512,
	    "512 to 1023 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1024_1518", &stats->pkts_1024,
	    "1024 to 1518 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1519_1522", &stats->pkts_1519,
	    "1519 to 1522 byte frames");

	/* RX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "RX MAC statistics");
	children = SYSCTL_CHILDREN(tree);
	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
	    &stats->rx_bytes, "Good bytes");
	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
	    &stats->rx_frames, "Good frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
	    &stats->rx_mcast, "Multicast frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
	    &stats->rx_bcast, "Broadcast frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
	    &stats->rx_control, "Control frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
	    &stats->rx_pause, "Pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "unknown_control",
	    &stats->rx_unknown_control, "Unknown control frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "len_errs",
	    &stats->rx_lenerrs, "Frames with mismatched length");
	ET_SYSCTL_STAT_ADD32(ctx, children, "code_errs",
	    &stats->rx_codeerrs, "Frames with code errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "cs_errs",
	    &stats->rx_cserrs, "Frames with carrier sense errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "runts",
	    &stats->rx_runts, "Runt (too short) frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
	    &stats->rx_oversize, "Oversized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
	    &stats->rx_jabbers, "Frames with jabber errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
	    &stats->rx_drop, "Dropped frames");

	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "TX MAC statistics");
	children = SYSCTL_CHILDREN(tree);
	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
	    &stats->tx_bytes, "Good bytes");
	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
	    &stats->tx_frames, "Good frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
	    &stats->tx_mcast, "Multicast frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
	    &stats->tx_bcast, "Broadcast frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
	    &stats->tx_pause, "Pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "deferred",
	    &stats->tx_deferred, "Deferred frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_deferred",
	    &stats->tx_excess_deferred, "Excessively deferred frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_colls",
	    &stats->tx_excess_colls, "Excess collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "total_colls",
	    &stats->tx_total_colls, "Total collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause_honored",
	    &stats->tx_pause_honored, "Honored pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
	    &stats->tx_drop, "Dropped frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
	    &stats->tx_jabbers, "Frames with jabber errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
	    &stats->tx_crcerrs, "Frames with CRC errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
	    &stats->tx_control, "Control frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
	    &stats->tx_oversize, "Oversized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "undersize",
	    &stats->tx_undersize, "Undersized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
	    &stats->tx_fragments, "Fragmented frames");
}

#undef ET_SYSCTL_STAT_ADD32
#undef ET_SYSCTL_STAT_ADD64
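
/*
 * Handler for the rx_intr_npkts sysctl: validate the requested packet
 * count and, if the interface is running, program it into the
 * controller immediately.
 */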
static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = sc->ifp;
	int error = 0, v;

	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	return (error);
}
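
/*
 * Same pattern for the RX interrupt delay; the value is programmed in
 * units of 10 microseconds, matching the description string above.
 */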
static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = sc->ifp;
	int error = 0, v;

	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	return (error);
}
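
/*
 * Harvest the hardware MAC statistics into the driver's running
 * totals and refresh the ifnet counters; the additive updates assume
 * the hardware counters clear on read.
 */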
static void
et_stats_update(struct et_softc *sc)
{
	struct ifnet *ifp;
	struct et_hw_stats *stats;

	stats = &sc->sc_stats;
	stats->pkts_64 += CSR_READ_4(sc, ET_STAT_PKTS_64);
	stats->pkts_65 += CSR_READ_4(sc, ET_STAT_PKTS_65_127);
	stats->pkts_128 += CSR_READ_4(sc, ET_STAT_PKTS_128_255);
	stats->pkts_256 += CSR_READ_4(sc, ET_STAT_PKTS_256_511);
	stats->pkts_512 += CSR_READ_4(sc, ET_STAT_PKTS_512_1023);
	stats->pkts_1024 += CSR_READ_4(sc, ET_STAT_PKTS_1024_1518);
	stats->pkts_1519 += CSR_READ_4(sc, ET_STAT_PKTS_1519_1522);

	stats->rx_bytes += CSR_READ_4(sc, ET_STAT_RX_BYTES);
	stats->rx_frames += CSR_READ_4(sc, ET_STAT_RX_FRAMES);
	stats->rx_crcerrs += CSR_READ_4(sc, ET_STAT_RX_CRC_ERR);
	stats->rx_mcast += CSR_READ_4(sc, ET_STAT_RX_MCAST);
	stats->rx_bcast += CSR_READ_4(sc, ET_STAT_RX_BCAST);
	stats->rx_control += CSR_READ_4(sc, ET_STAT_RX_CTL);
	stats->rx_pause += CSR_READ_4(sc, ET_STAT_RX_PAUSE);
	stats->rx_unknown_control += CSR_READ_4(sc, ET_STAT_RX_UNKNOWN_CTL);
	stats->rx_alignerrs += CSR_READ_4(sc, ET_STAT_RX_ALIGN_ERR);
	stats->rx_lenerrs += CSR_READ_4(sc, ET_STAT_RX_LEN_ERR);
	stats->rx_codeerrs += CSR_READ_4(sc, ET_STAT_RX_CODE_ERR);
	stats->rx_cserrs += CSR_READ_4(sc, ET_STAT_RX_CS_ERR);
	stats->rx_runts += CSR_READ_4(sc, ET_STAT_RX_RUNT);
	stats->rx_oversize += CSR_READ_4(sc, ET_STAT_RX_OVERSIZE);
	stats->rx_fragments += CSR_READ_4(sc, ET_STAT_RX_FRAG);
	stats->rx_jabbers += CSR_READ_4(sc, ET_STAT_RX_JABBER);
	stats->rx_drop += CSR_READ_4(sc, ET_STAT_RX_DROP);

	stats->tx_bytes += CSR_READ_4(sc, ET_STAT_TX_BYTES);
	stats->tx_frames += CSR_READ_4(sc, ET_STAT_TX_FRAMES);
	stats->tx_mcast += CSR_READ_4(sc, ET_STAT_TX_MCAST);
	stats->tx_bcast += CSR_READ_4(sc, ET_STAT_TX_BCAST);
	stats->tx_pause += CSR_READ_4(sc, ET_STAT_TX_PAUSE);
	stats->tx_deferred += CSR_READ_4(sc, ET_STAT_TX_DEFER);
	stats->tx_excess_deferred += CSR_READ_4(sc, ET_STAT_TX_EXCESS_DEFER);
	stats->tx_single_colls += CSR_READ_4(sc, ET_STAT_TX_SINGLE_COL);
	stats->tx_multi_colls += CSR_READ_4(sc, ET_STAT_TX_MULTI_COL);
	stats->tx_late_colls += CSR_READ_4(sc, ET_STAT_TX_LATE_COL);
	stats->tx_excess_colls += CSR_READ_4(sc, ET_STAT_TX_EXCESS_COL);
	stats->tx_total_colls += CSR_READ_4(sc, ET_STAT_TX_TOTAL_COL);
	stats->tx_pause_honored += CSR_READ_4(sc, ET_STAT_TX_PAUSE_HONOR);
	stats->tx_drop += CSR_READ_4(sc, ET_STAT_TX_DROP);
	stats->tx_jabbers += CSR_READ_4(sc, ET_STAT_TX_JABBER);
	stats->tx_crcerrs += CSR_READ_4(sc, ET_STAT_TX_CRC_ERR);
	stats->tx_control += CSR_READ_4(sc, ET_STAT_TX_CTL);
	stats->tx_oversize += CSR_READ_4(sc, ET_STAT_TX_OVERSIZE);
	stats->tx_undersize += CSR_READ_4(sc, ET_STAT_TX_UNDERSIZE);
	stats->tx_fragments += CSR_READ_4(sc, ET_STAT_TX_FRAG);

	/* Update ifnet counters. */
	ifp = sc->ifp;
	ifp->if_opackets = (u_long)stats->tx_frames;
	ifp->if_collisions = stats->tx_total_colls;
	ifp->if_oerrors = stats->tx_drop + stats->tx_jabbers +
	    stats->tx_crcerrs + stats->tx_excess_deferred +
	    stats->tx_late_colls;
	ifp->if_ipackets = (u_long)stats->rx_frames;
	ifp->if_ierrors = stats->rx_crcerrs + stats->rx_alignerrs +
	    stats->rx_lenerrs + stats->rx_codeerrs + stats->rx_cserrs +
	    stats->rx_runts + stats->rx_jabbers + stats->rx_drop;
}
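
/*
 * Power management hooks: stop the controller on suspend, and rerun
 * the locked init path on resume if the interface was marked up.
 */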
static int
et_suspend(device_t dev)
{
	struct et_softc *sc;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		et_stop(sc);
	ET_UNLOCK(sc);
	return (0);
}

static int
et_resume(device_t dev)
{
	struct et_softc *sc;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	if ((sc->ifp->if_flags & IFF_UP) != 0)
		et_init_locked(sc);
	ET_UNLOCK(sc);
	return (0);
}