2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2007 Sepherosa Ziehau. All rights reserved.
6 * This code is derived from software contributed to The DragonFly Project
7 * by Sepherosa Ziehau <sepherosa@gmail.com>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/endian.h>
45 #include <sys/kernel.h>
47 #include <sys/malloc.h>
51 #include <sys/module.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/sysctl.h>
56 #include <net/ethernet.h>
58 #include <net/if_var.h>
59 #include <net/if_dl.h>
60 #include <net/if_types.h>
62 #include <net/if_arp.h>
63 #include <net/if_media.h>
64 #include <net/if_vlan_var.h>
66 #include <machine/bus.h>
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
71 #include <dev/pci/pcireg.h>
72 #include <dev/pci/pcivar.h>
74 #include <dev/et/if_etreg.h>
75 #include <dev/et/if_etvar.h>
77 #include "miibus_if.h"
79 MODULE_DEPEND(et, pci, 1, 1, 1);
80 MODULE_DEPEND(et, ether, 1, 1, 1);
81 MODULE_DEPEND(et, miibus, 1, 1, 1);
84 static int msi_disable = 0;
85 TUNABLE_INT("hw.et.msi_disable", &msi_disable);
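/* The tunable is read from the kernel environment, e.g. hw.et.msi_disable="1" in loader.conf forces INTx instead of MSI. */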
87 #define ET_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
89 static int et_probe(device_t);
90 static int et_attach(device_t);
91 static int et_detach(device_t);
92 static int et_shutdown(device_t);
93 static int et_suspend(device_t);
94 static int et_resume(device_t);
96 static int et_miibus_readreg(device_t, int, int);
97 static int et_miibus_writereg(device_t, int, int, int);
98 static void et_miibus_statchg(device_t);
100 static void et_init_locked(struct et_softc *);
101 static void et_init(void *);
102 static int et_ioctl(struct ifnet *, u_long, caddr_t);
103 static void et_start_locked(struct ifnet *);
104 static void et_start(struct ifnet *);
105 static int et_watchdog(struct et_softc *);
106 static int et_ifmedia_upd_locked(struct ifnet *);
107 static int et_ifmedia_upd(struct ifnet *);
108 static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
109 static uint64_t et_get_counter(struct ifnet *, ift_counter);
111 static void et_add_sysctls(struct et_softc *);
112 static int et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
113 static int et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);
115 static void et_intr(void *);
116 static void et_rxeof(struct et_softc *);
117 static void et_txeof(struct et_softc *);
119 static int et_dma_alloc(struct et_softc *);
120 static void et_dma_free(struct et_softc *);
121 static void et_dma_map_addr(void *, bus_dma_segment_t *, int, int);
122 static int et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t,
123 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *,
125 static void et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **,
126 bus_dmamap_t, bus_addr_t *);
127 static void et_init_tx_ring(struct et_softc *);
128 static int et_init_rx_ring(struct et_softc *);
129 static void et_free_tx_ring(struct et_softc *);
130 static void et_free_rx_ring(struct et_softc *);
131 static int et_encap(struct et_softc *, struct mbuf **);
132 static int et_newbuf_cluster(struct et_rxbuf_data *, int);
133 static int et_newbuf_hdr(struct et_rxbuf_data *, int);
134 static void et_rxbuf_discard(struct et_rxbuf_data *, int);
136 static void et_stop(struct et_softc *);
137 static int et_chip_init(struct et_softc *);
138 static void et_chip_attach(struct et_softc *);
139 static void et_init_mac(struct et_softc *);
140 static void et_init_rxmac(struct et_softc *);
141 static void et_init_txmac(struct et_softc *);
142 static int et_init_rxdma(struct et_softc *);
143 static int et_init_txdma(struct et_softc *);
144 static int et_start_rxdma(struct et_softc *);
145 static int et_start_txdma(struct et_softc *);
146 static int et_stop_rxdma(struct et_softc *);
147 static int et_stop_txdma(struct et_softc *);
148 static void et_reset(struct et_softc *);
149 static int et_bus_config(struct et_softc *);
150 static void et_get_eaddr(device_t, uint8_t[]);
151 static void et_setmulti(struct et_softc *);
152 static void et_tick(void *);
153 static void et_stats_update(struct et_softc *);
155 static const struct et_dev {
160 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
161 "Agere ET1310 Gigabit Ethernet" },
162 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
163 "Agere ET1310 Fast Ethernet" },
167 static device_method_t et_methods[] = {
168 DEVMETHOD(device_probe, et_probe),
169 DEVMETHOD(device_attach, et_attach),
170 DEVMETHOD(device_detach, et_detach),
171 DEVMETHOD(device_shutdown, et_shutdown),
172 DEVMETHOD(device_suspend, et_suspend),
173 DEVMETHOD(device_resume, et_resume),
175 DEVMETHOD(miibus_readreg, et_miibus_readreg),
176 DEVMETHOD(miibus_writereg, et_miibus_writereg),
177 DEVMETHOD(miibus_statchg, et_miibus_statchg),
182 static driver_t et_driver = {
185 sizeof(struct et_softc)
188 static devclass_t et_devclass;
190 DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
191 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, et, et_devices,
192 nitems(et_devices) - 1);
193 DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);
195 static int et_rx_intr_npkts = 32;
196 static int et_rx_intr_delay = 20; /* x10 usec */
197 static int et_tx_intr_nsegs = 126;
198 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */
200 TUNABLE_INT("hw.et.timer", &et_timer);
201 TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
202 TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
203 TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
206 et_probe(device_t dev)
208 const struct et_dev *d;
211 vid = pci_get_vendor(dev);
212 did = pci_get_device(dev);
214 for (d = et_devices; d->desc != NULL; ++d) {
215 if (vid == d->vid && did == d->did) {
216 device_set_desc(dev, d->desc);
217 return (BUS_PROBE_DEFAULT);
224 et_attach(device_t dev)
228 uint8_t eaddr[ETHER_ADDR_LEN];
230 int cap, error, msic;
232 sc = device_get_softc(dev);
234 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
236 callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
238 ifp = sc->ifp = if_alloc(IFT_ETHER);
240 device_printf(dev, "can not if_alloc()\n");
246 * Initialize tunables
248 sc->sc_rx_intr_npkts = et_rx_intr_npkts;
249 sc->sc_rx_intr_delay = et_rx_intr_delay;
250 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
251 sc->sc_timer = et_timer;
253 /* Enable bus mastering */
254 pci_enable_busmaster(dev);
259 sc->sc_mem_rid = PCIR_BAR(0);
260 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
261 &sc->sc_mem_rid, RF_ACTIVE);
262 if (sc->sc_mem_res == NULL) {
263 device_printf(dev, "can't allocate IO memory\n");
268 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
270 sc->sc_flags |= ET_FLAG_PCIE;
271 msic = pci_msi_count(dev);
273 device_printf(dev, "MSI count: %d\n", msic);
275 if (msic > 0 && msi_disable == 0) {
277 if (pci_alloc_msi(dev, &msic) == 0) {
279 device_printf(dev, "Using %d MSI message\n",
281 sc->sc_flags |= ET_FLAG_MSI;
283 pci_release_msi(dev);
290 if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
292 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
293 &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
296 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
297 &sc->sc_irq_rid, RF_ACTIVE);
299 if (sc->sc_irq_res == NULL) {
300 device_printf(dev, "can't allocate irq\n");
305 if (pci_get_device(dev) == PCI_PRODUCT_LUCENT_ET1310_FAST)
306 sc->sc_flags |= ET_FLAG_FASTETHER;
308 error = et_bus_config(sc);
312 et_get_eaddr(dev, eaddr);
314 /* Take PHY out of COMA and enable clocks. */
315 pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
316 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
317 pmcfg |= EM_PM_GIGEPHY_ENB;
318 CSR_WRITE_4(sc, ET_PM, pmcfg);
322 error = et_dma_alloc(sc);
327 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
328 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
329 ifp->if_init = et_init;
330 ifp->if_ioctl = et_ioctl;
331 ifp->if_start = et_start;
332 ifp->if_get_counter = et_get_counter;
333 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU;
334 ifp->if_capenable = ifp->if_capabilities;
335 ifp->if_snd.ifq_drv_maxlen = ET_TX_NDESC - 1;
336 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC - 1);
337 IFQ_SET_READY(&ifp->if_snd);
341 error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
342 et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
345 device_printf(dev, "attaching PHYs failed\n");
349 ether_ifattach(ifp, eaddr);
351 /* Tell the upper layer(s) we support long frames. */
352 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
354 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
355 NULL, et_intr, sc, &sc->sc_irq_handle);
358 device_printf(dev, "can't setup intr\n");
371 et_detach(device_t dev)
375 sc = device_get_softc(dev);
376 if (device_is_attached(dev)) {
377 ether_ifdetach(sc->ifp);
381 callout_drain(&sc->sc_tick);
384 if (sc->sc_miibus != NULL)
385 device_delete_child(dev, sc->sc_miibus);
386 bus_generic_detach(dev);
388 if (sc->sc_irq_handle != NULL)
389 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
390 if (sc->sc_irq_res != NULL)
391 bus_release_resource(dev, SYS_RES_IRQ,
392 rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
393 if ((sc->sc_flags & ET_FLAG_MSI) != 0)
394 pci_release_msi(dev);
395 if (sc->sc_mem_res != NULL)
396 bus_release_resource(dev, SYS_RES_MEMORY,
397 rman_get_rid(sc->sc_mem_res), sc->sc_mem_res);
404 mtx_destroy(&sc->sc_mtx);
410 et_shutdown(device_t dev)
414 sc = device_get_softc(dev);
422 et_miibus_readreg(device_t dev, int phy, int reg)
428 sc = device_get_softc(dev);
429 /* Stop any pending operations */
430 CSR_WRITE_4(sc, ET_MII_CMD, 0);
432 val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
433 val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
434 CSR_WRITE_4(sc, ET_MII_ADDR, val);
437 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
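	/* Poll until the MII read completes: both BUSY and INVALID must clear. */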
441 for (i = 0; i < NRETRY; ++i) {
442 val = CSR_READ_4(sc, ET_MII_IND);
443 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
449 "read phy %d, reg %d timed out\n", phy, reg);
456 val = CSR_READ_4(sc, ET_MII_STAT);
457 ret = val & ET_MII_STAT_VALUE_MASK;
460 /* Make sure that the current operation is stopped */
461 CSR_WRITE_4(sc, ET_MII_CMD, 0);
466 et_miibus_writereg(device_t dev, int phy, int reg, int val0)
472 sc = device_get_softc(dev);
473 /* Stop any pending operations */
474 CSR_WRITE_4(sc, ET_MII_CMD, 0);
476 val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
477 val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
478 CSR_WRITE_4(sc, ET_MII_ADDR, val);
481 CSR_WRITE_4(sc, ET_MII_CTRL,
482 (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);
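	/* Poll until the MII write completes; only BUSY needs to clear here. */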
486 for (i = 0; i < NRETRY; ++i) {
487 val = CSR_READ_4(sc, ET_MII_IND);
488 if ((val & ET_MII_IND_BUSY) == 0)
494 "write phy %d, reg %d timed out\n", phy, reg);
495 et_miibus_readreg(dev, phy, reg);
500 /* Make sure that the current operation is stopped */
501 CSR_WRITE_4(sc, ET_MII_CMD, 0);
506 et_miibus_statchg(device_t dev)
509 struct mii_data *mii;
511 uint32_t cfg1, cfg2, ctrl;
514 sc = device_get_softc(dev);
516 mii = device_get_softc(sc->sc_miibus);
518 if (mii == NULL || ifp == NULL ||
519 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
522 sc->sc_flags &= ~ET_FLAG_LINK;
523 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
524 (IFM_ACTIVE | IFM_AVALID)) {
525 switch (IFM_SUBTYPE(mii->mii_media_active)) {
528 sc->sc_flags |= ET_FLAG_LINK;
531 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
532 sc->sc_flags |= ET_FLAG_LINK;
537 /* XXX Stop TX/RX MAC? */
538 if ((sc->sc_flags & ET_FLAG_LINK) == 0)
541 /* Program MACs with resolved speed/duplex/flow-control. */
542 ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
543 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
544 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
545 cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
546 ET_MAC_CFG1_LOOPBACK);
547 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
548 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
549 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
550 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
551 ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
552 ET_MAC_CFG2_PREAMBLE_LEN_MASK);
554 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
555 cfg2 |= ET_MAC_CFG2_MODE_GMII;
557 cfg2 |= ET_MAC_CFG2_MODE_MII;
558 ctrl |= ET_MAC_CTRL_MODE_MII;
561 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
562 cfg2 |= ET_MAC_CFG2_FDX;
564 * The controller lacks automatic TX pause frame
565 * generation, so it has to be handled by the driver.
566 * Even though the driver can send pause frames with
567 * arbitrary pause times, the controller does not
568 * provide a way to tell how many free RX
569 * buffers are available in the controller. This
570 * limitation makes it hard to generate XON frames
571 * in time on the driver side, so don't enable TX flow
575 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
576 cfg1 |= ET_MAC_CFG1_TXFLOW;
578 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
579 cfg1 |= ET_MAC_CFG1_RXFLOW;
581 ctrl |= ET_MAC_CTRL_GHDX;
583 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
584 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
585 cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
586 CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);
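	/* Wait for the MAC to report both TX and RX enables as synchronized. */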
590 for (i = 0; i < NRETRY; ++i) {
591 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
592 if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
593 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
598 if_printf(ifp, "can't enable RX/TX\n");
599 sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
605 et_ifmedia_upd_locked(struct ifnet *ifp)
608 struct mii_data *mii;
609 struct mii_softc *miisc;
612 mii = device_get_softc(sc->sc_miibus);
613 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
615 return (mii_mediachg(mii));
619 et_ifmedia_upd(struct ifnet *ifp)
626 res = et_ifmedia_upd_locked(ifp);
633 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
636 struct mii_data *mii;
640 if ((ifp->if_flags & IFF_UP) == 0) {
645 mii = device_get_softc(sc->sc_miibus);
647 ifmr->ifm_active = mii->mii_media_active;
648 ifmr->ifm_status = mii->mii_media_status;
653 et_stop(struct et_softc *sc)
660 callout_stop(&sc->sc_tick);
661 /* Disable interrupts. */
662 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
664 CSR_WRITE_4(sc, ET_MAC_CFG1, CSR_READ_4(sc, ET_MAC_CFG1) & ~(
665 ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN));
677 sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;
679 sc->watchdog_timer = 0;
680 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
684 et_bus_config(struct et_softc *sc)
686 uint32_t val, max_plsz;
687 uint16_t ack_latency, replay_timer;
690 * Test whether EEPROM is valid
691 * NOTE: Read twice to get the correct value
693 pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
694 val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
695 if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
696 device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
702 if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
706 * Configure ACK latency and replay timer according to
709 val = pci_read_config(sc->dev,
710 sc->sc_expcap + PCIER_DEVICE_CAP, 4);
711 max_plsz = val & PCIEM_CAP_MAX_PAYLOAD;
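	/* Choose ACK latency and replay timer values matching the device's maximum payload size; other sizes keep whatever the registers already hold. */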
714 case ET_PCIV_DEVICE_CAPS_PLSZ_128:
715 ack_latency = ET_PCIV_ACK_LATENCY_128;
716 replay_timer = ET_PCIV_REPLAY_TIMER_128;
719 case ET_PCIV_DEVICE_CAPS_PLSZ_256:
720 ack_latency = ET_PCIV_ACK_LATENCY_256;
721 replay_timer = ET_PCIV_REPLAY_TIMER_256;
725 ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
726 replay_timer = pci_read_config(sc->dev,
727 ET_PCIR_REPLAY_TIMER, 2);
728 device_printf(sc->dev, "ack latency %u, replay timer %u\n",
729 ack_latency, replay_timer);
732 if (ack_latency != 0) {
733 pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
734 pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
739 * Set L0s and L1 latency timer to 2us
741 val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
742 val &= ~(PCIEM_LINK_CAP_L0S_EXIT | PCIEM_LINK_CAP_L1_EXIT);
743 /* L0s exit latency : 2us */
745 /* L1 exit latency : 2us */
747 pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);
750 * Set max read request size to 2048 bytes
752 pci_set_max_read_req(sc->dev, 2048);
758 et_get_eaddr(device_t dev, uint8_t eaddr[])
763 val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
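	/* Bytes 0-3 of the station address come from ET_PCIR_MAC_ADDR0, bytes 4-5 from ET_PCIR_MAC_ADDR1, least significant byte first. */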
764 for (i = 0; i < 4; ++i)
765 eaddr[i] = (val >> (8 * i)) & 0xff;
767 val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
768 for (; i < ETHER_ADDR_LEN; ++i)
769 eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
773 et_reset(struct et_softc *sc)
776 CSR_WRITE_4(sc, ET_MAC_CFG1,
777 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
778 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
779 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
781 CSR_WRITE_4(sc, ET_SWRST,
782 ET_SWRST_TXDMA | ET_SWRST_RXDMA |
783 ET_SWRST_TXMAC | ET_SWRST_RXMAC |
784 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
786 CSR_WRITE_4(sc, ET_MAC_CFG1,
787 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
788 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
789 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
790 /* Disable interrupts. */
791 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
794 struct et_dmamap_arg {
795 bus_addr_t et_busaddr;
799 et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
801 struct et_dmamap_arg *ctx;
806 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
809 ctx->et_busaddr = segs->ds_addr;
813 et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize,
814 bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
817 struct et_dmamap_arg ctx;
820 error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR,
821 BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL,
824 device_printf(sc->dev, "could not create %s dma tag\n", msg);
827 /* Allocate DMA'able memory for ring. */
828 error = bus_dmamem_alloc(*tag, (void **)ring,
829 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
831 device_printf(sc->dev,
832 "could not allocate DMA'able memory for %s\n", msg);
835 /* Load the address of the ring. */
837 error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr,
838 &ctx, BUS_DMA_NOWAIT);
840 device_printf(sc->dev,
841 "could not load DMA'able memory for %s\n", msg);
844 *paddr = ctx.et_busaddr;
849 et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
850 bus_dmamap_t map, bus_addr_t *paddr)
854 bus_dmamap_unload(*tag, map);
858 bus_dmamem_free(*tag, *ring, map);
862 bus_dma_tag_destroy(*tag);
868 et_dma_alloc(struct et_softc *sc)
870 struct et_txdesc_ring *tx_ring;
871 struct et_rxdesc_ring *rx_ring;
872 struct et_rxstat_ring *rxst_ring;
873 struct et_rxstatus_data *rxsd;
874 struct et_rxbuf_data *rbd;
875 struct et_txbuf_data *tbd;
876 struct et_txstatus_data *txsd;
879 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
880 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
881 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
884 device_printf(sc->dev, "could not allocate parent dma tag\n");
889 tx_ring = &sc->sc_tx_ring;
890 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE,
891 &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
892 &tx_ring->tr_paddr, "TX ring");
896 /* TX status block. */
897 txsd = &sc->sc_tx_status;
898 error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t),
899 &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap,
900 &txsd->txsd_paddr, "TX status block");
904 /* RX ring 0, used to receive small-sized frames. */
905 rx_ring = &sc->sc_rx_ring[0];
906 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
907 &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
908 &rx_ring->rr_paddr, "RX ring 0");
909 rx_ring->rr_posreg = ET_RX_RING0_POS;
913 /* RX ring 1, used to store normal-sized frames. */
914 rx_ring = &sc->sc_rx_ring[1];
915 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
916 &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
917 &rx_ring->rr_paddr, "RX ring 1");
918 rx_ring->rr_posreg = ET_RX_RING1_POS;
923 rxst_ring = &sc->sc_rxstat_ring;
924 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE,
925 &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat,
926 &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring");
930 /* RX status block. */
931 rxsd = &sc->sc_rx_status;
932 error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN,
933 sizeof(struct et_rxstatus), &rxsd->rxsd_dtag,
934 (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap,
935 &rxsd->rxsd_paddr, "RX status block");
939 /* Create parent DMA tag for mbufs. */
940 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
941 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
942 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
945 device_printf(sc->dev,
946 "could not allocate parent dma tag for mbuf\n");
950 /* Create DMA tag for mini RX mbufs to use RX ring 0. */
951 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
952 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
953 MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag);
955 device_printf(sc->dev, "could not create mini RX dma tag\n");
959 /* Create DMA tag for standard RX mbufs to use RX ring 1. */
960 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
961 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
962 MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag);
964 device_printf(sc->dev, "could not create RX dma tag\n");
968 /* Create DMA tag for TX mbufs. */
969 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
970 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
971 MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL,
974 device_printf(sc->dev, "could not create TX dma tag\n");
978 /* Initialize RX ring 0. */
979 rbd = &sc->sc_rx_data[0];
980 rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128;
981 rbd->rbd_newbuf = et_newbuf_hdr;
982 rbd->rbd_discard = et_rxbuf_discard;
984 rbd->rbd_ring = &sc->sc_rx_ring[0];
985 /* Create DMA maps for mini RX buffers, ring 0. */
986 for (i = 0; i < ET_RX_NDESC; i++) {
987 error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
988 &rbd->rbd_buf[i].rb_dmap);
990 device_printf(sc->dev,
991 "could not create DMA map for mini RX mbufs\n");
996 /* Create a spare DMA map for mini RX buffers, ring 0. */
997 error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
998 &sc->sc_rx_mini_sparemap);
1000 device_printf(sc->dev,
1001 "could not create spare DMA map for mini RX mbuf\n");
1005 /* Initialize RX ring 1. */
1006 rbd = &sc->sc_rx_data[1];
1007 rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048;
1008 rbd->rbd_newbuf = et_newbuf_cluster;
1009 rbd->rbd_discard = et_rxbuf_discard;
1010 rbd->rbd_softc = sc;
1011 rbd->rbd_ring = &sc->sc_rx_ring[1];
1012 /* Create DMA maps for standard RX buffers, ring 1. */
1013 for (i = 0; i < ET_RX_NDESC; i++) {
1014 error = bus_dmamap_create(sc->sc_rx_tag, 0,
1015 &rbd->rbd_buf[i].rb_dmap);
1017 device_printf(sc->dev,
1018 "could not create DMA map for RX mbufs\n");
1023 /* Create a spare DMA map for standard RX buffers, ring 1. */
1024 error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap);
1026 device_printf(sc->dev,
1027 "could not create spare DMA map for RX mbuf\n");
1031 /* Create DMA maps for TX buffers. */
1032 tbd = &sc->sc_tx_data;
1033 for (i = 0; i < ET_TX_NDESC; i++) {
1034 error = bus_dmamap_create(sc->sc_tx_tag, 0,
1035 &tbd->tbd_buf[i].tb_dmap);
1037 device_printf(sc->dev,
1038 "could not create DMA map for TX mbufs\n");
1047 et_dma_free(struct et_softc *sc)
1049 struct et_txdesc_ring *tx_ring;
1050 struct et_rxdesc_ring *rx_ring;
1051 struct et_txstatus_data *txsd;
1052 struct et_rxstat_ring *rxst_ring;
1053 struct et_rxstatus_data *rxsd;
1054 struct et_rxbuf_data *rbd;
1055 struct et_txbuf_data *tbd;
1058 /* Destroy DMA maps for mini RX buffers, ring 0. */
1059 rbd = &sc->sc_rx_data[0];
1060 for (i = 0; i < ET_RX_NDESC; i++) {
1061 if (rbd->rbd_buf[i].rb_dmap) {
1062 bus_dmamap_destroy(sc->sc_rx_mini_tag,
1063 rbd->rbd_buf[i].rb_dmap);
1064 rbd->rbd_buf[i].rb_dmap = NULL;
1067 if (sc->sc_rx_mini_sparemap) {
1068 bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap);
1069 sc->sc_rx_mini_sparemap = NULL;
1071 if (sc->sc_rx_mini_tag) {
1072 bus_dma_tag_destroy(sc->sc_rx_mini_tag);
1073 sc->sc_rx_mini_tag = NULL;
1076 /* Destroy DMA maps for standard RX buffers, ring 1. */
1077 rbd = &sc->sc_rx_data[1];
1078 for (i = 0; i < ET_RX_NDESC; i++) {
1079 if (rbd->rbd_buf[i].rb_dmap) {
1080 bus_dmamap_destroy(sc->sc_rx_tag,
1081 rbd->rbd_buf[i].rb_dmap);
1082 rbd->rbd_buf[i].rb_dmap = NULL;
1085 if (sc->sc_rx_sparemap) {
1086 bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap);
1087 sc->sc_rx_sparemap = NULL;
1089 if (sc->sc_rx_tag) {
1090 bus_dma_tag_destroy(sc->sc_rx_tag);
1091 sc->sc_rx_tag = NULL;
1094 /* Destroy DMA maps for TX buffers. */
1095 tbd = &sc->sc_tx_data;
1096 for (i = 0; i < ET_TX_NDESC; i++) {
1097 if (tbd->tbd_buf[i].tb_dmap) {
1098 bus_dmamap_destroy(sc->sc_tx_tag,
1099 tbd->tbd_buf[i].tb_dmap);
1100 tbd->tbd_buf[i].tb_dmap = NULL;
1103 if (sc->sc_tx_tag) {
1104 bus_dma_tag_destroy(sc->sc_tx_tag);
1105 sc->sc_tx_tag = NULL;
1108 /* Destroy mini RX ring, ring 0. */
1109 rx_ring = &sc->sc_rx_ring[0];
1110 et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1111 rx_ring->rr_dmap, &rx_ring->rr_paddr);
1112 /* Destroy standard RX ring, ring 1. */
1113 rx_ring = &sc->sc_rx_ring[1];
1114 et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1115 rx_ring->rr_dmap, &rx_ring->rr_paddr);
1116 /* Destroy RX stat ring. */
1117 rxst_ring = &sc->sc_rxstat_ring;
1118 et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
1119 rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr);
1120 /* Destroy RX status block. */
1121 rxsd = &sc->sc_rx_status;
1122 et_dma_ring_free(sc, &rxsd->rxsd_dtag, (void *)&rxsd->rxsd_status,
1123 rxsd->rxsd_dmap, &rxsd->rxsd_paddr);
1124 /* Destroy TX ring. */
1125 tx_ring = &sc->sc_tx_ring;
1126 et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
1127 tx_ring->tr_dmap, &tx_ring->tr_paddr);
1128 /* Destroy TX status block. */
1129 txsd = &sc->sc_tx_status;
1130 et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status,
1131 txsd->txsd_dmap, &txsd->txsd_paddr);
1133 /* Destroy the parent tag. */
1135 bus_dma_tag_destroy(sc->sc_dtag);
1141 et_chip_attach(struct et_softc *sc)
1146 * Perform minimal initialization
1149 /* Disable loopback */
1150 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1153 CSR_WRITE_4(sc, ET_MAC_CFG1,
1154 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1155 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1156 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1159 * Setup half duplex mode
1161 val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1162 (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1163 (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1164 ET_MAC_HDX_EXC_DEFER;
1165 CSR_WRITE_4(sc, ET_MAC_HDX, val);
1167 /* Clear MAC control */
1168 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1171 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1173 /* Bring MAC out of reset state */
1174 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1176 /* Enable memory controllers */
1177 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1183 struct et_softc *sc;
1190 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1193 status = CSR_READ_4(sc, ET_INTR_STATUS);
1194 if ((status & ET_INTRS) == 0)
1197 /* Disable further interrupts. */
1198 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
1200 if (status & (ET_INTR_RXDMA_ERROR | ET_INTR_TXDMA_ERROR)) {
1201 device_printf(sc->dev, "DMA error(0x%08x) -- resetting\n",
1203 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1208 if (status & ET_INTR_RXDMA)
1210 if (status & (ET_INTR_TXDMA | ET_INTR_TIMER))
1212 if (status & ET_INTR_TIMER)
1213 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1214 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1215 CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
1216 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1217 et_start_locked(ifp);
1224 et_init_locked(struct et_softc *sc)
1232 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1238 et_init_tx_ring(sc);
1239 error = et_init_rx_ring(sc);
1243 error = et_chip_init(sc);
1248 * Start TX/RX DMA engine
1250 error = et_start_rxdma(sc);
1254 error = et_start_txdma(sc);
1258 /* Enable interrupts. */
1259 CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
1261 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1263 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1264 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1266 sc->sc_flags &= ~ET_FLAG_LINK;
1267 et_ifmedia_upd_locked(ifp);
1269 callout_reset(&sc->sc_tick, hz, et_tick, sc);
1279 struct et_softc *sc = xsc;
1287 et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1289 struct et_softc *sc;
1290 struct mii_data *mii;
1292 int error, mask, max_framelen;
1295 ifr = (struct ifreq *)data;
1302 if (ifp->if_flags & IFF_UP) {
1303 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1304 if ((ifp->if_flags ^ sc->sc_if_flags) &
1305 (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
1311 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1314 sc->sc_if_flags = ifp->if_flags;
1320 mii = device_get_softc(sc->sc_miibus);
1321 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1326 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1336 if (sc->sc_flags & ET_FLAG_JUMBO)
1337 max_framelen = ET_JUMBO_FRAMELEN;
1340 max_framelen = MCLBYTES - 1;
1342 if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
1348 if (ifp->if_mtu != ifr->ifr_mtu) {
1349 ifp->if_mtu = ifr->ifr_mtu;
1350 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1351 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1360 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1361 if ((mask & IFCAP_TXCSUM) != 0 &&
1362 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1363 ifp->if_capenable ^= IFCAP_TXCSUM;
1364 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
1365 ifp->if_hwassist |= ET_CSUM_FEATURES;
1367 ifp->if_hwassist &= ~ET_CSUM_FEATURES;
1373 error = ether_ioctl(ifp, cmd, data);
1380 et_start_locked(struct ifnet *ifp)
1382 struct et_softc *sc;
1383 struct mbuf *m_head = NULL;
1384 struct et_txdesc_ring *tx_ring;
1385 struct et_txbuf_data *tbd;
1386 uint32_t tx_ready_pos;
1392 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1394 (sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
1395 (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED))
1399 * The driver does not request a TX completion interrupt for every
1400 * queued frame, to avoid generating excessive interrupts.
1401 * This means the driver may wait for a TX completion interrupt even
1402 * though some frames were successfully transmitted. Reclaiming
1403 * transmitted frames first ensures the driver sees all available
1406 tbd = &sc->sc_tx_data;
1407 if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3)
1410 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1411 if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) {
1412 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1416 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1420 if (et_encap(sc, &m_head)) {
1421 if (m_head == NULL) {
1422 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1425 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1426 if (tbd->tbd_used > 0)
1427 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1431 ETHER_BPF_MTAP(ifp, m_head);
1435 tx_ring = &sc->sc_tx_ring;
1436 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1437 BUS_DMASYNC_PREWRITE);
1438 tx_ready_pos = tx_ring->tr_ready_index &
1439 ET_TX_READY_POS_INDEX_MASK;
1440 if (tx_ring->tr_ready_wrap)
1441 tx_ready_pos |= ET_TX_READY_POS_WRAP;
1442 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
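	/* Arm the watchdog; et_tick() decrements it once a second, so a stuck queue is detected after about 5 seconds. */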
1443 sc->watchdog_timer = 5;
1448 et_start(struct ifnet *ifp)
1450 struct et_softc *sc;
1454 et_start_locked(ifp);
1459 et_watchdog(struct et_softc *sc)
1465 if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
1468 bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap,
1469 BUS_DMASYNC_POSTREAD);
1470 status = le32toh(*(sc->sc_tx_status.txsd_status));
1471 if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n",
1474 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
1475 sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1477 return (EJUSTRETURN);
1481 et_stop_rxdma(struct et_softc *sc)
1484 CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1485 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1488 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1489 if_printf(sc->ifp, "can't stop RX DMA engine\n");
1496 et_stop_txdma(struct et_softc *sc)
1499 CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1500 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1505 et_free_tx_ring(struct et_softc *sc)
1507 struct et_txdesc_ring *tx_ring;
1508 struct et_txbuf_data *tbd;
1509 struct et_txbuf *tb;
1512 tbd = &sc->sc_tx_data;
1513 tx_ring = &sc->sc_tx_ring;
1514 for (i = 0; i < ET_TX_NDESC; ++i) {
1515 tb = &tbd->tbd_buf[i];
1516 if (tb->tb_mbuf != NULL) {
1517 bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
1518 BUS_DMASYNC_POSTWRITE);
1519 bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
1520 m_freem(tb->tb_mbuf);
1527 et_free_rx_ring(struct et_softc *sc)
1529 struct et_rxbuf_data *rbd;
1530 struct et_rxdesc_ring *rx_ring;
1531 struct et_rxbuf *rb;
1535 rx_ring = &sc->sc_rx_ring[0];
1536 rbd = &sc->sc_rx_data[0];
1537 for (i = 0; i < ET_RX_NDESC; ++i) {
1538 rb = &rbd->rbd_buf[i];
1539 if (rb->rb_mbuf != NULL) {
1540 bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
1541 BUS_DMASYNC_POSTREAD);
1542 bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
1543 m_freem(rb->rb_mbuf);
1549 rx_ring = &sc->sc_rx_ring[1];
1550 rbd = &sc->sc_rx_data[1];
1551 for (i = 0; i < ET_RX_NDESC; ++i) {
1552 rb = &rbd->rbd_buf[i];
1553 if (rb->rb_mbuf != NULL) {
1554 bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
1555 BUS_DMASYNC_POSTREAD);
1556 bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
1557 m_freem(rb->rb_mbuf);
1564 et_setmulti(struct et_softc *sc)
1567 uint32_t hash[4] = { 0, 0, 0, 0 };
1568 uint32_t rxmac_ctrl, pktfilt;
1569 struct ifmultiaddr *ifma;
1575 pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1576 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1578 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1579 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1580 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1585 if_maddr_rlock(ifp);
1586 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1589 if (ifma->ifma_addr->sa_family != AF_LINK)
1592 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
1593 ifma->ifma_addr), ETHER_ADDR_LEN);
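	/* Bits 23-29 of the big-endian CRC pick one of 128 slots in the 4 x 32-bit hash table. */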
1594 h = (h & 0x3f800000) >> 23;
1597 if (h >= 32 && h < 64) {
1600 } else if (h >= 64 && h < 96) {
1603 } else if (h >= 96) {
1611 if_maddr_runlock(ifp);
1613 for (i = 0; i < 4; ++i)
1614 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1617 pktfilt |= ET_PKTFILT_MCAST;
1618 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1620 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1621 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1625 et_chip_init(struct et_softc *sc)
1629 int error, frame_len, rxmem_size;
1633 * Split the 16KB of internal memory between TX and RX
1634 * according to frame length.
1636 frame_len = ET_FRAMELEN(ifp->if_mtu);
1637 if (frame_len < 2048) {
1638 rxmem_size = ET_MEM_RXSIZE_DEFAULT;
1639 } else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
1640 rxmem_size = ET_MEM_SIZE / 2;
1642 rxmem_size = ET_MEM_SIZE -
1643 roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
1645 rxq_end = ET_QUEUE_ADDR(rxmem_size);
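	/* The RX queue occupies internal memory up to rxq_end; the TX queue takes everything from rxq_end + 1 to the end. */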
1647 CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
1648 CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
1649 CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
1650 CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);
1653 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1655 /* Clear MSI configure */
1656 if ((sc->sc_flags & ET_FLAG_MSI) == 0)
1657 CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1660 CSR_WRITE_4(sc, ET_TIMER, 0);
1662 /* Initialize MAC */
1665 /* Enable memory controllers */
1666 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1668 /* Initialize RX MAC */
1671 /* Initialize TX MAC */
1674 /* Initialize RX DMA engine */
1675 error = et_init_rxdma(sc);
1679 /* Initialize TX DMA engine */
1680 error = et_init_txdma(sc);
1688 et_init_tx_ring(struct et_softc *sc)
1690 struct et_txdesc_ring *tx_ring;
1691 struct et_txbuf_data *tbd;
1692 struct et_txstatus_data *txsd;
1694 tx_ring = &sc->sc_tx_ring;
1695 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1696 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1697 BUS_DMASYNC_PREWRITE);
1699 tbd = &sc->sc_tx_data;
1700 tbd->tbd_start_index = 0;
1701 tbd->tbd_start_wrap = 0;
1704 txsd = &sc->sc_tx_status;
1705 bzero(txsd->txsd_status, sizeof(uint32_t));
1706 bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
1707 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1711 et_init_rx_ring(struct et_softc *sc)
1713 struct et_rxstatus_data *rxsd;
1714 struct et_rxstat_ring *rxst_ring;
1715 struct et_rxbuf_data *rbd;
1718 for (n = 0; n < ET_RX_NRING; ++n) {
1719 rbd = &sc->sc_rx_data[n];
1720 for (i = 0; i < ET_RX_NDESC; ++i) {
1721 error = rbd->rbd_newbuf(rbd, i);
1723 if_printf(sc->ifp, "%d ring %d buf, "
1724 "newbuf failed: %d\n", n, i, error);
1730 rxsd = &sc->sc_rx_status;
1731 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1732 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
1733 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1735 rxst_ring = &sc->sc_rxstat_ring;
1736 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1737 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
1738 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1744 et_init_rxdma(struct et_softc *sc)
1746 struct et_rxstatus_data *rxsd;
1747 struct et_rxstat_ring *rxst_ring;
1748 struct et_rxdesc_ring *rx_ring;
1751 error = et_stop_rxdma(sc);
1753 if_printf(sc->ifp, "can't init RX DMA engine\n");
1760 rxsd = &sc->sc_rx_status;
1761 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1762 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1765 * Install RX stat ring
1767 rxst_ring = &sc->sc_rxstat_ring;
1768 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1769 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1770 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1771 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
1772 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1774 /* Match ET_RXSTAT_POS */
1775 rxst_ring->rsr_index = 0;
1776 rxst_ring->rsr_wrap = 0;
1779 * Install the 2nd RX descriptor ring
1781 rx_ring = &sc->sc_rx_ring[1];
1782 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1783 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1784 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1785 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1786 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1788 /* Match ET_RX_RING1_POS */
1789 rx_ring->rr_index = 0;
1790 rx_ring->rr_wrap = 1;
1793 * Install the 1st RX descriptor ring
1795 rx_ring = &sc->sc_rx_ring[0];
1796 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1797 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1798 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1799 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1800 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1802 /* Match ET_RX_RING0_POS */
1803 rx_ring->rr_index = 0;
1804 rx_ring->rr_wrap = 1;
1807 * RX intr moderation
1809 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1810 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1816 et_init_txdma(struct et_softc *sc)
1818 struct et_txdesc_ring *tx_ring;
1819 struct et_txstatus_data *txsd;
1822 error = et_stop_txdma(sc);
1824 if_printf(sc->ifp, "can't init TX DMA engine\n");
1829 * Install TX descriptor ring
1831 tx_ring = &sc->sc_tx_ring;
1832 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1833 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1834 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1839 txsd = &sc->sc_tx_status;
1840 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1841 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1843 CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1845 /* Match ET_TX_READY_POS */
1846 tx_ring->tr_ready_index = 0;
1847 tx_ring->tr_ready_wrap = 0;
1853 et_init_mac(struct et_softc *sc)
1856 const uint8_t *eaddr;
1860 CSR_WRITE_4(sc, ET_MAC_CFG1,
1861 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1862 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1863 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1866 * Setup inter packet gap
1868 val = (56 << ET_IPG_NONB2B_1_SHIFT) |
1869 (88 << ET_IPG_NONB2B_2_SHIFT) |
1870 (80 << ET_IPG_MINIFG_SHIFT) |
1871 (96 << ET_IPG_B2B_SHIFT);
1872 CSR_WRITE_4(sc, ET_IPG, val);
1875 * Setup half duplex mode
1877 val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1878 (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1879 (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1880 ET_MAC_HDX_EXC_DEFER;
1881 CSR_WRITE_4(sc, ET_MAC_HDX, val);
1883 /* Clear MAC control */
1884 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1887 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1893 eaddr = IF_LLADDR(ifp);
1894 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1895 CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1896 val = (eaddr[0] << 16) | (eaddr[1] << 24);
1897 CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1899 /* Set max frame length */
1900 CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));
1902 /* Bring MAC out of reset state */
1903 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1907 et_init_rxmac(struct et_softc *sc)
1910 const uint8_t *eaddr;
1914 /* Disable RX MAC and WOL */
1915 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1918 * Clear all WOL related registers
1920 for (i = 0; i < 3; ++i)
1921 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1922 for (i = 0; i < 20; ++i)
1923 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1926 * Set WOL source address. XXX is this necessary?
1929 eaddr = IF_LLADDR(ifp);
1930 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1931 CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1932 val = (eaddr[0] << 8) | eaddr[1];
1933 CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1935 /* Clear packet filters */
1936 CSR_WRITE_4(sc, ET_PKTFILT, 0);
1938 /* No ucast filtering */
1939 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1940 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1941 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1943 if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
1945 * In order to transmit jumbo packets greater than
1946 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
1947 * RX MAC and RX DMA needs to be reduced in size to
1948 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen). In
1949 * order to implement this, we must use "cut through"
1950 * mode in the RX MAC, which chops packets down into
1951 * segments. In this case we selected 256 bytes,
1952 * since this is the size of the PCI-Express TLPs
1953 * that the ET1310 uses.
1955 val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
1956 ET_RXMAC_MC_SEGSZ_ENABLE;
1960 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1962 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1964 /* Initialize RX MAC management register */
1965 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1967 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1969 CSR_WRITE_4(sc, ET_RXMAC_MGT,
1970 ET_RXMAC_MGT_PASS_ECRC |
1971 ET_RXMAC_MGT_PASS_ELEN |
1972 ET_RXMAC_MGT_PASS_ETRUNC |
1973 ET_RXMAC_MGT_CHECK_PKT);
1976 * Configure runt filtering (may not work on certain chip generations)
1978 val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
1979 ET_PKTFILT_MINLEN_MASK;
1980 val |= ET_PKTFILT_FRAG;
1981 CSR_WRITE_4(sc, ET_PKTFILT, val);
1983 /* Enable RX MAC but leave WOL disabled */
1984 CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1985 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1988 * Setup multicast hash and allmulti/promisc mode
1994 et_init_txmac(struct et_softc *sc)
1997 /* Disable TX MAC and FC(?) */
1998 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
2001 * Initialize pause time.
2002 * This register should be set before an XON/XOFF frame is
2005 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0 << ET_TXMAC_FLOWCTRL_CFPT_SHIFT);
2007 /* Enable TX MAC but leave FC(?) disabled */
2008 CSR_WRITE_4(sc, ET_TXMAC_CTRL,
2009 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
2013 et_start_rxdma(struct et_softc *sc)
2017 val = (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
2018 ET_RXDMA_CTRL_RING0_ENABLE;
2019 val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
2020 ET_RXDMA_CTRL_RING1_ENABLE;
2022 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
2026 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
2027 if_printf(sc->ifp, "can't start RX DMA engine\n");
2034 et_start_txdma(struct et_softc *sc)
2037 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
2042 et_rxeof(struct et_softc *sc)
2044 struct et_rxstatus_data *rxsd;
2045 struct et_rxstat_ring *rxst_ring;
2046 struct et_rxbuf_data *rbd;
2047 struct et_rxdesc_ring *rx_ring;
2048 struct et_rxstat *st;
2051 uint32_t rxstat_pos, rxring_pos;
2052 uint32_t rxst_info1, rxst_info2, rxs_stat_ring;
2053 int buflen, buf_idx, npost[2], ring_idx;
2054 int rxst_index, rxst_wrap;
2059 rxsd = &sc->sc_rx_status;
2060 rxst_ring = &sc->sc_rxstat_ring;
2062 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2065 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
2066 BUS_DMASYNC_POSTREAD);
2067 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
2068 BUS_DMASYNC_POSTREAD);
2070 npost[0] = npost[1] = 0;
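	/* The status block records how far the hardware has advanced in the stat ring; entries between our index/wrap and that position are new. */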
2071 rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
2072 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
2073 rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
2074 ET_RXS_STATRING_INDEX_SHIFT;
2076 while (rxst_index != rxst_ring->rsr_index ||
2077 rxst_wrap != rxst_ring->rsr_wrap) {
2078 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2081 MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
2082 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
2083 rxst_info1 = le32toh(st->rxst_info1);
2084 rxst_info2 = le32toh(st->rxst_info2);
2085 buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
2086 ET_RXST_INFO2_LEN_SHIFT;
2087 buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
2088 ET_RXST_INFO2_BUFIDX_SHIFT;
2089 ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
2090 ET_RXST_INFO2_RINGIDX_SHIFT;
2092 if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
2093 rxst_ring->rsr_index = 0;
2094 rxst_ring->rsr_wrap ^= 1;
2096 rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
2097 if (rxst_ring->rsr_wrap)
2098 rxstat_pos |= ET_RXSTAT_POS_WRAP;
2099 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
2101 if (ring_idx >= ET_RX_NRING) {
2102 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2103 if_printf(ifp, "invalid ring index %d\n", ring_idx);
2106 if (buf_idx >= ET_RX_NDESC) {
2107 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2108 if_printf(ifp, "invalid buf index %d\n", buf_idx);
2112 rbd = &sc->sc_rx_data[ring_idx];
2113 m = rbd->rbd_buf[buf_idx].rb_mbuf;
2114 if ((rxst_info1 & ET_RXST_INFO1_OK) == 0) {
2115 /* Discard errored frame. */
2116 rbd->rbd_discard(rbd, buf_idx);
2117 } else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) {
2118 /* No mbuf available; discard the frame. */
2119 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2120 rbd->rbd_discard(rbd, buf_idx);
2122 buflen -= ETHER_CRC_LEN;
2123 if (buflen < ETHER_HDR_LEN) {
2125 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2127 m->m_pkthdr.len = m->m_len = buflen;
2128 m->m_pkthdr.rcvif = ifp;
2130 ifp->if_input(ifp, m);
2135 rx_ring = &sc->sc_rx_ring[ring_idx];
2136 if (buf_idx != rx_ring->rr_index) {
2138 "WARNING!! ring %d, buf_idx %d, rr_idx %d\n",
2139 ring_idx, buf_idx, rx_ring->rr_index);
2142 MPASS(rx_ring->rr_index < ET_RX_NDESC);
2143 if (++rx_ring->rr_index == ET_RX_NDESC) {
2144 rx_ring->rr_index = 0;
2145 rx_ring->rr_wrap ^= 1;
2147 rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
2148 if (rx_ring->rr_wrap)
2149 rxring_pos |= ET_RX_RING_POS_WRAP;
2150 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
2153 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
2154 BUS_DMASYNC_PREREAD);
2155 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
2156 BUS_DMASYNC_PREREAD);
2160 et_encap(struct et_softc *sc, struct mbuf **m0)
2162 struct et_txdesc_ring *tx_ring;
2163 struct et_txbuf_data *tbd;
2164 struct et_txdesc *td;
2166 bus_dma_segment_t segs[ET_NSEG_MAX];
2168 uint32_t csum_flags, last_td_ctrl2;
2169 int error, i, idx, first_idx, last_idx, nsegs;
2171 tx_ring = &sc->sc_tx_ring;
2172 MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2173 tbd = &sc->sc_tx_data;
2174 first_idx = tx_ring->tr_ready_index;
2175 map = tbd->tbd_buf[first_idx].tb_dmap;
2177 error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
2179 if (error == EFBIG) {
2180 m = m_collapse(*m0, M_NOWAIT, ET_NSEG_MAX);
2187 error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs,
2194 } else if (error != 0)
2197 /* Check for descriptor overruns. */
2198 if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
2199 bus_dmamap_unload(sc->sc_tx_tag, map);
2202 bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE);
2204 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
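	/* TX interrupt moderation: request a completion interrupt only about once every sc_tx_intr_nsegs queued segments rather than per frame. */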
2206 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
2207 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
2208 last_td_ctrl2 |= ET_TDCTRL2_INTR;
2213 if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
2214 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2215 csum_flags |= ET_TDCTRL2_CSUM_IP;
2216 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2217 csum_flags |= ET_TDCTRL2_CSUM_UDP;
2218 else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2219 csum_flags |= ET_TDCTRL2_CSUM_TCP;
2222 for (i = 0; i < nsegs; ++i) {
2223 idx = (first_idx + i) % ET_TX_NDESC;
2224 td = &tx_ring->tr_desc[idx];
2225 td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
2226 td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
2227 td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
2228 if (i == nsegs - 1) {
2230 td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
2233 td->td_ctrl2 = htole32(csum_flags);
2235 MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2236 if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
2237 tx_ring->tr_ready_index = 0;
2238 tx_ring->tr_ready_wrap ^= 1;
2241 td = &tx_ring->tr_desc[first_idx];
2243 td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);
2245 MPASS(last_idx >= 0);
2246 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
2247 tbd->tbd_buf[last_idx].tb_dmap = map;
2248 tbd->tbd_buf[last_idx].tb_mbuf = m;
2250 tbd->tbd_used += nsegs;
2251 MPASS(tbd->tbd_used <= ET_TX_NDESC);
2257 et_txeof(struct et_softc *sc)
2259 struct et_txdesc_ring *tx_ring;
2260 struct et_txbuf_data *tbd;
2261 struct et_txbuf *tb;
2269 tx_ring = &sc->sc_tx_ring;
2270 tbd = &sc->sc_tx_data;
2272 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2275 if (tbd->tbd_used == 0)
2278 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
2279 BUS_DMASYNC_POSTWRITE);
2281 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
2282 end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
2283 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
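	/* Reclaim from the driver's start index up to the hardware's done position, unloading the DMA map and freeing the mbuf of each completed frame. */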
2285 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
2286 MPASS(tbd->tbd_start_index < ET_TX_NDESC);
2287 tb = &tbd->tbd_buf[tbd->tbd_start_index];
2288 if (tb->tb_mbuf != NULL) {
2289 bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
2290 BUS_DMASYNC_POSTWRITE);
2291 bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
2292 m_freem(tb->tb_mbuf);
2296 if (++tbd->tbd_start_index == ET_TX_NDESC) {
2297 tbd->tbd_start_index = 0;
2298 tbd->tbd_start_wrap ^= 1;
2301 MPASS(tbd->tbd_used > 0);
2305 if (tbd->tbd_used == 0)
2306 sc->watchdog_timer = 0;
2307 if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
2308 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2314 struct et_softc *sc;
2316 struct mii_data *mii;
2321 mii = device_get_softc(sc->sc_miibus);
2324 et_stats_update(sc);
2325 if (et_watchdog(sc) == EJUSTRETURN)
2327 callout_reset(&sc->sc_tick, hz, et_tick, sc);
2331 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
2333 struct et_softc *sc;
2334 struct et_rxdesc *desc;
2335 struct et_rxbuf *rb;
2337 bus_dma_segment_t segs[1];
2341 MPASS(buf_idx < ET_RX_NDESC);
2342 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2345 m->m_len = m->m_pkthdr.len = MCLBYTES;
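	/* Trim ETHER_ALIGN (2) bytes so the IP header behind the 14-byte Ethernet header lands on a 32-bit boundary. */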
2346 m_adj(m, ETHER_ALIGN);
2348 sc = rbd->rbd_softc;
2349 rb = &rbd->rbd_buf[buf_idx];
2351 if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m,
2352 segs, &nsegs, 0) != 0) {
2356 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2358 if (rb->rb_mbuf != NULL) {
2359 bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
2360 BUS_DMASYNC_POSTREAD);
2361 bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
2364 rb->rb_dmap = sc->sc_rx_sparemap;
2365 sc->sc_rx_sparemap = dmap;
2366 bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2369 desc = &rbd->rbd_ring->rr_desc[buf_idx];
2370 desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2371 desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2372 desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2373 bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2374 BUS_DMASYNC_PREWRITE);
2379 et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx)
2381 struct et_rxdesc *desc;
2383 desc = &rbd->rbd_ring->rr_desc[buf_idx];
2384 desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2385 bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2386 BUS_DMASYNC_PREWRITE);
2390 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
2392 struct et_softc *sc;
2393 struct et_rxdesc *desc;
2394 struct et_rxbuf *rb;
2396 bus_dma_segment_t segs[1];
2400 MPASS(buf_idx < ET_RX_NDESC);
2401 MGETHDR(m, M_NOWAIT, MT_DATA);
2404 m->m_len = m->m_pkthdr.len = MHLEN;
2405 m_adj(m, ETHER_ALIGN);
2407 sc = rbd->rbd_softc;
2408 rb = &rbd->rbd_buf[buf_idx];
2410 if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap,
2411 m, segs, &nsegs, 0) != 0) {
2415 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2417 if (rb->rb_mbuf != NULL) {
2418 bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
2419 BUS_DMASYNC_POSTREAD);
2420 bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
2423 rb->rb_dmap = sc->sc_rx_mini_sparemap;
2424 sc->sc_rx_mini_sparemap = dmap;
2425 bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2428 desc = &rbd->rbd_ring->rr_desc[buf_idx];
2429 desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2430 desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2431 desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2432 bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2433 BUS_DMASYNC_PREWRITE);
2437 #define ET_SYSCTL_STAT_ADD32(c, h, n, p, d) \
2438 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2439 #define ET_SYSCTL_STAT_ADD64(c, h, n, p, d) \
2440 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2443 * Create sysctl tree
2446 et_add_sysctls(struct et_softc *sc)
2448 struct sysctl_ctx_list *ctx;
2449 struct sysctl_oid_list *children, *parent;
2450 struct sysctl_oid *tree;
2451 struct et_hw_stats *stats;
2453 ctx = device_get_sysctl_ctx(sc->dev);
2454 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2456 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
2457 CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
2458 "RX IM, # packets per RX interrupt");
2459 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
2460 CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
2461 "RX IM, RX interrupt delay (x10 usec)");
2462 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
2463 CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
2464 "TX IM, # segments per TX interrupt");
2465 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
2466 CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
2468 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
2469 NULL, "ET statistics");
2470 parent = SYSCTL_CHILDREN(tree);
	/* TX/RX statistics. */
	stats = &sc->sc_stats;
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_64", &stats->pkts_64,
	    "0 to 64 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_65_127", &stats->pkts_65,
	    "65 to 127 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_128_255", &stats->pkts_128,
	    "128 to 255 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_256_511", &stats->pkts_256,
	    "256 to 511 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_512_1023", &stats->pkts_512,
	    "512 to 1023 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1024_1518", &stats->pkts_1024,
	    "1024 to 1518 byte frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1519_1522", &stats->pkts_1519,
	    "1519 to 1522 byte frames");

	/* RX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "RX MAC statistics");
	children = SYSCTL_CHILDREN(tree);
	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
	    &stats->rx_bytes, "Good bytes");
	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
	    &stats->rx_frames, "Good frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
	    &stats->rx_mcast, "Multicast frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
	    &stats->rx_bcast, "Broadcast frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
	    &stats->rx_control, "Control frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
	    &stats->rx_pause, "Pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "unknown_control",
	    &stats->rx_unknown_control, "Unknown control frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "len_errs",
	    &stats->rx_lenerrs, "Frames with mismatched length");
	ET_SYSCTL_STAT_ADD32(ctx, children, "code_errs",
	    &stats->rx_codeerrs, "Frames with code errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "cs_errs",
	    &stats->rx_cserrs, "Frames with carrier sense errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "runts",
	    &stats->rx_runts, "Too-short (runt) frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
	    &stats->rx_oversize, "Oversized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
	    &stats->rx_jabbers, "Frames with jabber errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
	    &stats->rx_drop, "Dropped frames");

	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "TX MAC statistics");
	children = SYSCTL_CHILDREN(tree);
	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
	    &stats->tx_bytes, "Good bytes");
	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
	    &stats->tx_frames, "Good frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
	    &stats->tx_mcast, "Multicast frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
	    &stats->tx_bcast, "Broadcast frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
	    &stats->tx_pause, "Pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "deferred",
	    &stats->tx_deferred, "Deferred frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_deferred",
	    &stats->tx_excess_deferred, "Excessively deferred frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_colls",
	    &stats->tx_excess_colls, "Excess collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "total_colls",
	    &stats->tx_total_colls, "Total collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause_honored",
	    &stats->tx_pause_honored, "Honored pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
	    &stats->tx_drop, "Dropped frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
	    &stats->tx_jabbers, "Frames with jabber errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
	    &stats->tx_crcerrs, "Frames with CRC errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
	    &stats->tx_control, "Control frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
	    &stats->tx_oversize, "Oversized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "undersize",
	    &stats->tx_undersize, "Undersized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
	    &stats->tx_fragments, "Fragmented frames");
}

#undef ET_SYSCTL_STAT_ADD32
#undef ET_SYSCTL_STAT_ADD64

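/*
 * Handlers for the two interrupt-moderation sysctls created above.  Both
 * follow the usual sysctl_handle_int() pattern: copy the current value
 * out, validate the new value, and program the hardware register only
 * while the interface is running.  From userland they appear under the
 * per-device tree, e.g. (assuming unit 0; the value is illustrative):
 *
 *	sysctl dev.et.0.rx_intr_npkts=128
 */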
static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	int error, v;

	sc = arg1;
	ifp = sc->ifp;
	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	return (error);
}

static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	int error, v;

	sc = arg1;
	ifp = sc->ifp;
	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	return (error);
}

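/*
 * Fold the MAC's hardware statistics registers into the software copies
 * in sc_stats.  The counters are accumulated with "+=", which suggests
 * the hardware registers clear (or may wrap) between reads; treat that
 * as an assumption about this MAC rather than a documented guarantee.
 */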
static void
et_stats_update(struct et_softc *sc)
{
	struct et_hw_stats *stats;

	stats = &sc->sc_stats;
	stats->pkts_64 += CSR_READ_4(sc, ET_STAT_PKTS_64);
	stats->pkts_65 += CSR_READ_4(sc, ET_STAT_PKTS_65_127);
	stats->pkts_128 += CSR_READ_4(sc, ET_STAT_PKTS_128_255);
	stats->pkts_256 += CSR_READ_4(sc, ET_STAT_PKTS_256_511);
	stats->pkts_512 += CSR_READ_4(sc, ET_STAT_PKTS_512_1023);
	stats->pkts_1024 += CSR_READ_4(sc, ET_STAT_PKTS_1024_1518);
	stats->pkts_1519 += CSR_READ_4(sc, ET_STAT_PKTS_1519_1522);

	stats->rx_bytes += CSR_READ_4(sc, ET_STAT_RX_BYTES);
	stats->rx_frames += CSR_READ_4(sc, ET_STAT_RX_FRAMES);
	stats->rx_crcerrs += CSR_READ_4(sc, ET_STAT_RX_CRC_ERR);
	stats->rx_mcast += CSR_READ_4(sc, ET_STAT_RX_MCAST);
	stats->rx_bcast += CSR_READ_4(sc, ET_STAT_RX_BCAST);
	stats->rx_control += CSR_READ_4(sc, ET_STAT_RX_CTL);
	stats->rx_pause += CSR_READ_4(sc, ET_STAT_RX_PAUSE);
	stats->rx_unknown_control += CSR_READ_4(sc, ET_STAT_RX_UNKNOWN_CTL);
	stats->rx_alignerrs += CSR_READ_4(sc, ET_STAT_RX_ALIGN_ERR);
	stats->rx_lenerrs += CSR_READ_4(sc, ET_STAT_RX_LEN_ERR);
	stats->rx_codeerrs += CSR_READ_4(sc, ET_STAT_RX_CODE_ERR);
	stats->rx_cserrs += CSR_READ_4(sc, ET_STAT_RX_CS_ERR);
	stats->rx_runts += CSR_READ_4(sc, ET_STAT_RX_RUNT);
	stats->rx_oversize += CSR_READ_4(sc, ET_STAT_RX_OVERSIZE);
	stats->rx_fragments += CSR_READ_4(sc, ET_STAT_RX_FRAG);
	stats->rx_jabbers += CSR_READ_4(sc, ET_STAT_RX_JABBER);
	stats->rx_drop += CSR_READ_4(sc, ET_STAT_RX_DROP);

	stats->tx_bytes += CSR_READ_4(sc, ET_STAT_TX_BYTES);
	stats->tx_frames += CSR_READ_4(sc, ET_STAT_TX_FRAMES);
	stats->tx_mcast += CSR_READ_4(sc, ET_STAT_TX_MCAST);
	stats->tx_bcast += CSR_READ_4(sc, ET_STAT_TX_BCAST);
	stats->tx_pause += CSR_READ_4(sc, ET_STAT_TX_PAUSE);
	stats->tx_deferred += CSR_READ_4(sc, ET_STAT_TX_DEFER);
	stats->tx_excess_deferred += CSR_READ_4(sc, ET_STAT_TX_EXCESS_DEFER);
	stats->tx_single_colls += CSR_READ_4(sc, ET_STAT_TX_SINGLE_COL);
	stats->tx_multi_colls += CSR_READ_4(sc, ET_STAT_TX_MULTI_COL);
	stats->tx_late_colls += CSR_READ_4(sc, ET_STAT_TX_LATE_COL);
	stats->tx_excess_colls += CSR_READ_4(sc, ET_STAT_TX_EXCESS_COL);
	stats->tx_total_colls += CSR_READ_4(sc, ET_STAT_TX_TOTAL_COL);
	stats->tx_pause_honored += CSR_READ_4(sc, ET_STAT_TX_PAUSE_HONOR);
	stats->tx_drop += CSR_READ_4(sc, ET_STAT_TX_DROP);
	stats->tx_jabbers += CSR_READ_4(sc, ET_STAT_TX_JABBER);
	stats->tx_crcerrs += CSR_READ_4(sc, ET_STAT_TX_CRC_ERR);
	stats->tx_control += CSR_READ_4(sc, ET_STAT_TX_CTL);
	stats->tx_oversize += CSR_READ_4(sc, ET_STAT_TX_OVERSIZE);
	stats->tx_undersize += CSR_READ_4(sc, ET_STAT_TX_UNDERSIZE);
	stats->tx_fragments += CSR_READ_4(sc, ET_STAT_TX_FRAG);
}

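/*
 * if_get_counter method: derive the generic interface counters from the
 * accumulated MAC statistics rather than per-driver ifnet fields, and
 * fall back to the stack's defaults for anything not covered here.
 */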
static uint64_t
et_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct et_softc *sc;
	struct et_hw_stats *stats;

	sc = if_getsoftc(ifp);
	stats = &sc->sc_stats;

	switch (cnt) {
	case IFCOUNTER_OPACKETS:
		return (stats->tx_frames);
	case IFCOUNTER_COLLISIONS:
		return (stats->tx_total_colls);
	case IFCOUNTER_OERRORS:
		return (stats->tx_drop + stats->tx_jabbers +
		    stats->tx_crcerrs + stats->tx_excess_deferred +
		    stats->tx_late_colls);
	case IFCOUNTER_IPACKETS:
		return (stats->rx_frames);
	case IFCOUNTER_IERRORS:
		return (stats->rx_crcerrs + stats->rx_alignerrs +
		    stats->rx_lenerrs + stats->rx_codeerrs + stats->rx_cserrs +
		    stats->rx_runts + stats->rx_jabbers + stats->rx_drop);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

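/*
 * Power management: on suspend the chip's clocks are gated and the PHY
 * is put into its COMA power-down state through the ET_PM register;
 * resume reverses this before the interface is reinitialized.
 */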
static int
et_suspend(device_t dev)
{
	struct et_softc *sc;
	uint32_t pmcfg;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		et_stop(sc);
	/* Disable all clocks and put PHY into COMA. */
	pmcfg = CSR_READ_4(sc, ET_PM);
	pmcfg &= ~(EM_PM_GIGEPHY_ENB | ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE |
	    ET_PM_RXCLK_GATE);
	pmcfg |= ET_PM_PHY_SW_COMA;
	CSR_WRITE_4(sc, ET_PM, pmcfg);
	ET_UNLOCK(sc);
	return (0);
}

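/*
 * Note the ordering in et_resume(): clocks are ungated (and the gigabit
 * PHY re-enabled on non-Fast-Ethernet parts) before et_init_locked()
 * touches the chip, since register access presumably requires the
 * clocks to be running.
 */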
static int
et_resume(device_t dev)
{
	struct et_softc *sc;
	uint32_t pmcfg;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	/* Take PHY out of COMA and enable clocks. */
	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
		pmcfg |= EM_PM_GIGEPHY_ENB;
	CSR_WRITE_4(sc, ET_PM, pmcfg);
	if ((sc->ifp->if_flags & IFF_UP) != 0)
		et_init_locked(sc);
	ET_UNLOCK(sc);
	return (0);
}