2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5 * Copyright (C) 2009-2015 Semihalf
6 * Copyright (C) 2015 Stormshield
9 * Developed by Semihalf.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of MARVELL nor the names of contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/endian.h>
48 #include <sys/mutex.h>
49 #include <sys/kernel.h>
50 #include <sys/module.h>
51 #include <sys/socket.h>
52 #include <sys/sysctl.h>
54 #include <net/ethernet.h>
57 #include <net/if_arp.h>
58 #include <net/if_dl.h>
59 #include <net/if_media.h>
60 #include <net/if_types.h>
61 #include <net/if_vlan_var.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/in.h>
65 #include <netinet/ip.h>
67 #include <sys/sockio.h>
69 #include <machine/bus.h>
71 #include <machine/resource.h>
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
76 #include <dev/fdt/fdt_common.h>
77 #include <dev/ofw/ofw_bus.h>
78 #include <dev/ofw/ofw_bus_subr.h>
79 #include <dev/mdio/mdio.h>
81 #include <dev/mge/if_mgevar.h>
82 #include <arm/mv/mvreg.h>
83 #include <arm/mv/mvvar.h>
85 #include "miibus_if.h"
88 #define MGE_DELAY(x) pause("SMI access sleep", (x) / tick_sbt)
90 static int mge_probe(device_t dev);
91 static int mge_attach(device_t dev);
92 static int mge_detach(device_t dev);
93 static int mge_shutdown(device_t dev);
94 static int mge_suspend(device_t dev);
95 static int mge_resume(device_t dev);
97 static int mge_miibus_readreg(device_t dev, int phy, int reg);
98 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
100 static int mge_mdio_readreg(device_t dev, int phy, int reg);
101 static int mge_mdio_writereg(device_t dev, int phy, int reg, int value);
103 static int mge_ifmedia_upd(struct ifnet *ifp);
104 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
106 static void mge_init(void *arg);
107 static void mge_init_locked(void *arg);
108 static void mge_start(struct ifnet *ifp);
109 static void mge_start_locked(struct ifnet *ifp);
110 static void mge_watchdog(struct mge_softc *sc);
111 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
113 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
114 static uint32_t mge_rx_ipg(uint32_t val, int ver);
115 static void mge_ver_params(struct mge_softc *sc);
117 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
118 static void mge_intr_rxtx(void *arg);
119 static void mge_intr_rx(void *arg);
120 static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
121 uint32_t int_cause_ext);
122 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
123 static void mge_intr_tx(void *arg);
124 static void mge_intr_tx_locked(struct mge_softc *sc);
125 static void mge_intr_misc(void *arg);
126 static void mge_intr_sum(void *arg);
127 static void mge_intr_err(void *arg);
128 static void mge_stop(struct mge_softc *sc);
129 static void mge_tick(void *msc);
130 static uint32_t mge_set_port_serial_control(uint32_t media);
131 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
132 static void mge_set_mac_address(struct mge_softc *sc);
133 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
135 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
136 static int mge_allocate_dma(struct mge_softc *sc);
137 static int mge_alloc_desc_dma(struct mge_softc *sc,
138 struct mge_desc_wrapper* desc_tab, uint32_t size,
139 bus_dma_tag_t *buffer_tag);
140 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
141 struct mbuf **mbufp, bus_addr_t *paddr);
142 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
144 static void mge_free_dma(struct mge_softc *sc);
145 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
146 uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
147 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
148 uint32_t status, uint16_t bufsize);
149 static void mge_offload_setup_descriptor(struct mge_softc *sc,
150 struct mge_desc_wrapper *dw);
151 static uint8_t mge_crc8(uint8_t *data, int size);
152 static void mge_setup_multicast(struct mge_softc *sc);
153 static void mge_set_rxic(struct mge_softc *sc);
154 static void mge_set_txic(struct mge_softc *sc);
155 static void mge_add_sysctls(struct mge_softc *sc);
156 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
/*
 * newbus method table: device lifecycle entry points plus the MII bus and
 * MDIO register-access methods this driver exports to child buses.
 */
158 static device_method_t mge_methods[] = {
159 /* Device interface */
160 DEVMETHOD(device_probe, mge_probe),
161 DEVMETHOD(device_attach, mge_attach),
162 DEVMETHOD(device_detach, mge_detach),
163 DEVMETHOD(device_shutdown, mge_shutdown),
164 DEVMETHOD(device_suspend, mge_suspend),
165 DEVMETHOD(device_resume, mge_resume),
/* MII interface — PHY register access for attached miibus children */
167 DEVMETHOD(miibus_readreg, mge_miibus_readreg),
168 DEVMETHOD(miibus_writereg, mge_miibus_writereg),
/* MDIO interface — used when an Ethernet switch hangs off this MAC */
170 DEVMETHOD(mdio_readreg, mge_mdio_readreg),
171 DEVMETHOD(mdio_writereg, mge_mdio_writereg),
175 DEFINE_CLASS_0(mge, mge_driver, mge_methods, sizeof(struct mge_softc));
177 static devclass_t mge_devclass;
/* Shared across instances: set once any unit detects an attached switch */
178 static int switch_attached = 0;
/* Attach mge under simplebus; miibus and mdio attach under mge */
180 DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
181 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
182 DRIVER_MODULE(mdio, mge, mdio_driver, mdio_devclass, 0, 0);
183 MODULE_DEPEND(mge, ether, 1, 1, 1);
184 MODULE_DEPEND(mge, miibus, 1, 1, 1);
185 MODULE_DEPEND(mge, mdio, 1, 1, 1);
/*
 * Bus resources claimed at attach time: one memory window for the MAC
 * registers and up to three shareable IRQ lines (count is chip-dependent,
 * see mge_ver_params()/sc->mge_intr_cnt).
 */
187 static struct resource_spec res_spec[] = {
188 { SYS_RES_MEMORY, 0, RF_ACTIVE },
189 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
190 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
191 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
/*
 * Interrupt handler table, indexed by IRQ slot. Entry 0 is the single
 * aggregated rx/tx handler used on parts with one combined interrupt;
 * entries 1..MGE_INTR_COUNT are the per-cause handlers.
 */
196 driver_intr_t *handler;
198 } mge_intrs[MGE_INTR_COUNT + 1] = {
199 { mge_intr_rxtx,"GbE aggregated interrupt" },
200 { mge_intr_rx, "GbE receive interrupt" },
201 { mge_intr_tx, "GbE transmit interrupt" },
202 { mge_intr_misc,"GbE misc interrupt" },
203 { mge_intr_sum, "GbE summary interrupt" },
204 { mge_intr_err, "GbE error interrupt" },
/* SMI access interlock */
/* Serializes SMI (PHY management) register access across all mge units;
 * initialized/destroyed by unit 0 in mge_attach()/mge_detach(). */
207 /* SMI access interlock */
208 static struct sx sx_smi;
/*
 * Read a PHY register via the GbE SMI interface.
 * Waits for the SMI engine to go idle, issues a read command for
 * (phy, reg), then polls for READVALID before returning the 16-bit data.
 * Returns the register value, or times out with a diagnostic printf.
 * NOTE(review): the timeout messages say "write" on this read path —
 * presumably copy/paste from mv_write_ge_smi(); confirm against upstream.
 */
211 mv_read_ge_smi(device_t dev, int phy, int reg)
215 	struct mge_softc *sc;
217 	sc = device_get_softc(dev);
218 	KASSERT(sc != NULL, ("NULL softc ptr!"));
219 	timeout = MGE_SMI_WRITE_RETRIES;
/* Spin (with sleeps) until the SMI engine is no longer busy */
223 	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
224 		MGE_DELAY(MGE_SMI_WRITE_DELAY);
227 		device_printf(dev, "SMI write timeout.\n");
/* Issue the read command: reg in bits 25:21, phy address in bits 20:16 */
232 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
233 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
235 	/* Wait till finished. */
236 	timeout = MGE_SMI_WRITE_RETRIES;
238 	    !((MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_READVALID)))
239 		MGE_DELAY(MGE_SMI_WRITE_DELAY);
242 		device_printf(dev, "SMI write validation timeout.\n");
247 	/* Wait for the data to update in the SMI register */
248 	MGE_DELAY(MGE_SMI_DELAY);
249 	ret = MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
/*
 * Write a PHY register via the GbE SMI interface.
 * Waits for the SMI engine to go idle, then issues a write command
 * carrying the 16-bit value for (phy, reg).
 * NOTE(review): the timeout message says "read" on this write path —
 * presumably swapped with mv_read_ge_smi()'s; confirm against upstream.
 */
258 mv_write_ge_smi(device_t dev, int phy, int reg, uint32_t value)
261 	struct mge_softc *sc;
263 	sc = device_get_softc(dev);
264 	KASSERT(sc != NULL, ("NULL softc ptr!"));
/* Spin (with sleeps) until the SMI engine is no longer busy */
267 	timeout = MGE_SMI_READ_RETRIES;
269 	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
270 		MGE_DELAY(MGE_SMI_READ_DELAY);
273 		device_printf(dev, "SMI read timeout.\n");
/* Issue the write: reg in bits 25:21, phy in bits 20:16, data in 15:0 */
277 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
278 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
279 	    (value & MGE_SMI_DATA_MASK)));
/*
 * Read a register from an external PHY through another mge unit's SMI
 * block (sc->phy_sc). Busy-waits with DELAY() rather than sleeping,
 * polling for READVALID; on timeout a diagnostic is printed and whatever
 * the SMI data register holds is returned.
 */
286 mv_read_ext_phy(device_t dev, int phy, int reg)
289 	struct mge_softc *sc;
292 	sc = device_get_softc(dev);
/* Issue read command on the PHY-owning unit's SMI registers */
295 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
296 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
298 	retries = MGE_SMI_READ_RETRIES;
300 	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
301 		DELAY(MGE_SMI_READ_DELAY);
304 		device_printf(dev, "Timeout while reading from PHY\n");
306 	ret = MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
/*
 * Write a register of an external PHY through another mge unit's SMI
 * block (sc->phy_sc). Issues the write, then busy-waits for the SMI
 * engine to clear BUSY; logs on timeout.
 */
313 mv_write_ext_phy(device_t dev, int phy, int reg, int value)
316 	struct mge_softc *sc;
318 	sc = device_get_softc(dev);
/* Issue write: reg in bits 25:21, phy in bits 20:16, data in 15:0 */
321 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
322 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
323 	    (value & MGE_SMI_DATA_MASK)));
325 	retries = MGE_SMI_WRITE_RETRIES;
326 	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
327 		DELAY(MGE_SMI_WRITE_DELAY);
330 		device_printf(dev, "Timeout while writing to PHY\n");
/*
 * Obtain the interface's MAC address into addr[6]: prefer the FDT
 * "local-mac-address" property; otherwise fall back to the address
 * currently programmed in the MAC_ADDR_H/L hardware registers
 * (H holds bytes 0-3, L holds bytes 4-5).
 */
335 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
337 	uint32_t mac_l, mac_h;
342 	 * Retrieve hw address from the device tree.
344 	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
/* Check the property is not all-zero before trusting it */
347 	for (i = 0; i < 6; i++)
354 	bcopy(lmac, addr, 6);
360 	 * Fall back -- use the currently programmed address.
362 	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
363 	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
/* Unpack big-endian byte order out of the two registers */
365 	addr[0] = (mac_h & 0xff000000) >> 24;
366 	addr[1] = (mac_h & 0x00ff0000) >> 16;
367 	addr[2] = (mac_h & 0x0000ff00) >> 8;
368 	addr[3] = (mac_h & 0x000000ff);
369 	addr[4] = (mac_l & 0x0000ff00) >> 8;
370 	addr[5] = (mac_l & 0x000000ff);
/*
 * Encode the TX FIFO urgent threshold field for the given chip version:
 * v1 keeps 14 value bits, v2 keeps 16, both shifted into bits starting
 * at position 4 of the register.
 */
374 mge_tfut_ipg(uint32_t val, int ver)
379 		return ((val & 0x3fff) << 4);
382 		return ((val & 0xffff) << 4);
/*
 * Encode the RX inter-packet-gap field for the given chip version.
 * v2 uses a contiguous 14-bit field at bit 8; v1 splits the 16-bit value:
 * the MSB lands at bit 25 and the low 15 bits at bit 7.
 */
387 mge_rx_ipg(uint32_t val, int ver)
392 		return ((val & 0x3fff) << 8);
395 		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
/*
 * Fill in chip-version-dependent softc parameters (IPG maxima, TX
 * arbiter/token defaults, interrupt line count) based on the SoC
 * device ID. The first branch covers Kirkwood/Discovery-class parts
 * (version 2 register layout); the else branch covers older parts.
 */
400 mge_ver_params(struct mge_softc *sc)
405 	if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
406 	    d == MV_DEV_88F6282 ||
407 	    d == MV_DEV_MV78100 ||
408 	    d == MV_DEV_MV78100_Z0 ||
409 	    (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
412 		sc->mge_tfut_ipg_max = 0xFFFF;
413 		sc->mge_rx_ipg_max = 0xFFFF;
414 		sc->mge_tx_arb_cfg = 0xFC0000FF;
415 		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
416 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/* Older (version 1) parts: narrower IPG fields, different arbiter cfg */
420 		sc->mge_tfut_ipg_max = 0x3FFF;
421 		sc->mge_rx_ipg_max = 0x3FFF;
422 		sc->mge_tx_arb_cfg = 0x000000FF;
423 		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
424 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/* 88RC8180 exposes a single combined interrupt line */
426 	if (d == MV_DEV_88RC8180)
427 		sc->mge_intr_cnt = 1;
429 		sc->mge_intr_cnt = 2;
/* Armada XP family — hardware checksum offload path differs */
431 	if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
/*
 * Program the interface's link-level address into the MAC_ADDR_H/L
 * registers (H = bytes 0-3, L = bytes 4-5) and install the matching
 * unicast filter entry for the default RX queue.
 * Caller must hold the global (TX+RX) lock.
 */
438 mge_set_mac_address(struct mge_softc *sc)
441 	uint32_t mac_l, mac_h;
443 	MGE_GLOBAL_LOCK_ASSERT(sc);
445 	if_mac = (char *)IF_LLADDR(sc->ifp);
447 	mac_l = (if_mac[4] << 8) | (if_mac[5]);
448 	mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
449 	    (if_mac[2] << 8) | (if_mac[3] << 0);
451 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
452 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
/* Unicast filtering keys off the last address byte only */
454 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
/*
 * Program the unicast DA filter table: set the pass bit (and target
 * queue) in the one byte-slot selected by the MAC's last address byte,
 * and clear every other unicast filter register.
 */
458 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
460 	uint32_t reg_idx, reg_off, reg_val, i;
/* Each 32-bit filter register holds MGE_UCAST_REG_NUMBER 8-bit slots */
463 	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
464 	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
465 	reg_val = (1 | (queue << 1)) << reg_off;
467 	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
469 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
471 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
/*
 * Apply the interface's promiscuous setting. In promiscuous mode set
 * the unicast-promisc bit in PORT_CONFIG and open every DA filter
 * (special/other multicast and unicast) to 'queue'; otherwise clear the
 * bit, zero the multicast filters, and re-program the exact MAC filter
 * via mge_set_mac_address().
 */
476 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
478 	uint32_t port_config;
481 	/* Enable or disable promiscuous mode as needed */
482 	if (sc->ifp->if_flags & IFF_PROMISC) {
483 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
484 		port_config |= PORT_CONFIG_UPM;
485 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
/* Replicate the pass+queue nibble into all four byte slots */
487 		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
488 		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
490 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
491 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
492 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
495 		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
496 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
/* Leaving promiscuous mode: restore exact filtering */
499 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
500 		port_config &= ~PORT_CONFIG_UPM;
501 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
503 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
504 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
505 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
508 		mge_set_mac_address(sc);
/*
 * bus_dmamap_load() callback: store the single segment's bus address
 * into the bus_addr_t the caller passed through 'arg'. Asserts exactly
 * one segment, matching the tags created with nsegments == 1.
 */
513 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
517 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
520 	*paddr = segs->ds_addr;
/*
 * Allocate a fresh cluster mbuf for an RX descriptor and DMA-map it.
 * Unloads any previous mapping on 'map' first, then loads the new mbuf
 * and returns its bus address via *paddr. Panics if the mapping is not
 * a single segment (RX tag is created with nsegments == 1).
 */
524 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
527 	struct mbuf *new_mbuf;
528 	bus_dma_segment_t seg[1];
532 	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
534 	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
535 	if (new_mbuf == NULL)
/* Expose the whole cluster to the hardware */
537 	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
/* Tear down the mapping of the mbuf being replaced (if any) */
540 		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
541 		bus_dmamap_unload(tag, map);
544 	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
546 	KASSERT(nsegs == 1, ("Too many segments returned!"));
547 	if (nsegs != 1 || error)
548 		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
550 	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
553 	(*paddr) = seg->ds_addr;
/*
 * Allocate and wire up a ring of 'size' DMA descriptors in 'tab':
 * allocate coherent memory for each descriptor, load it to learn its
 * bus address, chain next_desc pointers into a circular list (built
 * back-to-front, then the tail is pointed at the head), create the
 * mbuf busdma tag in *buffer_tag, and create one buffer map per slot.
 */
558 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
559     uint32_t size, bus_dma_tag_t *buffer_tag)
561 	struct mge_desc_wrapper *dw;
562 	bus_addr_t desc_paddr;
/* Walk backwards so each descriptor can point at its successor */
566 	for (i = size - 1; i >= 0; i--) {
568 		error = bus_dmamem_alloc(sc->mge_desc_dtag,
569 		    (void**)&(dw->mge_desc),
570 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
574 			if_printf(sc->ifp, "failed to allocate DMA memory\n");
/* Learn the descriptor's bus address via the load callback */
579 		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
580 		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
581 		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
584 			if_printf(sc->ifp, "can't load descriptor\n");
585 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
591 		/* Chain descriptors */
592 		dw->mge_desc->next_desc = desc_paddr;
593 		desc_paddr = dw->mge_desc_paddr;
/* Close the ring: last descriptor points back at the first */
595 	tab[size - 1].mge_desc->next_desc = desc_paddr;
597 	/* Allocate a busdma tag for mbufs. */
598 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
599 	    1, 0,				/* alignment, boundary */
600 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
601 	    BUS_SPACE_MAXADDR,			/* highaddr */
602 	    NULL, NULL,				/* filtfunc, filtfuncarg */
603 	    MCLBYTES, 1,			/* maxsize, nsegments */
604 	    MCLBYTES, 0,			/* maxsegsz, flags */
605 	    NULL, NULL,				/* lockfunc, lockfuncarg */
606 	    buffer_tag);			/* dmat */
608 		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
612 	/* Create TX busdma maps */
613 	for (i = 0; i < size; i++) {
615 		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
617 			if_printf(sc->ifp, "failed to create map for mbuf\n");
/* Slot starts empty; a buffer is attached later (RX refill / TX encap) */
621 		dw->buffer = (struct mbuf*)NULL;
622 		dw->mge_desc->buffer = (bus_addr_t)NULL;
/*
 * Set up all DMA state for the interface: create the descriptor tag,
 * build the TX and RX descriptor rings, attach an mbuf cluster to every
 * RX slot, and record the ring start bus addresses for the hardware.
 */
629 mge_allocate_dma(struct mge_softc *sc)
632 	struct mge_desc_wrapper *dw;
635 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
636 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
637 	    16, 0,				/* alignment, boundary */
638 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
639 	    BUS_SPACE_MAXADDR,			/* highaddr */
640 	    NULL, NULL,				/* filtfunc, filtfuncarg */
641 	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
642 	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
643 	    NULL, NULL,				/* lockfunc, lockfuncarg */
644 	    &sc->mge_desc_dtag);		/* dmat */
647 	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
649 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
/* Pre-fill every RX descriptor with a mapped cluster mbuf */
652 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
653 		dw = &(sc->mge_rx_desc[i]);
654 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
655 		    &dw->mge_desc->buffer);
/* Ring base addresses, later written to the DMA current-pointer regs */
658 	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
659 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
/*
 * Tear down a descriptor ring: for each slot, unmap (and, when
 * free_mbufs is set, sync/unload) the attached buffer and destroy its
 * map, then sync, unload and free the descriptor's own DMA memory.
 * free_mbufs is set for RX rings (driver owns the mbufs) and clear for
 * TX (completed mbufs were already freed in mge_intr_tx_locked()).
 */
665 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
666     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
668 	struct mge_desc_wrapper *dw;
671 	for (i = 0; i < size; i++) {
675 		if (dw->buffer_dmap) {
677 				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
678 				    BUS_DMASYNC_POSTREAD);
679 				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
681 			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
685 		/* Free RX descriptors */
687 			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
688 			    BUS_DMASYNC_POSTREAD);
689 			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
690 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
/*
 * Release all DMA state: free both descriptor rings (RX with its mbufs,
 * TX without), then destroy the mbuf tags and the descriptor tag.
 * Inverse of mge_allocate_dma().
 */
697 mge_free_dma(struct mge_softc *sc)
700 	/* Free desciptors and mbufs */
701 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
702 	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
704 	/* Destroy mbuf dma tag */
705 	bus_dma_tag_destroy(sc->mge_tx_dtag);
706 	bus_dma_tag_destroy(sc->mge_rx_dtag);
707 	/* Destroy descriptors tag */
708 	bus_dma_tag_destroy(sc->mge_desc_dtag);
/*
 * Rebuild the RX ring after a resource error: free and reallocate the
 * descriptors and their mbufs, reset the ring pointer, reprogram the
 * hardware's current-descriptor register, and re-enable the RX queue.
 * Caller must hold the receive lock.
 */
712 mge_reinit_rx(struct mge_softc *sc)
714 	struct mge_desc_wrapper *dw;
717 	MGE_RECEIVE_LOCK_ASSERT(sc);
719 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
721 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
724 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
725 		dw = &(sc->mge_rx_desc[i]);
726 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
727 		    &dw->mge_desc->buffer);
730 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
731 	sc->rx_desc_curr = 0;
/* Point the hardware at the head of the rebuilt ring */
733 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
736 	/* Enable RX queue */
737 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
740 #ifdef DEVICE_POLLING
741 static poll_handler_t mge_poll;
/*
 * DEVICE_POLLING handler: with interrupts off, service up to 'count' RX
 * frames and reap completed TX descriptors. On POLL_AND_CHECK_STATUS
 * also read/ack the interrupt cause registers and recover from RX
 * resource errors. Returns the number of RX packets processed.
 */
744 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
746 	struct mge_softc *sc = ifp->if_softc;
747 	uint32_t int_cause, int_cause_ext;
750 	MGE_RECEIVE_LOCK(sc);
/* Interface went down between polls — nothing to do */
752 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
753 		MGE_RECEIVE_UNLOCK(sc);
757 	if (cmd == POLL_AND_CHECK_STATUS) {
758 		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
759 		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
761 		/* Check for resource error */
762 		if (int_cause & MGE_PORT_INT_RXERRQ0)
/* Ack whatever causes were latched (write-0-to-clear semantics) */
765 		if (int_cause || int_cause_ext) {
766 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
767 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
772 	rx_npkts = mge_intr_rx_locked(sc, count);
/* Reap TX under the transmit lock, after dropping the receive lock */
774 	MGE_RECEIVE_UNLOCK(sc);
775 	MGE_TRANSMIT_LOCK(sc);
776 	mge_intr_tx_locked(sc);
777 	MGE_TRANSMIT_UNLOCK(sc);
780 #endif /* DEVICE_POLLING */
/*
 * Device attach: discover the PHY (or switch) from the FDT, initialize
 * locks and per-version parameters, allocate bus resources and DMA
 * rings, create and configure the ifnet, attach miibus (or a fixed
 * 1000baseT media when no PHY is present), hook up the interrupt
 * handlers, and — if a switch is attached — add an mdio child bus.
 */
783 mge_attach(device_t dev)
785 	struct mge_softc *sc;
786 	struct mii_softc *miisc;
788 	uint8_t hwaddr[ETHER_ADDR_LEN];
791 	sc = device_get_softc(dev);
793 	sc->node = ofw_bus_get_node(dev);
/* Resolve the PHY from the device tree; phy_sc is the unit owning its SMI */
796 	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) == 0) {
797 		device_printf(dev, "PHY%i attached, phy_sc points to %s\n", phy,
798 		    device_get_nameunit(sc->phy_sc->dev));
799 		sc->phy_attached = 1;
801 		device_printf(dev, "PHY not attached.\n");
802 		sc->phy_attached = 0;
/* Detect an Ethernet switch ("mrvl,sw") hanging off this MAC */
806 	if (fdt_find_compatible(sc->node, "mrvl,sw", 1) != 0) {
807 		device_printf(dev, "Switch attached.\n");
808 		sc->switch_attached = 1;
809 		/* additional variable available across instances */
812 		sc->switch_attached = 0;
/* Unit 0 owns the global SMI interlock shared by all mge instances */
815 	if (device_get_unit(dev) == 0) {
816 		sx_init(&sx_smi, "mge_tick() SMI access threads interlock");
819 	/* Set chip version-dependent parameters */
822 	/* Initialize mutexes */
823 	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
825 	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
828 	/* Allocate IO and IRQ resources */
829 	error = bus_alloc_resources(dev, res_spec, sc->res);
831 		device_printf(dev, "could not allocate resources\n");
836 	/* Allocate DMA, buffers, buffer descriptors */
837 	error = mge_allocate_dma(sc);
843 	sc->tx_desc_curr = 0;
844 	sc->rx_desc_curr = 0;
845 	sc->tx_desc_used_idx = 0;
846 	sc->tx_desc_used_count = 0;
848 	/* Configure defaults for interrupts coalescing */
849 	sc->rx_ic_time = 768;
850 	sc->tx_ic_time = 768;
853 	/* Allocate network interface */
854 	ifp = sc->ifp = if_alloc(IFT_ETHER);
856 		device_printf(dev, "if_alloc() failed\n");
861 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
863 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
864 	ifp->if_capabilities = IFCAP_VLAN_MTU;
865 	if (sc->mge_hw_csum) {
866 		ifp->if_capabilities |= IFCAP_HWCSUM;
867 		ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
869 	ifp->if_capenable = ifp->if_capabilities;
871 #ifdef DEVICE_POLLING
872 	/* Advertise that polling is supported */
873 	ifp->if_capabilities |= IFCAP_POLLING;
876 	ifp->if_init = mge_init;
877 	ifp->if_start = mge_start;
878 	ifp->if_ioctl = mge_ioctl;
/* Bound the send queue by the TX ring size */
880 	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
881 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
882 	IFQ_SET_READY(&ifp->if_snd);
884 	mge_get_mac_address(sc, hwaddr);
885 	ether_ifattach(ifp, hwaddr);
886 	callout_init(&sc->wd_callout, 0);
/* With a real PHY, attach miibus; otherwise fix media at 1000baseT FDX */
889 	if (sc->phy_attached) {
890 		error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
891 		    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
893 			device_printf(dev, "MII failed to find PHY\n");
899 		sc->mii = device_get_softc(sc->miibus);
901 		/* Tell the MAC where to find the PHY so autoneg works */
902 		miisc = LIST_FIRST(&sc->mii->mii_phys);
903 		MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
905 		/* no PHY, so use hard-coded values */
906 		ifmedia_init(&sc->mge_ifmedia, 0,
909 		ifmedia_add(&sc->mge_ifmedia,
910 		    IFM_ETHER | IFM_1000_T | IFM_FDX,
912 		ifmedia_set(&sc->mge_ifmedia,
913 		    IFM_ETHER | IFM_1000_T | IFM_FDX);
916 	/* Attach interrupt handlers */
917 	/* TODO: review flags, in part. mark RX as INTR_ENTROPY ? */
/* Slot 0 of mge_intrs is the aggregated handler for single-IRQ parts */
918 	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
919 		error = bus_setup_intr(dev, sc->res[i],
920 		    INTR_TYPE_NET | INTR_MPSAFE,
921 		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
922 		    sc, &sc->ih_cookie[i - 1]);
924 			device_printf(dev, "could not setup %s\n",
925 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
931 	if (sc->switch_attached) {
/* Route MAC management traffic to the switch's pseudo-PHY address */
933 		MGE_WRITE(sc, MGE_REG_PHYDEV, MGE_SWITCH_PHYDEV);
934 		child = device_add_child(dev, "mdio", -1);
935 		bus_generic_attach(dev);
/*
 * Device detach: stop the controller, drain the watchdog callout,
 * tear down interrupt handlers, detach the ifnet, free DMA state and
 * bus resources, destroy the locks, and (on unit 0) the shared SMI
 * interlock. Inverse of mge_attach().
 */
942 mge_detach(device_t dev)
944 	struct mge_softc *sc;
947 	sc = device_get_softc(dev);
949 	/* Stop controller and free TX queue */
953 	/* Wait for stopping ticks */
954 	callout_drain(&sc->wd_callout);
956 	/* Stop and release all interrupts */
957 	for (i = 0; i < sc->mge_intr_cnt; ++i) {
958 		if (!sc->ih_cookie[i])
/* res[0] is the memory window; IRQ resources start at res[1] */
961 		error = bus_teardown_intr(dev, sc->res[1 + i],
964 			device_printf(dev, "could not release %s\n",
965 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
968 	/* Detach network interface */
970 		ether_ifdetach(sc->ifp);
974 	/* Free DMA resources */
977 	/* Free IO memory handler */
978 	bus_release_resources(dev, res_spec, sc->res);
980 	/* Destroy mutexes */
981 	mtx_destroy(&sc->receive_lock);
982 	mtx_destroy(&sc->transmit_lock);
/* Unit 0 created the global SMI interlock; it destroys it too */
984 	if (device_get_unit(dev) == 0)
/*
 * ifmedia status callback: without a PHY, report a fixed active
 * 1000baseT full-duplex link; with a PHY, relay the current miibus
 * media state.
 */
991 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
993 	struct mge_softc *sc;
994 	struct mii_data *mii;
999 	if (!sc->phy_attached) {
1000 		ifmr->ifm_active = IFM_1000_T | IFM_FDX | IFM_ETHER;
1001 		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1008 	ifmr->ifm_active = mii->mii_media_active;
1009 	ifmr->ifm_status = mii->mii_media_status;
1012 	MGE_GLOBAL_UNLOCK(sc);
/*
 * Translate an ifmedia word into the PORT_SERIAL_CTRL register value:
 * base config (reserved bit 9, force-link-fail, 1552-byte MRU) plus
 * speed/autoneg bits per media subtype, plus full duplex when IFM_FDX
 * is set. Pure function of 'media'; touches no hardware.
 */
1016 mge_set_port_serial_control(uint32_t media)
1018 	uint32_t port_config;
1020 	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
1021 	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
1023 	if (IFM_TYPE(media) == IFM_ETHER) {
1024 		switch(IFM_SUBTYPE(media)) {
1028 				port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
1029 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1030 				    | PORT_SERIAL_SPEED_AUTONEG);
1033 				port_config |= (PORT_SERIAL_MII_SPEED_100 |
1034 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1035 				    | PORT_SERIAL_SPEED_AUTONEG);
/* Default/10Mb case: plain autonegotiation */
1038 				port_config |= (PORT_SERIAL_AUTONEG |
1039 				    PORT_SERIAL_AUTONEG_FC |
1040 				    PORT_SERIAL_SPEED_AUTONEG);
1043 	if (media & IFM_FDX)
1044 		port_config |= PORT_SERIAL_FULL_DUPLEX;
1046 	return (port_config);
/*
 * ifmedia change callback: with a PHY and the interface up, record the
 * requested media, push it to the PHY via mii_mediachg(), and
 * reinitialize the MAC. Deliberately a no-op for switch setups — the
 * MAC-to-switch link parameters are fixed by the board design.
 */
1050 mge_ifmedia_upd(struct ifnet *ifp)
1052 	struct mge_softc *sc = ifp->if_softc;
1055 	 * Do not do anything for switch here, as updating media between
1056 	 * MGE MAC and switch MAC is hardcoded in PCB. Changing it here would
1059 	if (sc->phy_attached) {
1060 		MGE_GLOBAL_LOCK(sc);
1061 		if (ifp->if_flags & IFF_UP) {
1062 			sc->mge_media_status = sc->mii->mii_media.ifm_media;
1063 			mii_mediachg(sc->mii);
1065 			/* MGE MAC needs to be reinitialized. */
1066 			mge_init_locked(sc);
1069 		MGE_GLOBAL_UNLOCK(sc);
/* if_init entry point: take the global lock and run mge_init_locked(). */
1078 	struct mge_softc *sc;
1081 	MGE_GLOBAL_LOCK(sc);
1083 	mge_init_locked(arg);
1085 	MGE_GLOBAL_UNLOCK(sc);
/*
 * Full MAC (re)initialization, with the global lock held: stop the
 * port, program MAC address and filters, configure TX queues, SDMA,
 * port serial control from the recorded media, arm the RX ring, enable
 * the port, wait briefly for link, set up interrupt coalescing, enable
 * interrupts (unless polling), mark the ifnet running, and start the
 * watchdog tick when a PHY is present.
 */
1089 mge_init_locked(void *arg)
1091 	struct mge_softc *sc = arg;
1092 	struct mge_desc_wrapper *dw;
1093 	volatile uint32_t reg_val;
1095 	uint32_t media_status;
1098 	MGE_GLOBAL_LOCK_ASSERT(sc);
1100 	/* Stop interface */
1103 	/* Disable interrupts */
1104 	mge_intrs_ctrl(sc, 0);
1106 	/* Set MAC address */
1107 	mge_set_mac_address(sc);
1109 	/* Setup multicast filters */
1110 	mge_setup_multicast(sc);
/* Version-2 silicon needs RGMII and fixed-priority queue arbitration */
1112 	if (sc->mge_ver == 2) {
1113 		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
1114 		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
1117 	/* Initialize TX queue configuration registers */
1118 	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
1119 	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
1120 	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
1122 	/* Clear TX queue configuration registers for unused queues */
1123 	for (i = 1; i < 7; i++) {
1124 		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
1125 		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
1126 		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
1129 	/* Set default MTU */
1130 	MGE_WRITE(sc, sc->mge_mtu, 0);
1132 	/* Port configuration */
1133 	MGE_WRITE(sc, MGE_PORT_CONFIG,
1134 	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
1135 	    PORT_CONFIG_ARO_RXQ(0));
1136 	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);
1138 	/* Configure promisc mode */
1139 	mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE);
1141 	media_status = sc->mge_media_status;
/* MAC-to-switch link is always gigabit regardless of requested media */
1142 	if (sc->switch_attached) {
1143 		media_status &= ~IFM_TMASK;
1144 		media_status |= IFM_1000_T;
1147 	/* Setup port configuration */
1148 	reg_val = mge_set_port_serial_control(media_status);
1149 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1151 	/* Setup SDMA configuration */
1152 	MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
1153 	    MGE_SDMA_TX_BYTE_SWAP |
1154 	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
1155 	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
1157 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
/* Point the DMA engines at the ring heads */
1159 	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
1160 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
1163 	/* Reset descriptor indexes */
1164 	sc->tx_desc_curr = 0;
1165 	sc->rx_desc_curr = 0;
1166 	sc->tx_desc_used_idx = 0;
1167 	sc->tx_desc_used_count = 0;
1169 	/* Enable RX descriptors */
1170 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
1171 		dw = &sc->mge_rx_desc[i];
/* Hand the descriptor to hardware with interrupt-on-completion set */
1172 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1173 		dw->mge_desc->buff_size = MCLBYTES;
1174 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1175 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1178 	/* Enable RX queue */
1179 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
/* Enable the port serial interface */
1182 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1183 	reg_val |= PORT_SERIAL_ENABLE;
1184 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
/* Poll briefly for link-up; continue with a warning on timeout */
1187 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1188 		if (reg_val & MGE_STATUS_LINKUP)
1192 		if_printf(sc->ifp, "Timeout on link-up\n");
1197 	/* Setup interrupts coalescing */
1201 	/* Enable interrupts */
1202 #ifdef DEVICE_POLLING
1204 	 * * ...only if polling is not turned on. Disable interrupts explicitly
1205 	 * if polling is enabled.
1207 	if (sc->ifp->if_capenable & IFCAP_POLLING)
1208 		mge_intrs_ctrl(sc, 0);
1210 #endif /* DEVICE_POLLING */
1211 		mge_intrs_ctrl(sc, 1);
1213 	/* Activate network interface */
1214 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
1215 	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1218 	/* Schedule watchdog timeout */
1219 	if (sc->phy_attached)
1220 		callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * Aggregated interrupt handler (single-IRQ parts): under the global
 * lock, read both cause registers, ack and reap TX completions if a TX
 * cause bit is set, then delegate RX causes to mge_intr_rx_check().
 * Bails out immediately when DEVICE_POLLING owns the device.
 */
1224 mge_intr_rxtx(void *arg)
1226 	struct mge_softc *sc;
1227 	uint32_t int_cause, int_cause_ext;
1230 	MGE_GLOBAL_LOCK(sc);
1232 #ifdef DEVICE_POLLING
1233 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1234 		MGE_GLOBAL_UNLOCK(sc);
1239 	/* Get interrupt cause */
1240 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1241 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1243 	/* Check for Transmit interrupt */
1244 	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
1245 	    MGE_PORT_INT_EXT_TXUR)) {
/* Ack only the TX bits (write-0-to-clear) and reap the TX ring */
1246 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1247 		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1248 		mge_intr_tx_locked(sc);
/* Drop TX half of the global lock; RX processing continues under RX lock */
1251 	MGE_TRANSMIT_UNLOCK(sc);
1253 	/* Check for Receive interrupt */
1254 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1256 	MGE_RECEIVE_UNLOCK(sc);
/* Error interrupt handler — currently only logs that it fired. */
1260 mge_intr_err(void *arg)
1262 	struct mge_softc *sc;
1267 	if_printf(ifp, "%s\n", __FUNCTION__);
/* Miscellaneous interrupt handler — currently only logs that it fired. */
1271 mge_intr_misc(void *arg)
1273 	struct mge_softc *sc;
1278 	if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * Dedicated RX interrupt handler: under the receive lock, read the
 * cause registers and delegate to mge_intr_rx_check(). A no-op while
 * DEVICE_POLLING owns the device.
 */
1282 mge_intr_rx(void *arg) {
1283 	struct mge_softc *sc;
1284 	uint32_t int_cause, int_cause_ext;
1287 	MGE_RECEIVE_LOCK(sc);
1289 #ifdef DEVICE_POLLING
1290 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1291 		MGE_RECEIVE_UNLOCK(sc);
1296 	/* Get interrupt cause */
1297 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1298 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1300 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1302 	MGE_RECEIVE_UNLOCK(sc);
/*
 * Common RX cause processing: recover from an RX resource error
 * (RXERRQ0) by acking it, then mask down to the RX-queue and RX-overrun
 * bits; if either remains, ack them and drain the ring without a packet
 * budget (count == -1). Called with the receive lock held.
 */
1306 mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
1307     uint32_t int_cause_ext)
1309 	/* Check for resource error */
1310 	if (int_cause & MGE_PORT_INT_RXERRQ0) {
/* Ack the resource-error bit (write-0-to-clear) */
1312 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1313 		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
1316 	int_cause &= MGE_PORT_INT_RXQ0;
1317 	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1319 	if (int_cause || int_cause_ext) {
1320 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1321 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1322 		mge_intr_rx_locked(sc, -1);
/*
 * Drain up to 'count' received frames from the RX ring (count < 0 means
 * unbounded). For each hardware-completed descriptor: copy the payload
 * into a fresh mbuf chain with m_devget(), strip CRC and the 2-byte
 * hardware prepad, run checksum-offload post-processing, and hand the
 * packet to if_input with the receive lock dropped. The descriptor is
 * then recycled back to the hardware. Returns the number of packets
 * delivered. Caller holds the receive lock.
 */
1327 mge_intr_rx_locked(struct mge_softc *sc, int count)
1329 	struct ifnet *ifp = sc->ifp;
1332 	struct mge_desc_wrapper* dw;
1336 	MGE_RECEIVE_LOCK_ASSERT(sc);
1338 	while (count != 0) {
1339 		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1340 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1341 		    BUS_DMASYNC_POSTREAD);
1344 		status = dw->mge_desc->cmd_status;
1345 		bufsize = dw->mge_desc->buff_size;
/* Descriptor still owned by DMA engine — ring is drained */
1346 		if ((status & MGE_DMA_OWNED) != 0)
/*
 * NOTE(review): '~(status & MGE_ERR_SUMMARY)' is a bitwise NOT and is
 * non-zero for any single-bit mask value, so this error test is
 * effectively always true; '!' was almost certainly intended. Confirm
 * against upstream before changing — errored frames currently still
 * get delivered.
 */
1349 		if (dw->mge_desc->byte_count &&
1350 		    ~(status & MGE_ERR_SUMMARY)) {
1352 			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1353 			    BUS_DMASYNC_POSTREAD);
/* Copy out of the DMA buffer so the descriptor can be recycled at once */
1355 			mb = m_devget(dw->buffer->m_data,
1356 			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
1360 				/* Give up if no mbufs */
/* Skip the 2-byte alignment prepad the hardware inserts */
1364 			mb->m_pkthdr.len -= 2;
1367 			mb->m_pkthdr.rcvif = ifp;
1369 			mge_offload_process_frame(ifp, mb, status,
/* if_input may sleep/recurse — call it without the receive lock */
1372 			MGE_RECEIVE_UNLOCK(sc);
1373 			(*ifp->if_input)(ifp, mb);
1374 			MGE_RECEIVE_LOCK(sc);
/* Recycle the descriptor back to the hardware */
1378 		dw->mge_desc->byte_count = 0;
1379 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1380 		sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
1381 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1382 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1388 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);
/*
 * mge_intr_sum: summary interrupt handler; the visible body only logs
 * that the interrupt fired.
 * NOTE(review): 'ifp' is used below but its declaration is elided in
 * this excerpt.
 */
1394 mge_intr_sum(void *arg)
1396 struct mge_softc *sc = arg;
1400 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * mge_intr_tx: TX interrupt handler.  Bails out when DEVICE_POLLING is
 * active (the poll routine reaps TX then), acknowledges the TX-buffer and
 * TX-underrun causes, and reaps completed descriptors under the TX lock.
 */
1404 mge_intr_tx(void *arg)
1406 struct mge_softc *sc = arg;
1407 uint32_t int_cause_ext;
1409 MGE_TRANSMIT_LOCK(sc);
1411 #ifdef DEVICE_POLLING
1412 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1413 MGE_TRANSMIT_UNLOCK(sc);
1418 /* Ack the interrupt */
/* Clear-by-writing-complement: acknowledge only TXBUF0 and TXUR bits. */
1419 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1420 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1421 (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1423 mge_intr_tx_locked(sc);
1425 MGE_TRANSMIT_UNLOCK(sc);
/*
 * mge_intr_tx_locked: walk the used portion of the TX ring, reclaim
 * descriptors the DMA engine has released, update collision/output
 * statistics, free the transmitted mbufs, then restart transmission of
 * anything still queued.  Caller holds the transmit lock.
 * NOTE(review): some original lines (declarations, braces, break
 * statements) are elided in this excerpt; only visible code is changed.
 */
1429 mge_intr_tx_locked(struct mge_softc *sc)
1431 struct ifnet *ifp = sc->ifp;
1432 struct mge_desc_wrapper *dw;
1433 struct mge_desc *desc;
1437 MGE_TRANSMIT_LOCK_ASSERT(sc);
1439 /* Disable watchdog */
1442 while (sc->tx_desc_used_count) {
1443 /* Get the descriptor */
1444 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1445 desc = dw->mge_desc;
/* Make the DMA-written status visible to the CPU before reading it. */
1446 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1447 BUS_DMASYNC_POSTREAD);
1449 /* Get descriptor status */
1450 status = desc->cmd_status;
/* Still owned by the DMA engine: stop reaping here. */
1452 if (status & MGE_DMA_OWNED)
/*
 * BUGFIX: was '(++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM' assigned back
 * to the same object — two unsequenced modifications, undefined behavior
 * (C11 6.5p2).  Rewritten as a single well-defined update.
 */
1455 sc->tx_desc_used_idx =
1456 (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
1457 sc->tx_desc_used_count--;
1459 /* Update collision statistics */
1460 if (status & MGE_ERR_SUMMARY) {
1461 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1462 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1463 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1464 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
/* Transmission done: release the DMA mapping and free the mbuf. */
1467 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1468 BUS_DMASYNC_POSTWRITE);
1469 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1470 m_freem(dw->buffer);
1471 dw->buffer = (struct mbuf*)NULL;
1474 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1478 /* Now send anything that was pending */
1479 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1480 mge_start_locked(ifp);
/*
 * mge_ioctl: interface ioctl handler.  The visible code handles interface
 * flag changes, multicast list updates, capability (checksum/polling)
 * changes, and media ioctls; everything else falls through to
 * ether_ioctl().
 * NOTE(review): the switch statement's 'case' labels and several other
 * lines are elided in this excerpt — the groupings below are inferred
 * from the visible bodies and should be confirmed against the full file.
 */
1484 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1486 struct mge_softc *sc = ifp->if_softc;
1487 struct ifreq *ifr = (struct ifreq *)data;
/* Presumably SIOCSIFFLAGS handling: apply flag deltas under the lock. */
1495 MGE_GLOBAL_LOCK(sc);
1497 if (ifp->if_flags & IFF_UP) {
1498 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* XOR old vs. new flags: act only on the bits that changed. */
1499 flags = ifp->if_flags ^ sc->mge_if_flags;
1500 if (flags & IFF_PROMISC)
1501 mge_set_prom_mode(sc,
1502 MGE_RX_DEFAULT_QUEUE);
1504 if (flags & IFF_ALLMULTI)
1505 mge_setup_multicast(sc);
1507 mge_init_locked(sc);
1509 else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1512 sc->mge_if_flags = ifp->if_flags;
1513 MGE_GLOBAL_UNLOCK(sc);
/* Presumably SIOCADDMULTI/SIOCDELMULTI: reprogram multicast filters. */
1517 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1518 MGE_GLOBAL_LOCK(sc);
1519 mge_setup_multicast(sc);
1520 MGE_GLOBAL_UNLOCK(sc);
/* Presumably SIOCSIFCAP: toggle only the capabilities that changed. */
1524 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1525 if (mask & IFCAP_HWCSUM) {
1526 ifp->if_capenable &= ~IFCAP_HWCSUM;
1527 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1528 if (ifp->if_capenable & IFCAP_TXCSUM)
1529 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1531 ifp->if_hwassist = 0;
1533 #ifdef DEVICE_POLLING
1534 if (mask & IFCAP_POLLING) {
1535 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1536 error = ether_poll_register(mge_poll, ifp);
/* Polling on: disable device interrupts. */
1540 MGE_GLOBAL_LOCK(sc);
1541 mge_intrs_ctrl(sc, 0);
1542 ifp->if_capenable |= IFCAP_POLLING;
1543 MGE_GLOBAL_UNLOCK(sc);
/* Polling off: re-enable device interrupts. */
1545 error = ether_poll_deregister(ifp);
1546 MGE_GLOBAL_LOCK(sc);
1547 mge_intrs_ctrl(sc, 1);
1548 ifp->if_capenable &= ~IFCAP_POLLING;
1549 MGE_GLOBAL_UNLOCK(sc);
1554 case SIOCGIFMEDIA: /* fall through */
1557 * Setting up media type via ioctls is *not* supported for MAC
1558 * which is connected to switch. Use etherswitchcfg.
1560 if (!sc->phy_attached && (command == SIOCSIFMEDIA))
1562 else if (!sc->phy_attached) {
1563 error = ifmedia_ioctl(ifp, ifr, &sc->mge_ifmedia,
/* Hardware limitation: reject 1000baseT at half duplex. */
1568 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1569 && !(ifr->ifr_media & IFM_FDX)) {
1570 device_printf(sc->dev,
1571 "1000baseTX half-duplex unsupported\n");
1574 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
/* Default: let the generic Ethernet layer handle it. */
1577 error = ether_ioctl(ifp, command, data);
/*
 * mge_miibus_readreg: MII bus read callback — reads a PHY register via
 * the external-PHY access helper.  Asserts that no switch is attached,
 * since miibus and an attached switch are mutually exclusive here.
 */
1583 mge_miibus_readreg(device_t dev, int phy, int reg)
1585 struct mge_softc *sc;
1586 sc = device_get_softc(dev);
1588 KASSERT(!switch_attached, ("miibus used with switch attached"));
1590 return (mv_read_ext_phy(dev, phy, reg));
/*
 * mge_miibus_writereg: MII bus write callback — writes a PHY register via
 * the external-PHY access helper.  Mirrors mge_miibus_readreg(), including
 * the no-switch-attached assertion.
 */
1594 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1596 struct mge_softc *sc;
1597 sc = device_get_softc(dev);
1599 KASSERT(!switch_attached, ("miibus used with switch attached"));
1601 mv_write_ext_phy(dev, phy, reg, value);
/*
 * mge_probe: device probe method.  Matches the OFW "mrvl,ge" compatible
 * string (when the node status is okay) and sets the device description.
 */
1607 mge_probe(device_t dev)
1610 if (!ofw_bus_status_okay(dev))
1613 if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1616 device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1617 return (BUS_PROBE_DEFAULT);
/*
 * mge_resume: resume method stub — only announces that it was called;
 * no state is restored here.
 */
1621 mge_resume(device_t dev)
1624 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * mge_shutdown: shutdown method.  Deregisters polling if it was enabled
 * and quiesces the device under the global lock.
 */
1629 mge_shutdown(device_t dev)
1631 struct mge_softc *sc = device_get_softc(dev);
1633 MGE_GLOBAL_LOCK(sc);
1635 #ifdef DEVICE_POLLING
1636 if (sc->ifp->if_capenable & IFCAP_POLLING)
1637 ether_poll_deregister(sc->ifp);
1642 MGE_GLOBAL_UNLOCK(sc);
/*
 * mge_encap: map an outgoing mbuf for DMA and fill in the TX descriptor
 * at tx_desc_curr, then advance the ring index and the used count.
 * Only a single DMA segment per packet is supported; multi-segment loads
 * are unloaded and rejected.
 * NOTE(review): some original lines (declarations, braces, return
 * statements) are elided in this excerpt; only visible code is changed.
 */
1648 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1650 struct mge_desc_wrapper *dw = NULL;
1652 bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1660 /* Fetch unused map */
1661 desc_no = sc->tx_desc_curr;
1662 dw = &sc->mge_tx_desc[desc_no];
1663 mapp = dw->buffer_dmap;
1665 /* Create mapping in DMA memory */
1666 error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1673 /* Only one segment is supported. */
1675 bus_dmamap_unload(sc->mge_tx_dtag, mapp);
/* Flush CPU writes to the buffer before the device reads it. */
1680 bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1682 /* Everything is ok, now we can send buffers */
1683 for (seg = 0; seg < nsegs; seg++) {
1684 dw->mge_desc->byte_count = segs[seg].ds_len;
1685 dw->mge_desc->buffer = segs[seg].ds_addr;
1687 dw->mge_desc->cmd_status = 0;
/* Set checksum-offload bits before adding the common command flags. */
1689 mge_offload_setup_descriptor(sc, dw);
1690 dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1691 MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1695 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1696 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * BUGFIX: was 'sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM'
 * — two unsequenced modifications of the same object, undefined behavior
 * (C11 6.5p2).  Rewritten as a single well-defined update.
 */
1698 sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
1699 sc->tx_desc_used_count++;
/*
 * mge_tick: periodic (1 Hz) callout — checks the TX watchdog and media
 * status, then reschedules itself.
 * NOTE(review): the function header line is elided in this excerpt;
 * 'msc' is the callout argument carrying the softc.
 */
1706 struct mge_softc *sc = msc;
1708 KASSERT(sc->phy_attached == 1, ("mge_tick while PHY not attached"));
1710 MGE_GLOBAL_LOCK(sc);
1712 /* Check for TX timeout */
1717 /* Check for media type change */
1718 if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1719 mge_ifmedia_upd(sc->ifp);
1721 MGE_GLOBAL_UNLOCK(sc);
1723 /* Schedule another timeout one second from now */
1724 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * mge_watchdog: TX watchdog check.  Returns early while the timer is
 * disarmed (0) or still counting down; on expiry it records an output
 * error, logs the timeout, and reinitializes the interface.
 * NOTE(review): 'ifp' is used below but its declaration is elided in
 * this excerpt.
 */
1730 mge_watchdog(struct mge_softc *sc)
/* Timer disarmed, or decremented and not yet zero: no timeout. */
1736 if (sc->wd_timer == 0 || --sc->wd_timer) {
1740 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1741 if_printf(ifp, "watchdog timeout\n");
/* Recover by fully reinitializing the hardware. */
1744 mge_init_locked(sc);
/*
 * mge_start: if_start entry point — thin wrapper that takes the transmit
 * lock around mge_start_locked().
 */
1748 mge_start(struct ifnet *ifp)
1750 struct mge_softc *sc = ifp->if_softc;
1752 MGE_TRANSMIT_LOCK(sc);
1754 mge_start_locked(ifp);
1756 MGE_TRANSMIT_UNLOCK(sc);
/*
 * mge_start_locked: dequeue packets from the interface send queue, make
 * them single-segment and writable as required by the hardware, encap
 * them into TX descriptors, and kick the transmit queue.  Caller holds
 * the transmit lock.
 * NOTE(review): several original lines (braces, error paths, loop
 * structure) are elided in this excerpt; comments describe only the
 * visible code.
 */
1760 mge_start_locked(struct ifnet *ifp)
1762 struct mge_softc *sc;
1763 struct mbuf *m0, *mtmp;
1764 uint32_t reg_val, queued = 0;
1768 MGE_TRANSMIT_LOCK_ASSERT(sc);
/* Only transmit when running and not flow-controlled (OACTIVE). */
1770 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1775 /* Get packet from the queue */
1776 IF_DEQUEUE(&ifp->if_snd, m0);
/* Checksum offload / VLAN tagging mutate the mbuf: ensure it is writable. */
1780 if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
1781 m0->m_flags & M_VLANTAG) {
1782 if (M_WRITABLE(m0) == 0) {
1783 mtmp = m_dup(m0, M_NOWAIT);
1790 /* The driver support only one DMA fragment. */
1791 if (m0->m_next != NULL) {
1792 mtmp = m_defrag(m0, M_NOWAIT);
1797 /* Check for free descriptors */
1798 if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
/* Ring full: requeue the packet and mark the interface busy. */
1799 IF_PREPEND(&ifp->if_snd, m0);
1800 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1804 if (mge_encap(sc, m0) != 0)
1812 /* Enable transmitter and watchdog timer */
1813 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1814 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
/*
 * mge_stop: bring the interface down — stop the tick callout, disable
 * interrupts and the RX/TX queues, reclaim and free all pending TX
 * mbufs, wait for the transmitter to drain, then disable the port.
 * NOTE(review): some original lines (declarations, braces, loop/timeout
 * structure) are elided in this excerpt; only visible code is changed.
 */
1820 mge_stop(struct mge_softc *sc)
1823 volatile uint32_t reg_val, status;
1824 struct mge_desc_wrapper *dw;
1825 struct mge_desc *desc;
1830 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1833 /* Stop tick engine */
1834 callout_stop(&sc->wd_callout);
1836 /* Disable interface */
1837 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1840 /* Disable interrupts */
1841 mge_intrs_ctrl(sc, 0);
1843 /* Disable Rx and Tx */
1844 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1845 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1846 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1848 /* Remove pending data from TX queue */
1849 while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1850 sc->tx_desc_used_count) {
1851 /* Get the descriptor */
1852 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1853 desc = dw->mge_desc;
1854 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1855 BUS_DMASYNC_POSTREAD);
1857 /* Get descriptor status */
1858 status = desc->cmd_status;
1860 if (status & MGE_DMA_OWNED)
/*
 * BUGFIX: was '(++sc->tx_desc_used_idx) %' assigned back to the same
 * object — two unsequenced modifications, undefined behavior (C11
 * 6.5p2).  Rewritten as a single well-defined update.
 */
1863 sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
1865 sc->tx_desc_used_count--;
/* Release the DMA mapping and free the never-sent mbuf. */
1867 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1868 BUS_DMASYNC_POSTWRITE);
1869 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1871 m_freem(dw->buffer);
1872 dw->buffer = (struct mbuf*)NULL;
1875 /* Wait for end of transmission */
1878 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1879 if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1880 (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1887 "%s: timeout while waiting for end of transmission\n",
/* Finally disable the port serial interface. */
1890 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1891 reg_val &= ~(PORT_SERIAL_ENABLE);
1892 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
/*
 * mge_suspend: suspend method stub — only announces that it was called;
 * no state is saved here.
 */
1896 mge_suspend(device_t dev)
1899 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * mge_offload_process_frame: translate the hardware's RX checksum status
 * bits into mbuf csum_flags for the stack.  IP header checksum is marked
 * valid when the hardware verified it; L4 (TCP/UDP) checksum is marked
 * valid only for non-fragmented frames whose L4 checksum passed.
 */
1904 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1905 uint32_t status, uint16_t bufsize)
/* Only trust hardware checksums when RXCSUM is enabled. */
1909 if (ifp->if_capenable & IFCAP_RXCSUM) {
1910 if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1911 csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1913 if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1914 (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1915 (status & MGE_RX_L4_CSUM_OK)) {
1916 csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
/* 0xFFFF == "checksum already verified" convention for the stack. */
1917 frame->m_pkthdr.csum_data = 0xFFFF;
1920 frame->m_pkthdr.csum_flags = csum_flags;
/*
 * mge_offload_setup_descriptor: translate the outgoing mbuf's csum_flags
 * into TX descriptor command bits (IP/TCP/UDP checksum generation and
 * VLAN tagging).  Only ETHERTYPE_IP payloads are supported for offload.
 * NOTE(review): some lines (braces, the unsupported-ethertype early exit)
 * are elided in this excerpt.
 */
1925 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1927 struct mbuf *m0 = dw->buffer;
1928 struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1929 int csum_flags = m0->m_pkthdr.csum_flags;
1934 if (csum_flags != 0) {
/* VLAN-tagged frame: the real ethertype follows the encapsulation. */
1935 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1936 etype = ntohs(eh->evl_proto);
1937 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1938 csum_flags |= MGE_TX_VLAN_TAGGED;
1940 etype = ntohs(eh->evl_encap_proto);
1941 ehlen = ETHER_HDR_LEN;
1944 if (etype != ETHERTYPE_IP) {
1946 "TCP/IP Offload enabled for unsupported "
/* Tell the hardware the IP header size (in 32-bit words, ip_hl). */
1951 ip = (struct ip *)(m0->m_data + ehlen);
1952 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1953 cmd_status |= MGE_TX_NOT_FRAGMENT;
1956 if (csum_flags & CSUM_IP)
1957 cmd_status |= MGE_TX_GEN_IP_CSUM;
1959 if (csum_flags & CSUM_TCP)
1960 cmd_status |= MGE_TX_GEN_L4_CSUM;
1962 if (csum_flags & CSUM_UDP)
1963 cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1965 dw->mge_desc->cmd_status |= cmd_status;
/*
 * mge_intrs_ctrl: enable (enable != 0) or disable (enable == 0) the
 * port's interrupt sources.  Enabling unmasks the RX/extended causes;
 * disabling clears both the global and port cause registers and zeroes
 * the masks.
 * NOTE(review): the if/else structure around the two register groups is
 * elided in this excerpt.
 */
1969 mge_intrs_ctrl(struct mge_softc *sc, int enable)
/* Enable path: unmask RX queue 0, extend, and RX-error causes... */
1973 MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1974 MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
/* ...plus the extended TX-error/RX-overrun/TX-underrun/TX-buffer causes. */
1975 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1976 MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1977 MGE_PORT_INT_EXT_TXBUF0);
/* Disable path: clear all pending causes and mask everything off. */
1979 MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1980 MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1982 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1983 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1985 MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1986 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
/*
 * mge_crc8: compute a CRC-8 over 'size' bytes of 'data' using a 256-entry
 * lookup table (one table step per input byte).  Used below to index the
 * "other multicast" hash filter registers.
 * NOTE(review): the accumulator declaration, byte loop, and return
 * statement are elided in this excerpt; only the table and the per-byte
 * update are visible.
 */
1991 mge_crc8(uint8_t *data, int size)
1994 static const uint8_t ct[256] = {
1995 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1996 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1997 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1998 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1999 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
2000 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
2001 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
2002 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
2003 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
2004 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
2005 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
2006 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
2007 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
2008 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
2009 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
2010 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
2011 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
2012 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
2013 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
2014 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
2015 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
2016 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
2017 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
2018 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
2019 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
2020 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
2021 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
2022 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
2023 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
2024 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
2025 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
2026 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
/* Per-byte table step: fold the next input byte into the running CRC. */
2030 crc = ct[crc ^ *(data++)];
2036 mge_setup_multicast(struct mge_softc *sc)
2038 uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
2039 uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2040 uint32_t smt[MGE_MCAST_REG_NUMBER];
2041 uint32_t omt[MGE_MCAST_REG_NUMBER];
2042 struct ifnet *ifp = sc->ifp;
2043 struct ifmultiaddr *ifma;
2047 if (ifp->if_flags & IFF_ALLMULTI) {
2048 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
2049 smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
2051 memset(smt, 0, sizeof(smt));
2052 memset(omt, 0, sizeof(omt));
2054 if_maddr_rlock(ifp);
2055 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2056 if (ifma->ifma_addr->sa_family != AF_LINK)
2059 mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2060 if (memcmp(mac, special, sizeof(special)) == 0) {
2062 smt[i >> 2] |= v << ((i & 0x03) << 3);
2064 i = mge_crc8(mac, ETHER_ADDR_LEN);
2065 omt[i >> 2] |= v << ((i & 0x03) << 3);
2068 if_maddr_runlock(ifp);
2071 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
2072 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
2073 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
/*
 * mge_set_rxic: program the RX interrupt-coalescing time.  Clamps
 * rx_ic_time to the version-specific maximum, then replaces the IPG
 * field in the SDMA config register.
 */
2078 mge_set_rxic(struct mge_softc *sc)
2082 if (sc->rx_ic_time > sc->mge_rx_ipg_max)
2083 sc->rx_ic_time = sc->mge_rx_ipg_max;
2085 reg = MGE_READ(sc, MGE_SDMA_CONFIG);
/* Clear the full-width IPG field before OR-ing in the new value. */
2086 reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
2087 reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
2088 MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
/*
 * mge_set_txic: program the TX interrupt-coalescing time.  Mirrors
 * mge_set_rxic() but operates on the TX FIFO urgent-threshold register.
 */
2092 mge_set_txic(struct mge_softc *sc)
2096 if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
2097 sc->tx_ic_time = sc->mge_tfut_ipg_max;
2099 reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
/* Clear the full-width field before OR-ing in the new value. */
2100 reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
2101 reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
2102 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
/*
 * mge_sysctl_ic: sysctl handler for the RX/TX interrupt-coalescing time
 * knobs.  arg2 selects the direction (MGE_IC_RX or MGE_IC_TX); on a
 * successful write the new value is stored under the global lock.
 * NOTE(review): the error-check after sysctl_handle_int() and the
 * hardware-update calls are elided in this excerpt.
 */
2106 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
2108 struct mge_softc *sc = (struct mge_softc *)arg1;
/* Report the current value; sysctl_handle_int() may overwrite 'time'. */
2112 time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
2113 error = sysctl_handle_int(oidp, &time, 0, req);
2117 MGE_GLOBAL_LOCK(sc);
2118 if (arg2 == MGE_IC_RX) {
2119 sc->rx_ic_time = time;
2122 sc->tx_ic_time = time;
2125 MGE_GLOBAL_UNLOCK(sc);
/*
 * mge_add_sysctls: register the per-device "int_coal" sysctl subtree with
 * rx_time/tx_time knobs, both served by mge_sysctl_ic().
 */
2131 mge_add_sysctls(struct mge_softc *sc)
2133 struct sysctl_ctx_list *ctx;
2134 struct sysctl_oid_list *children;
2135 struct sysctl_oid *tree;
2137 ctx = device_get_sysctl_ctx(sc->dev);
2138 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2139 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
2140 CTLFLAG_RD, 0, "MGE Interrupts coalescing");
2141 children = SYSCTL_CHILDREN(tree);
/* arg2 (MGE_IC_RX / MGE_IC_TX) tells the handler which knob is accessed. */
2143 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
2144 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
2145 "I", "IC RX time threshold");
2146 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
2147 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
2148 "I", "IC TX time threshold");
/*
 * mge_mdio_writereg: MDIO bus write callback — forwards the register
 * write to the GE SMI access helper.
 */
2152 mge_mdio_writereg(device_t dev, int phy, int reg, int value)
2155 mv_write_ge_smi(dev, phy, reg, value);
2162 mge_mdio_readreg(device_t dev, int phy, int reg)
2166 ret = mv_read_ge_smi(dev, phy, reg);