2 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
3 * Copyright (C) 2009-2015 Semihalf
4 * Copyright (C) 2015 Stormshield
7 * Developed by Semihalf.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of MARVELL nor the names of contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 #ifdef HAVE_KERNEL_OPTION_HEADERS
35 #include "opt_device_polling.h"
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/endian.h>
46 #include <sys/mutex.h>
47 #include <sys/kernel.h>
48 #include <sys/module.h>
49 #include <sys/socket.h>
50 #include <sys/sysctl.h>
52 #include <net/ethernet.h>
55 #include <net/if_arp.h>
56 #include <net/if_dl.h>
57 #include <net/if_media.h>
58 #include <net/if_types.h>
59 #include <net/if_vlan_var.h>
61 #include <netinet/in_systm.h>
62 #include <netinet/in.h>
63 #include <netinet/ip.h>
65 #include <sys/sockio.h>
67 #include <machine/bus.h>
69 #include <machine/resource.h>
71 #include <dev/mii/mii.h>
72 #include <dev/mii/miivar.h>
74 #include <dev/fdt/fdt_common.h>
75 #include <dev/ofw/ofw_bus.h>
76 #include <dev/ofw/ofw_bus_subr.h>
77 #include <dev/mdio/mdio.h>
79 #include <dev/mge/if_mgevar.h>
80 #include <arm/mv/mvreg.h>
81 #include <arm/mv/mvvar.h>
83 #include "miibus_if.h"
86 #define MGE_DELAY(x) pause("SMI access sleep", (x) / tick_sbt)
88 static int mge_probe(device_t dev);
89 static int mge_attach(device_t dev);
90 static int mge_detach(device_t dev);
91 static int mge_shutdown(device_t dev);
92 static int mge_suspend(device_t dev);
93 static int mge_resume(device_t dev);
95 static int mge_miibus_readreg(device_t dev, int phy, int reg);
96 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
98 static int mge_mdio_readreg(device_t dev, int phy, int reg);
99 static int mge_mdio_writereg(device_t dev, int phy, int reg, int value);
101 static int mge_ifmedia_upd(struct ifnet *ifp);
102 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
104 static void mge_init(void *arg);
105 static void mge_init_locked(void *arg);
106 static void mge_start(struct ifnet *ifp);
107 static void mge_start_locked(struct ifnet *ifp);
108 static void mge_watchdog(struct mge_softc *sc);
109 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
111 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
112 static uint32_t mge_rx_ipg(uint32_t val, int ver);
113 static void mge_ver_params(struct mge_softc *sc);
115 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
116 static void mge_intr_rxtx(void *arg);
117 static void mge_intr_rx(void *arg);
118 static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
119 uint32_t int_cause_ext);
120 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
121 static void mge_intr_tx(void *arg);
122 static void mge_intr_tx_locked(struct mge_softc *sc);
123 static void mge_intr_misc(void *arg);
124 static void mge_intr_sum(void *arg);
125 static void mge_intr_err(void *arg);
126 static void mge_stop(struct mge_softc *sc);
127 static void mge_tick(void *msc);
128 static uint32_t mge_set_port_serial_control(uint32_t media);
129 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
130 static void mge_set_mac_address(struct mge_softc *sc);
131 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
133 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
134 static int mge_allocate_dma(struct mge_softc *sc);
135 static int mge_alloc_desc_dma(struct mge_softc *sc,
136 struct mge_desc_wrapper* desc_tab, uint32_t size,
137 bus_dma_tag_t *buffer_tag);
138 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
139 struct mbuf **mbufp, bus_addr_t *paddr);
140 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
142 static void mge_free_dma(struct mge_softc *sc);
143 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
144 uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
145 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
146 uint32_t status, uint16_t bufsize);
147 static void mge_offload_setup_descriptor(struct mge_softc *sc,
148 struct mge_desc_wrapper *dw);
149 static uint8_t mge_crc8(uint8_t *data, int size);
150 static void mge_setup_multicast(struct mge_softc *sc);
151 static void mge_set_rxic(struct mge_softc *sc);
152 static void mge_set_txic(struct mge_softc *sc);
153 static void mge_add_sysctls(struct mge_softc *sc);
154 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
/*
 * Newbus method table: standard device life-cycle entry points plus
 * MII (PHY register access) and MDIO bus accessors.
 * NOTE(review): this view of the file is missing lines (DEVMETHOD_END
 * and the closing brace are elided).
 */
static device_method_t mge_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, mge_probe),
        DEVMETHOD(device_attach, mge_attach),
        DEVMETHOD(device_detach, mge_detach),
        DEVMETHOD(device_shutdown, mge_shutdown),
        DEVMETHOD(device_suspend, mge_suspend),
        DEVMETHOD(device_resume, mge_resume),
        /* MII interface */
        DEVMETHOD(miibus_readreg, mge_miibus_readreg),
        DEVMETHOD(miibus_writereg, mge_miibus_writereg),
        /* MDIO interface */
        DEVMETHOD(mdio_readreg, mge_mdio_readreg),
        DEVMETHOD(mdio_writereg, mge_mdio_writereg),
DEFINE_CLASS_0(mge, mge_driver, mge_methods, sizeof(struct mge_softc));
static devclass_t mge_devclass;
/* Set to non-zero when any unit detects an attached switch; shared
 * across all driver instances. */
static int switch_attached = 0;
/* Register the driver on simplebus and hang miibus/mdio off it. */
DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
DRIVER_MODULE(mdio, mge, mdio_driver, mdio_devclass, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);
MODULE_DEPEND(mge, mdio, 1, 1, 1);
/*
 * Bus resources: one memory window plus up to three shareable IRQs
 * (how many are actually used depends on sc->mge_intr_cnt, see
 * mge_ver_params()).
 */
static struct resource_spec res_spec[] = {
        { SYS_RES_MEMORY, 0, RF_ACTIVE },
        { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
        { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
        { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
/*
 * Interrupt handler table indexed per IRQ line.  Entry 0 is the single
 * aggregated RX/TX handler used when only one IRQ line exists.
 */
        driver_intr_t *handler;
} mge_intrs[MGE_INTR_COUNT + 1] = {
        { mge_intr_rxtx,"GbE aggregated interrupt" },
        { mge_intr_rx, "GbE receive interrupt" },
        { mge_intr_tx, "GbE transmit interrupt" },
        { mge_intr_misc,"GbE misc interrupt" },
        { mge_intr_sum, "GbE summary interrupt" },
        { mge_intr_err, "GbE error interrupt" },
/* SMI access interlock */
static struct sx sx_smi;
/*
 * Read PHY register 'reg' of PHY 'phy' through the GbE SMI interface.
 * Waits for the SMI unit to go non-busy, issues the read command, waits
 * for MGE_SMI_READVALID, then returns the masked data field.
 * NOTE(review): lines are elided in this view (loop headers/braces and
 * timeout checks are not visible); comments describe visible code only.
 */
mv_read_ge_smi(device_t dev, int phy, int reg)
        struct mge_softc *sc;

        sc = device_get_softc(dev);
        KASSERT(sc != NULL, ("NULL softc ptr!"));
        /* Wait until the SMI unit is idle before issuing a command. */
        timeout = MGE_SMI_WRITE_RETRIES;
            (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
                MGE_DELAY(MGE_SMI_WRITE_DELAY);
        device_printf(dev, "SMI write timeout.\n");
        /* Issue read command: opcode | register (<<21) | PHY addr (<<16). */
        MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
            (MGE_SMI_READ | (reg << 21) | (phy << 16)));
        /* Wait till finished. */
        timeout = MGE_SMI_WRITE_RETRIES;
            !((MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_READVALID)))
                MGE_DELAY(MGE_SMI_WRITE_DELAY);
        device_printf(dev, "SMI write validation timeout.\n");
        /* Wait for the data to update in the SMI register */
        MGE_DELAY(MGE_SMI_DELAY);
        /* Extract the data field of the SMI register. */
        ret = MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
/*
 * Write 'value' to PHY register 'reg' of PHY 'phy' through the GbE SMI
 * interface: wait for the SMI unit to go idle, then issue the write
 * command with the data field masked to MGE_SMI_DATA_MASK.
 * NOTE(review): the "SMI read timeout" message in this write path looks
 * swapped with the "SMI write timeout" in mv_read_ge_smi() -- confirm
 * against the full file before changing.
 */
mv_write_ge_smi(device_t dev, int phy, int reg, uint32_t value)
        struct mge_softc *sc;

        sc = device_get_softc(dev);
        KASSERT(sc != NULL, ("NULL softc ptr!"));
        /* Wait until the SMI unit is idle. */
        timeout = MGE_SMI_READ_RETRIES;
            (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
                MGE_DELAY(MGE_SMI_READ_DELAY);
        device_printf(dev, "SMI read timeout.\n");
        /* Issue write command: opcode | register | PHY addr | data. */
        MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
            (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
            (value & MGE_SMI_DATA_MASK)));
/*
 * Read an external PHY register via the SMI registers of the instance
 * that owns them (sc->phy_sc).  Busy-waits with DELAY() rather than
 * sleeping, so it is usable where MGE_DELAY()/pause() is not.
 * NOTE(review): lines elided; the retry-exhausted check guarding the
 * printf is not visible here.
 */
mv_read_ext_phy(device_t dev, int phy, int reg)
        struct mge_softc *sc;

        sc = device_get_softc(dev);
        /* Issue the read command on the SMI-owning instance. */
        MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
            (MGE_SMI_READ | (reg << 21) | (phy << 16)));
        /* Poll for read-valid. */
        retries = MGE_SMI_READ_RETRIES;
            !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
                DELAY(MGE_SMI_READ_DELAY);
        device_printf(dev, "Timeout while reading from PHY\n");
        ret = MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
/*
 * Write an external PHY register via the SMI registers of the owning
 * instance (sc->phy_sc), then poll until the SMI unit reports non-busy.
 * Uses busy-wait DELAY() like mv_read_ext_phy().
 */
mv_write_ext_phy(device_t dev, int phy, int reg, int value)
        struct mge_softc *sc;

        sc = device_get_softc(dev);
        /* Issue write command: opcode | register | PHY addr | data. */
        MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
            (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
            (value & MGE_SMI_DATA_MASK)));
        /* Wait for the write to complete. */
        retries = MGE_SMI_WRITE_RETRIES;
        while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
                DELAY(MGE_SMI_WRITE_DELAY);
        device_printf(dev, "Timeout while writing to PHY\n");
/*
 * Fetch the interface MAC address into 'addr' (6 bytes): prefer the
 * FDT "local-mac-address" property; otherwise fall back to the address
 * currently programmed into the MAC_ADDR_L/H registers.
 * NOTE(review): lines elided -- the validity test on the OF_getprop()
 * result and the loop body at the 'for' are not visible.
 */
mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
        uint32_t mac_l, mac_h;

        /* Retrieve hw address from the device tree. */
        i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
        for (i = 0; i < 6; i++)
        bcopy(lmac, addr, 6);

        /* Fall back -- use the currently programmed address. */
        mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
        mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
        /* MAC_ADDR_H holds bytes 0-3 (MSB first), MAC_ADDR_L bytes 4-5. */
        addr[0] = (mac_h & 0xff000000) >> 24;
        addr[1] = (mac_h & 0x00ff0000) >> 16;
        addr[2] = (mac_h & 0x0000ff00) >> 8;
        addr[3] = (mac_h & 0x000000ff);
        addr[4] = (mac_l & 0x0000ff00) >> 8;
        addr[5] = (mac_l & 0x000000ff);
/*
 * Encode the TX FIFO urgent threshold IPG value for the given chip
 * version: v1 masks to 14 bits, v2 to 16 bits, both shifted left by 4.
 * NOTE(review): the switch/case on 'ver' selecting between the two
 * returns is elided in this view.
 */
mge_tfut_ipg(uint32_t val, int ver)
        return ((val & 0x3fff) << 4);
        return ((val & 0xffff) << 4);
/*
 * Encode the RX inter-packet-gap value for the given chip version;
 * the second form splits bit 15 off to a higher register position.
 * NOTE(review): the version dispatch (switch/case) is elided here.
 */
mge_rx_ipg(uint32_t val, int ver)
        return ((val & 0x3fff) << 8);
        return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
/*
 * Set chip-version-dependent softc parameters (IPG maxima, TX arbiter
 * and token-bucket configs, interrupt line count) based on the SoC
 * device ID 'd'.  Newer SoCs (88F628x, MV7810x, Discovery family) use
 * wider fields; others get the v1 limits.
 */
mge_ver_params(struct mge_softc *sc)
        if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
            d == MV_DEV_88F6282 ||
            d == MV_DEV_MV78100 ||
            d == MV_DEV_MV78100_Z0 ||
            (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
        /* v2 parameters: 16-bit IPG fields, fixed-prio TX arbiter. */
        sc->mge_tfut_ipg_max = 0xFFFF;
        sc->mge_rx_ipg_max = 0xFFFF;
        sc->mge_tx_arb_cfg = 0xFC0000FF;
        sc->mge_tx_tok_cfg = 0xFFFF7FFF;
        sc->mge_tx_tok_cnt = 0x3FFFFFFF;
        /* v1 parameters: 14-bit IPG fields. */
        sc->mge_tfut_ipg_max = 0x3FFF;
        sc->mge_rx_ipg_max = 0x3FFF;
        sc->mge_tx_arb_cfg = 0x000000FF;
        sc->mge_tx_tok_cfg = 0x3FFFFFFF;
        sc->mge_tx_tok_cnt = 0x3FFFFFFF;
        /* 88RC8180 has a single aggregated IRQ line; others have two. */
        if (d == MV_DEV_88RC8180)
                sc->mge_intr_cnt = 1;
                sc->mge_intr_cnt = 2;
        if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
/*
 * Program the interface's link-level address into MAC_ADDR_L/H and
 * set up the matching unicast filter entry for the default RX queue.
 * Caller must hold both driver locks (asserted below).
 */
mge_set_mac_address(struct mge_softc *sc)
        uint32_t mac_l, mac_h;

        MGE_GLOBAL_LOCK_ASSERT(sc);

        if_mac = (char *)IF_LLADDR(sc->ifp);
        /* Pack bytes 4-5 into the low register, 0-3 into the high one. */
        mac_l = (if_mac[4] << 8) | (if_mac[5]);
        mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
            (if_mac[2] << 8) | (if_mac[3] << 0);

        MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
        MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);

        /* The unicast DA filter is keyed by the last address byte. */
        mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
/*
 * Configure the unicast DA filter table: set the entry selected by
 * 'last_byte' (pass bit | target queue) and clear all other entries.
 * NOTE(review): the loop-body conditional choosing between the two
 * MGE_WRITE calls is elided in this view.
 */
mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
        uint32_t reg_idx, reg_off, reg_val, i;

        /* Each filter register packs MGE_UCAST_REG_NUMBER 8-bit entries. */
        reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
        reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
        reg_val = (1 | (queue << 1)) << reg_off;

        for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
                MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
                MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
/*
 * Enable or disable promiscuous mode based on IFF_PROMISC: when on,
 * set the unicast-promisc bit in PORT_CONFIG and open all DA filter
 * tables (unicast, special multicast, other multicast) toward 'queue';
 * when off, clear the bit and the multicast tables and reprogram the
 * station address filter via mge_set_mac_address().
 */
mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
        uint32_t port_config;

        /* Enable or disable promiscuous mode as needed */
        if (sc->ifp->if_flags & IFF_PROMISC) {
                port_config = MGE_READ(sc, MGE_PORT_CONFIG);
                port_config |= PORT_CONFIG_UPM;
                MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

                /* Accept-all pattern: pass bit | queue, replicated per byte. */
                reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
                    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);

                for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
                        MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
                        MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);

                for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
                        MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);

                port_config = MGE_READ(sc, MGE_PORT_CONFIG);
                port_config &= ~PORT_CONFIG_UPM;
                MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

                for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
                        MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
                        MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);

                mge_set_mac_address(sc);
/*
 * bus_dmamap_load() callback: stores the single segment's bus address
 * through the bus_addr_t pointer passed in 'arg'.  Asserts exactly one
 * segment, matching the tags created with nsegments == 1.
 */
mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
        KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
        *paddr = segs->ds_addr;
/*
 * Allocate a fresh RX mbuf cluster, map it with busdma and return its
 * bus address via '*paddr'; any previous mapping on 'map' is synced
 * and unloaded first.  Panics if the load yields more than one segment
 * (the RX tag is created with nsegments == 1).
 */
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
        struct mbuf *new_mbuf;
        bus_dma_segment_t seg[1];

        KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

        new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (new_mbuf == NULL)
        /* Use the whole cluster as receive buffer. */
        new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
        /* Release any previous buffer mapped on this map. */
        bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(tag, map);
        error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
        KASSERT(nsegs == 1, ("Too many segments returned!"));
        if (nsegs != 1 || error)
                panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

        bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

        (*paddr) = seg->ds_addr;
/*
 * Allocate and chain 'size' DMA descriptors in 'tab': each descriptor
 * gets coherent zeroed DMA memory and its bus address, descriptors are
 * linked back-to-front into a ring (last entry points to the first
 * allocated), then a busdma tag and per-descriptor maps are created
 * for the mbuf buffers.  Returns via 'buffer_tag' the new mbuf tag.
 * NOTE(review): lines elided -- error-path returns after the printfs
 * and the dw = &tab[i] assignments are not visible here.
 */
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
        struct mge_desc_wrapper *dw;
        bus_addr_t desc_paddr;

        /* Walk backwards so each entry can link to the previous one. */
        for (i = size - 1; i >= 0; i--) {
                error = bus_dmamem_alloc(sc->mge_desc_dtag,
                    (void**)&(dw->mge_desc),
                    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
                        if_printf(sc->ifp, "failed to allocate DMA memory\n");

                error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
                    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
                    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
                        if_printf(sc->ifp, "can't load descriptor\n");
                        bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,

                /* Chain descriptors */
                dw->mge_desc->next_desc = desc_paddr;
                desc_paddr = dw->mge_desc_paddr;
        /* Close the ring: last descriptor points back to the first. */
        tab[size - 1].mge_desc->next_desc = desc_paddr;

        /* Allocate a busdma tag for mbufs. */
        error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),    /* parent */
            1, 0,                               /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filtfunc, filtfuncarg */
            MCLBYTES, 1,                        /* maxsize, nsegments */
            MCLBYTES, 0,                        /* maxsegsz, flags */
            NULL, NULL,                         /* lockfunc, lockfuncarg */
            buffer_tag);                        /* dmat */
                if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");

        /* Create TX busdma maps */
        for (i = 0; i < size; i++) {
                error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
                        if_printf(sc->ifp, "failed to create map for mbuf\n");

                dw->buffer = (struct mbuf*)NULL;
                dw->mge_desc->buffer = (bus_addr_t)NULL;
/*
 * Top-level DMA setup: create the descriptor tag (16-byte aligned,
 * one mge_desc per load), allocate the TX and RX descriptor rings via
 * mge_alloc_desc_dma(), pre-load every RX slot with a fresh mbuf, and
 * record the ring start bus addresses for the hardware.
 */
mge_allocate_dma(struct mge_softc *sc)
        struct mge_desc_wrapper *dw;

        /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
        error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),    /* parent */
            16, 0,                              /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filtfunc, filtfuncarg */
            sizeof(struct mge_desc), 1,         /* maxsize, nsegments */
            sizeof(struct mge_desc), 0,         /* maxsegsz, flags */
            NULL, NULL,                         /* lockfunc, lockfuncarg */
            &sc->mge_desc_dtag);                /* dmat */

        mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
        mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,

        /* Attach an mbuf cluster to every RX descriptor. */
        for (i = 0; i < MGE_RX_DESC_NUM; i++) {
                dw = &(sc->mge_rx_desc[i]);
                mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
                    &dw->mge_desc->buffer);

        /* Ring base addresses handed to the controller at init time. */
        sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
        sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
/*
 * Tear down a descriptor ring: for each entry, optionally free the
 * attached mbuf ('free_mbufs'), destroy its buffer map, then unload
 * and free the descriptor DMA memory.
 * NOTE(review): lines elided -- the dw = &tab[i] assignment, the mbuf
 * free itself and several closing braces are not visible here.
 */
mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
        struct mge_desc_wrapper *dw;

        for (i = 0; i < size; i++) {
                /* Release the data buffer mapping, if any. */
                if (dw->buffer_dmap) {
                        bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
                        bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);

                /* Free RX descriptors */
                bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
                bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
/*
 * Release all DMA state: both descriptor rings (RX mbufs freed, TX
 * mbufs already freed by the TX completion path), then the mbuf tags
 * and finally the descriptor tag.
 */
mge_free_dma(struct mge_softc *sc)
        /* Free descriptors and mbufs */
        mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
        mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);

        /* Destroy mbuf dma tag */
        bus_dma_tag_destroy(sc->mge_tx_dtag);
        bus_dma_tag_destroy(sc->mge_rx_dtag);
        /* Destroy descriptors tag */
        bus_dma_tag_destroy(sc->mge_desc_dtag);
/*
 * Rebuild the RX ring after a resource error: free and re-allocate all
 * RX descriptors and mbufs, reset the current index, point the
 * controller at the new ring and re-enable the RX queue.  Requires the
 * receive lock (asserted).
 */
mge_reinit_rx(struct mge_softc *sc)
        struct mge_desc_wrapper *dw;

        MGE_RECEIVE_LOCK_ASSERT(sc);

        mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);

        mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,

        /* Re-attach a fresh mbuf to every RX descriptor. */
        for (i = 0; i < MGE_RX_DESC_NUM; i++) {
                dw = &(sc->mge_rx_desc[i]);
                mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
                    &dw->mge_desc->buffer);

        sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
        sc->rx_desc_curr = 0;

        MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),

        /* Enable RX queue */
        MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
#ifdef DEVICE_POLLING
static poll_handler_t mge_poll;

/*
 * DEVICE_POLLING handler: drains up to 'count' RX packets and completes
 * TX under the respective locks; on POLL_AND_CHECK_STATUS it also reads
 * and acknowledges the interrupt-cause registers, handling RX resource
 * errors.  Bails out early if the interface is not running.
 */
mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
        struct mge_softc *sc = ifp->if_softc;
        uint32_t int_cause, int_cause_ext;

        MGE_RECEIVE_LOCK(sc);

        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                MGE_RECEIVE_UNLOCK(sc);

        if (cmd == POLL_AND_CHECK_STATUS) {
                int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
                int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

                /* Check for resource error */
                if (int_cause & MGE_PORT_INT_RXERRQ0)

                /* Ack only the causes we observed (write-0-to-clear). */
                if (int_cause || int_cause_ext) {
                        MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
                        MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);

        rx_npkts = mge_intr_rx_locked(sc, count);

        MGE_RECEIVE_UNLOCK(sc);
        MGE_TRANSMIT_LOCK(sc);
        mge_intr_tx_locked(sc);
        MGE_TRANSMIT_UNLOCK(sc);
#endif /* DEVICE_POLLING */
/*
 * Device attach: resolve PHY/switch from the FDT, set up the per-unit
 * SMI interlock and mutexes, allocate bus and DMA resources, create
 * and configure the ifnet, attach MII (or fixed 1000baseT media when
 * no PHY is present), install interrupt handlers, and add the mdio
 * child when a switch is attached.
 * NOTE(review): many lines are elided in this view (error-path gotos,
 * else branches, closing braces); comments describe visible code only.
 */
mge_attach(device_t dev)
        struct mge_softc *sc;
        struct mii_softc *miisc;
        uint8_t hwaddr[ETHER_ADDR_LEN];

        sc = device_get_softc(dev);
        sc->node = ofw_bus_get_node(dev);
        /* Resolve our PHY; phy_sc is the softc owning the SMI registers. */
        if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) == 0) {
                device_printf(dev, "PHY%i attached, phy_sc points to %s\n", phy,
                    device_get_nameunit(sc->phy_sc->dev));
                sc->phy_attached = 1;
                device_printf(dev, "PHY not attached.\n");
                sc->phy_attached = 0;
        /* Detect an attached Marvell switch via its FDT compatible. */
        if (fdt_find_compatible(sc->node, "mrvl,sw", 1) != 0) {
                device_printf(dev, "Switch attached.\n");
                sc->switch_attached = 1;
                /* additional variable available across instances */
                sc->switch_attached = 0;
        /* Unit 0 initializes the SMI interlock shared by all units. */
        if (device_get_unit(dev) == 0) {
                sx_init(&sx_smi, "mge_tick() SMI access threads interlock");
        /* Set chip version-dependent parameters */
        /* Initialize mutexes */
        mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
        mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
        /* Allocate IO and IRQ resources */
        error = bus_alloc_resources(dev, res_spec, sc->res);
                device_printf(dev, "could not allocate resources\n");
        /* Allocate DMA, buffers, buffer descriptors */
        error = mge_allocate_dma(sc);
        /* Reset ring indices. */
        sc->tx_desc_curr = 0;
        sc->rx_desc_curr = 0;
        sc->tx_desc_used_idx = 0;
        sc->tx_desc_used_count = 0;
        /* Configure defaults for interrupts coalescing */
        sc->rx_ic_time = 768;
        sc->tx_ic_time = 768;
        /* Allocate network interface */
        ifp = sc->ifp = if_alloc(IFT_ETHER);
                device_printf(dev, "if_alloc() failed\n");
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
        ifp->if_capabilities = IFCAP_VLAN_MTU;
        if (sc->mge_hw_csum) {
                ifp->if_capabilities |= IFCAP_HWCSUM;
                ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
        ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
        /* Advertise that polling is supported */
        ifp->if_capabilities |= IFCAP_POLLING;
        ifp->if_init = mge_init;
        ifp->if_start = mge_start;
        ifp->if_ioctl = mge_ioctl;
        /* Keep one TX descriptor spare so the ring never fully fills. */
        ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
        IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
        IFQ_SET_READY(&ifp->if_snd);

        mge_get_mac_address(sc, hwaddr);
        ether_ifattach(ifp, hwaddr);
        callout_init(&sc->wd_callout, 0);
        /* Attach MII when a PHY exists; otherwise use fixed media below. */
        if (sc->phy_attached) {
                error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
                    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
                        device_printf(dev, "MII failed to find PHY\n");
                sc->mii = device_get_softc(sc->miibus);

                /* Tell the MAC where to find the PHY so autoneg works */
                miisc = LIST_FIRST(&sc->mii->mii_phys);
                MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
                /* no PHY, so use hard-coded values */
                ifmedia_init(&sc->mge_ifmedia, 0,
                ifmedia_add(&sc->mge_ifmedia,
                    IFM_ETHER | IFM_1000_T | IFM_FDX,
                ifmedia_set(&sc->mge_ifmedia,
                    IFM_ETHER | IFM_1000_T | IFM_FDX);

        /* Attach interrupt handlers */
        /* TODO: review flags, in part. mark RX as INTR_ENTROPY ? */
        for (i = 1; i <= sc->mge_intr_cnt; ++i) {
                error = bus_setup_intr(dev, sc->res[i],
                    INTR_TYPE_NET | INTR_MPSAFE,
                    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
                    sc, &sc->ih_cookie[i - 1]);
                        device_printf(dev, "could not setup %s\n",
                            mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);

        if (sc->switch_attached) {
                /* Point the MAC at the switch's pseudo-PHY address. */
                MGE_WRITE(sc, MGE_REG_PHYDEV, MGE_SWITCH_PHYDEV);
                child = device_add_child(dev, "mdio", -1);
                bus_generic_attach(dev);
/*
 * Device detach: stop the controller, drain the watchdog callout, tear
 * down interrupt handlers, detach the ifnet, release DMA and bus
 * resources, destroy the mutexes and (on unit 0) the SMI interlock.
 * NOTE(review): lines elided -- mge_stop()/mge_free_dma() calls and
 * the sx_destroy() for unit 0 are not visible in this view.
 */
mge_detach(device_t dev)
        struct mge_softc *sc;

        sc = device_get_softc(dev);

        /* Stop controller and free TX queue */

        /* Wait for stopping ticks */
        callout_drain(&sc->wd_callout);

        /* Stop and release all interrupts */
        for (i = 0; i < sc->mge_intr_cnt; ++i) {
                if (!sc->ih_cookie[i])

                error = bus_teardown_intr(dev, sc->res[1 + i],
                        device_printf(dev, "could not release %s\n",
                            mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);

        /* Detach network interface */
        ether_ifdetach(sc->ifp);

        /* Free DMA resources */

        /* Free IO memory handler */
        bus_release_resources(dev, res_spec, sc->res);

        /* Destroy mutexes */
        mtx_destroy(&sc->receive_lock);
        mtx_destroy(&sc->transmit_lock);

        /* Unit 0 owns the shared SMI interlock. */
        if (device_get_unit(dev) == 0)
/*
 * ifmedia status callback: with no PHY attached, report a fixed active
 * 1000baseT full-duplex link; otherwise report the MII layer's current
 * media and status.
 */
mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
        struct mge_softc *sc;
        struct mii_data *mii;

        if (!sc->phy_attached) {
                ifmr->ifm_active = IFM_1000_T | IFM_FDX | IFM_ETHER;
                ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;

        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;

        MGE_GLOBAL_UNLOCK(sc);
/*
 * Translate an ifmedia word into the PORT_SERIAL_CTRL register value:
 * base bits (reserved bit 9, force-link-fail, 1552-byte MRU) plus
 * speed/autoneg bits per media subtype and full-duplex when requested.
 * NOTE(review): the switch's case labels are elided in this view; the
 * three branches correspond to 1000/100/other subtypes by their speed
 * bits.
 */
mge_set_port_serial_control(uint32_t media)
        uint32_t port_config;

        port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
            PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);

        if (IFM_TYPE(media) == IFM_ETHER) {
                switch(IFM_SUBTYPE(media)) {
                        port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
                            PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
                            | PORT_SERIAL_SPEED_AUTONEG);
                        port_config |= (PORT_SERIAL_MII_SPEED_100 |
                            PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
                            | PORT_SERIAL_SPEED_AUTONEG);
                        port_config |= (PORT_SERIAL_AUTONEG |
                            PORT_SERIAL_AUTONEG_FC |
                            PORT_SERIAL_SPEED_AUTONEG);

        if (media & IFM_FDX)
                port_config |= PORT_SERIAL_FULL_DUPLEX;

        return (port_config);
/*
 * ifmedia change callback: only acts when a PHY is attached (switch
 * setups have hardcoded media, see comment below).  Records the new
 * media word, triggers an MII media change and reinitializes the MAC.
 */
mge_ifmedia_upd(struct ifnet *ifp)
        struct mge_softc *sc = ifp->if_softc;

        /*
         * Do not do anything for switch here, as updating media between
         * MGE MAC and switch MAC is hardcoded in PCB. Changing it here would
         */
        if (sc->phy_attached) {
                MGE_GLOBAL_LOCK(sc);
                if (ifp->if_flags & IFF_UP) {
                        sc->mge_media_status = sc->mii->mii_media.ifm_media;
                        mii_mediachg(sc->mii);

                        /* MGE MAC needs to be reinitialized. */
                        mge_init_locked(sc);

                MGE_GLOBAL_UNLOCK(sc);
        /* mge_init(): locked wrapper around mge_init_locked().
         * NOTE(review): the function signature line is elided in this
         * view; 'arg' is the softc. */
        struct mge_softc *sc;

        MGE_GLOBAL_LOCK(sc);

        mge_init_locked(arg);

        MGE_GLOBAL_UNLOCK(sc);
/*
 * Bring the interface up (both locks held): stop the port, program MAC
 * address and multicast filters, configure TX queues, MTU, SDMA and
 * serial-port settings, arm the RX ring, enable the port and wait for
 * link, set up interrupt coalescing, enable interrupts (unless polling
 * is active), mark the ifnet running and start the watchdog tick.
 * NOTE(review): lines are elided in this view (loop/wait constructs,
 * braces, several statements); comments describe visible code only.
 */
mge_init_locked(void *arg)
        struct mge_softc *sc = arg;
        struct mge_desc_wrapper *dw;
        volatile uint32_t reg_val;
        uint32_t media_status;

        MGE_GLOBAL_LOCK_ASSERT(sc);

        /* Stop interface */

        /* Disable interrupts */
        mge_intrs_ctrl(sc, 0);

        /* Set MAC address */
        mge_set_mac_address(sc);

        /* Setup multicast filters */
        mge_setup_multicast(sc);

        /* Version-2 controllers need RGMII and fixed-prio TX arbiter. */
        if (sc->mge_ver == 2) {
                MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
                MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));

        /* Initialize TX queue configuration registers */
        MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
        MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
        MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

        /* Clear TX queue configuration registers for unused queues */
        for (i = 1; i < 7; i++) {
                MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
                MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
                MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);

        /* Set default MTU */
        MGE_WRITE(sc, sc->mge_mtu, 0);

        /* Port configuration */
        MGE_WRITE(sc, MGE_PORT_CONFIG,
            PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
            PORT_CONFIG_ARO_RXQ(0));
        MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);

        /* Configure promisc mode */
        mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE);

        media_status = sc->mge_media_status;
        /* Switch setups force 1000baseT regardless of recorded media. */
        if (sc->switch_attached) {
                media_status &= ~IFM_TMASK;
                media_status |= IFM_1000_T;

        /* Setup port configuration */
        reg_val = mge_set_port_serial_control(media_status);
        MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

        /* Setup SDMA configuration */
        MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
            MGE_SDMA_TX_BYTE_SWAP |
            MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
            MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

        MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

        /* Point the hardware at the descriptor rings. */
        MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
        MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),

        /* Reset descriptor indexes */
        sc->tx_desc_curr = 0;
        sc->rx_desc_curr = 0;
        sc->tx_desc_used_idx = 0;
        sc->tx_desc_used_count = 0;

        /* Enable RX descriptors */
        for (i = 0; i < MGE_RX_DESC_NUM; i++) {
                dw = &sc->mge_rx_desc[i];
                dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
                dw->mge_desc->buff_size = MCLBYTES;
                bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Enable RX queue */
        MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

        /* Enable the serial port. */
        reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
        reg_val |= PORT_SERIAL_ENABLE;
        MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

        /* Poll for link-up. */
        reg_val = MGE_READ(sc, MGE_PORT_STATUS);
        if (reg_val & MGE_STATUS_LINKUP)
        if_printf(sc->ifp, "Timeout on link-up\n");

        /* Setup interrupts coalescing */

        /* Enable interrupts */
#ifdef DEVICE_POLLING
        /*
         * ...only if polling is not turned on.  Disable interrupts
         * explicitly if polling is enabled.
         */
        if (sc->ifp->if_capenable & IFCAP_POLLING)
                mge_intrs_ctrl(sc, 0);
#endif /* DEVICE_POLLING */
        mge_intrs_ctrl(sc, 1);

        /* Activate network interface */
        sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
        sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        /* Schedule watchdog timeout */
        if (sc->phy_attached)
                callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * Aggregated RX+TX interrupt handler (single-IRQ controllers): under
 * the global lock, ack and service TX completions, then RX, releasing
 * the transmit lock before the RX pass.  Defers entirely to polling
 * when DEVICE_POLLING is active.
 */
mge_intr_rxtx(void *arg)
        struct mge_softc *sc;
        uint32_t int_cause, int_cause_ext;

        MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
        if (sc->ifp->if_capenable & IFCAP_POLLING) {
                MGE_GLOBAL_UNLOCK(sc);

        /* Get interrupt cause */
        int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
        int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

        /* Check for Transmit interrupt */
        if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
            MGE_PORT_INT_EXT_TXUR)) {
                /* Ack only the TX causes (write-0-to-clear). */
                MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
                    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
                mge_intr_tx_locked(sc);

        MGE_TRANSMIT_UNLOCK(sc);

        /* Check for Receive interrupt */
        mge_intr_rx_check(sc, int_cause, int_cause_ext);

        MGE_RECEIVE_UNLOCK(sc);
/*
 * Error interrupt handler: currently only logs that it fired.
 */
mge_intr_err(void *arg)
        struct mge_softc *sc;

        if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * Miscellaneous interrupt handler: currently only logs that it fired.
 */
mge_intr_misc(void *arg)
        struct mge_softc *sc;

        if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * Dedicated RX interrupt handler: reads the cause registers and hands
 * them to mge_intr_rx_check() under the receive lock.  Defers to
 * polling when DEVICE_POLLING is active.
 */
mge_intr_rx(void *arg) {
        struct mge_softc *sc;
        uint32_t int_cause, int_cause_ext;

        MGE_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
        if (sc->ifp->if_capenable & IFCAP_POLLING) {
                MGE_RECEIVE_UNLOCK(sc);

        /* Get interrupt cause */
        int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
        int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

        mge_intr_rx_check(sc, int_cause, int_cause_ext);

        MGE_RECEIVE_UNLOCK(sc);
/*
 * Common RX cause processing: handle an RX resource error (ack it;
 * ring rebuild elided from this view), then mask down to the RX-queue
 * causes and, if any remain, ack them and drain the ring without a
 * packet budget (count == -1).
 */
mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
    uint32_t int_cause_ext)
        /* Check for resource error */
        if (int_cause & MGE_PORT_INT_RXERRQ0) {
                /* Ack the resource-error cause (write-0-to-clear). */
                MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
                    ~(int_cause & MGE_PORT_INT_RXERRQ0));

        int_cause &= MGE_PORT_INT_RXQ0;
        int_cause_ext &= MGE_PORT_INT_EXT_RXOR;

        if (int_cause || int_cause_ext) {
                MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
                MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
                mge_intr_rx_locked(sc, -1);
/*
 * Drain received packets from the RX ring (receive lock held) until
 * the budget 'count' is exhausted or a descriptor still owned by DMA
 * is found.  Each good frame is copied out with m_devget() (CRC
 * stripped), offload flags applied, and passed to if_input with the
 * lock dropped; the descriptor is then recycled back to the hardware.
 * Returns the number of packets processed (elided from this view).
 */
mge_intr_rx_locked(struct mge_softc *sc, int count)
        struct ifnet *ifp = sc->ifp;
        struct mge_desc_wrapper* dw;

        MGE_RECEIVE_LOCK_ASSERT(sc);

        while (count != 0) {
                dw = &sc->mge_rx_desc[sc->rx_desc_curr];
                bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
                    BUS_DMASYNC_POSTREAD);

                /* Get status */
                status = dw->mge_desc->cmd_status;
                bufsize = dw->mge_desc->buff_size;
                /* Hardware still owns this descriptor -- ring is drained. */
                if ((status & MGE_DMA_OWNED) != 0)

                /*
                 * NOTE(review): '~' is bitwise NOT, so this subcondition is
                 * non-zero for almost every status value; logical '!' looks
                 * intended (skip frames with the error-summary bit).  Verify
                 * against the full file/hardware before changing.
                 */
                if (dw->mge_desc->byte_count &&
                    ~(status & MGE_ERR_SUMMARY)) {

                        bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
                            BUS_DMASYNC_POSTREAD);

                        /* Copy out, dropping the trailing Ethernet CRC. */
                        mb = m_devget(dw->buffer->m_data,
                            dw->mge_desc->byte_count - ETHER_CRC_LEN,

                        /* Give up if no mbufs */

                        mb->m_pkthdr.len -= 2;

                        mb->m_pkthdr.rcvif = ifp;

                        mge_offload_process_frame(ifp, mb, status,

                        /* Drop the lock across the network-stack call. */
                        MGE_RECEIVE_UNLOCK(sc);
                        (*ifp->if_input)(ifp, mb);
                        MGE_RECEIVE_LOCK(sc);

                /* Hand the descriptor back to the hardware. */
                dw->mge_desc->byte_count = 0;
                dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
                sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
                bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);
/*
 * Summary interrupt handler stub — only logs that it fired.
 * NOTE(review): 'ifp' is used but its initialization is not visible in
 * this excerpt (presumably 'struct ifnet *ifp = sc->ifp;' was elided).
 */
1392 mge_intr_sum(void *arg)
1394 struct mge_softc *sc = arg;
1398 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * TX completion interrupt handler: acknowledge the TX cause bits and
 * reap finished transmit descriptors under the transmit lock.
 * Bails out early when the interface is in polling mode, since
 * mge_poll() performs the same work.
 */
1402 mge_intr_tx(void *arg)
1404 struct mge_softc *sc = arg;
1405 uint32_t int_cause_ext;
1407 MGE_TRANSMIT_LOCK(sc);
1409 #ifdef DEVICE_POLLING
1410 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1411 MGE_TRANSMIT_UNLOCK(sc);
1416 /* Ack the interrupt */
/* Writing 0 to a cause bit clears it; all other bits stay untouched. */
1417 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1418 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1419 (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1421 mge_intr_tx_locked(sc);
1423 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Reap completed TX descriptors: walk the used ring from tx_desc_used_idx,
 * stop at the first descriptor still owned by DMA, free the transmitted
 * mbufs, update collision/output counters, then restart queued output.
 * Caller must hold the transmit lock.
 */
1427 mge_intr_tx_locked(struct mge_softc *sc)
1429 struct ifnet *ifp = sc->ifp;
1430 struct mge_desc_wrapper *dw;
1431 struct mge_desc *desc;
1435 MGE_TRANSMIT_LOCK_ASSERT(sc);
1437 /* Disable watchdog */
1440 while (sc->tx_desc_used_count) {
1441 /* Get the descriptor */
1442 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1443 desc = dw->mge_desc;
1444 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1445 BUS_DMASYNC_POSTREAD);
1447 /* Get descriptor status */
1448 status = desc->cmd_status;
/* Still owned by the DMA engine — everything beyond is unfinished. */
1450 if (status & MGE_DMA_OWNED)
1453 sc->tx_desc_used_idx =
1454 (++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;
1455 sc->tx_desc_used_count--;
1457 /* Update collision statistics */
1458 if (status & MGE_ERR_SUMMARY) {
/* Late collision counts as 1; retry-limit error as 16 (max retries). */
1459 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1460 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1461 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1462 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
/* Release the DMA mapping and the mbuf that was transmitted. */
1465 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1466 BUS_DMASYNC_POSTWRITE);
1467 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1468 m_freem(dw->buffer);
1469 dw->buffer = (struct mbuf*)NULL;
1472 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1476 /* Now send anything that was pending */
1477 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1478 mge_start_locked(ifp);
/*
 * ioctl handler for the mge interface. Visible cases in this excerpt
 * handle interface flags (promiscuous/allmulti/up-down), multicast list
 * updates, capability changes (HW checksum, polling) and media ioctls;
 * everything else falls through to ether_ioctl(). Most 'case' labels
 * were elided from this excerpt — TODO confirm structure against the
 * full source.
 */
1482 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1484 struct mge_softc *sc = ifp->if_softc;
1485 struct ifreq *ifr = (struct ifreq *)data;
1493 MGE_GLOBAL_LOCK(sc);
1495 if (ifp->if_flags & IFF_UP) {
1496 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* XOR old/new flags: react only to the flags that changed. */
1497 flags = ifp->if_flags ^ sc->mge_if_flags;
1498 if (flags & IFF_PROMISC)
1499 mge_set_prom_mode(sc,
1500 MGE_RX_DEFAULT_QUEUE);
1502 if (flags & IFF_ALLMULTI)
1503 mge_setup_multicast(sc);
1505 mge_init_locked(sc);
1507 else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1510 sc->mge_if_flags = ifp->if_flags;
1511 MGE_GLOBAL_UNLOCK(sc);
/* Multicast list changed: reprogram the hardware filter tables. */
1515 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1516 MGE_GLOBAL_LOCK(sc);
1517 mge_setup_multicast(sc);
1518 MGE_GLOBAL_UNLOCK(sc);
/* Capability toggle: 'mask' holds the capabilities being changed. */
1522 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1523 if (mask & IFCAP_HWCSUM) {
1524 ifp->if_capenable &= ~IFCAP_HWCSUM;
1525 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1526 if (ifp->if_capenable & IFCAP_TXCSUM)
1527 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1529 ifp->if_hwassist = 0;
1531 #ifdef DEVICE_POLLING
1532 if (mask & IFCAP_POLLING) {
1533 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1534 error = ether_poll_register(mge_poll, ifp);
/* Polling on: mask device interrupts; mge_poll() drives the rings. */
1538 MGE_GLOBAL_LOCK(sc);
1539 mge_intrs_ctrl(sc, 0);
1540 ifp->if_capenable |= IFCAP_POLLING;
1541 MGE_GLOBAL_UNLOCK(sc);
1543 error = ether_poll_deregister(ifp);
1544 MGE_GLOBAL_LOCK(sc);
1545 mge_intrs_ctrl(sc, 1);
1546 ifp->if_capenable &= ~IFCAP_POLLING;
1547 MGE_GLOBAL_UNLOCK(sc);
1552 case SIOCGIFMEDIA: /* fall through */
1555 * Setting up media type via ioctls is *not* supported for MAC
1556 * which is connected to switch. Use etherswitchcfg.
1558 if (!sc->phy_attached && (command == SIOCSIFMEDIA))
1560 else if (!sc->phy_attached) {
1561 error = ifmedia_ioctl(ifp, ifr, &sc->mge_ifmedia,
/* The controller cannot do 1000baseT half-duplex; reject it. */
1566 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1567 && !(ifr->ifr_media & IFM_FDX)) {
1568 device_printf(sc->dev,
1569 "1000baseTX half-duplex unsupported\n");
1572 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1575 error = ether_ioctl(ifp, command, data);
/*
 * MII bus read callback: delegate to the Marvell external-PHY helper.
 * Asserts that no Ethernet switch is attached (switch setups do not go
 * through miibus).
 */
1581 mge_miibus_readreg(device_t dev, int phy, int reg)
1583 struct mge_softc *sc;
1584 sc = device_get_softc(dev);
1586 KASSERT(!switch_attached, ("miibus used with switch attached"));
1588 return (mv_read_ext_phy(dev, phy, reg));
/*
 * MII bus write callback: delegate to the Marvell external-PHY helper.
 * Mirrors mge_miibus_readreg(); same no-switch assertion applies.
 */
1592 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1594 struct mge_softc *sc;
1595 sc = device_get_softc(dev);
1597 KASSERT(!switch_attached, ("miibus used with switch attached"));
1599 mv_write_ext_phy(dev, phy, reg, value);
/*
 * Device probe: match the FDT "mrvl,ge" compatible string and set the
 * human-readable device description.
 */
1605 mge_probe(device_t dev)
1608 if (!ofw_bus_status_okay(dev))
1611 if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1614 device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1615 return (BUS_PROBE_DEFAULT);
/* Resume method stub: only logs the call; no state is restored here. */
1619 mge_resume(device_t dev)
1622 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * Shutdown method: deregister from polling if active, then quiesce the
 * device under the global (RX+TX) lock. The mge_stop() call expected
 * between the polling block and the unlock is elided from this excerpt.
 */
1627 mge_shutdown(device_t dev)
1629 struct mge_softc *sc = device_get_softc(dev);
1631 MGE_GLOBAL_LOCK(sc);
1633 #ifdef DEVICE_POLLING
1634 if (sc->ifp->if_capenable & IFCAP_POLLING)
1635 ether_poll_deregister(sc->ifp);
1640 MGE_GLOBAL_UNLOCK(sc);
/*
 * Map mbuf m0 for DMA and fill the current TX descriptor. Only a single
 * DMA segment is supported (caller defragments first); on multi-segment
 * loads the mapping is undone and the elided path returns an error.
 * Caller holds the transmit lock and has verified a descriptor is free.
 */
1646 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1648 struct mge_desc_wrapper *dw = NULL;
1650 bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1658 /* Fetch unused map */
1659 desc_no = sc->tx_desc_curr;
1660 dw = &sc->mge_tx_desc[desc_no];
1661 mapp = dw->buffer_dmap;
1663 /* Create mapping in DMA memory */
1664 error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1671 /* Only one segment is supported. */
1673 bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1678 bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1680 /* Everything is ok, now we can send buffers */
1681 for (seg = 0; seg < nsegs; seg++) {
1682 dw->mge_desc->byte_count = segs[seg].ds_len;
1683 dw->mge_desc->buffer = segs[seg].ds_addr;
/* Clear status first; offload bits are OR-ed in before the send bits. */
1685 dw->mge_desc->cmd_status = 0;
1687 mge_offload_setup_descriptor(sc, dw);
1688 dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1689 MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
/* Push the descriptor to memory before DMA may read it. */
1693 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1694 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1696 sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
1697 sc->tx_desc_used_count++;
/*
 * Periodic (1 Hz) callout body for mge_tick: runs the TX watchdog check
 * and re-applies media settings if the link media changed, then
 * reschedules itself. The function signature line was elided from this
 * excerpt; 'msc' is the callout argument (the softc).
 */
1704 struct mge_softc *sc = msc;
1706 KASSERT(sc->phy_attached == 1, ("mge_tick while PHY not attached"));
1708 MGE_GLOBAL_LOCK(sc);
1710 /* Check for TX timeout */
1715 /* Check for media type change */
1716 if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1717 mge_ifmedia_upd(sc->ifp);
1719 MGE_GLOBAL_UNLOCK(sc);
1721 /* Schedule another timeout one second from now */
1722 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * TX watchdog: decrement the timer each tick; when it expires, count an
 * output error, log the timeout, and reinitialize the interface.
 */
1728 mge_watchdog(struct mge_softc *sc)
/* Timer disarmed (0) or not yet expired: nothing to do this tick. */
1734 if (sc->wd_timer == 0 || --sc->wd_timer) {
1738 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1739 if_printf(ifp, "watchdog timeout\n");
/* Full reinit recovers the controller from a wedged TX engine. */
1742 mge_init_locked(sc);
/* if_start entry point: take the transmit lock and run the real worker. */
1746 mge_start(struct ifnet *ifp)
1748 struct mge_softc *sc = ifp->if_softc;
1750 MGE_TRANSMIT_LOCK(sc);
1752 mge_start_locked(ifp);
1754 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Dequeue packets from if_snd and hand them to mge_encap() until the
 * queue is empty or the TX ring is full. Packets needing checksum/VLAN
 * offload must be writable (the descriptor setup touches headers), and
 * chained mbufs are defragmented since only one DMA segment is allowed.
 * Finally kicks the TX queue. Caller holds the transmit lock.
 */
1758 mge_start_locked(struct ifnet *ifp)
1760 struct mge_softc *sc;
1761 struct mbuf *m0, *mtmp;
1762 uint32_t reg_val, queued = 0;
1766 MGE_TRANSMIT_LOCK_ASSERT(sc);
/* Not running, or already flow-controlled: do nothing. */
1768 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1773 /* Get packet from the queue */
1774 IF_DEQUEUE(&ifp->if_snd, m0);
/* Offloaded frames must be writable; duplicate read-only mbufs. */
1778 if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
1779 m0->m_flags & M_VLANTAG) {
1780 if (M_WRITABLE(m0) == 0) {
1781 mtmp = m_dup(m0, M_NOWAIT);
1788 /* The driver support only one DMA fragment. */
1789 if (m0->m_next != NULL) {
1790 mtmp = m_defrag(m0, M_NOWAIT);
1795 /* Check for free descriptors */
/* Ring full: put the packet back and assert flow control. */
1796 if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1797 IF_PREPEND(&ifp->if_snd, m0);
1798 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1802 if (mge_encap(sc, m0) != 0)
1810 /* Enable transmitter and watchdog timer */
1811 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1812 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
1818 mge_stop(struct mge_softc *sc)
1821 volatile uint32_t reg_val, status;
1822 struct mge_desc_wrapper *dw;
1823 struct mge_desc *desc;
1828 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1831 /* Stop tick engine */
1832 callout_stop(&sc->wd_callout);
1834 /* Disable interface */
1835 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1838 /* Disable interrupts */
1839 mge_intrs_ctrl(sc, 0);
1841 /* Disable Rx and Tx */
1842 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1843 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1844 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1846 /* Remove pending data from TX queue */
1847 while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1848 sc->tx_desc_used_count) {
1849 /* Get the descriptor */
1850 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1851 desc = dw->mge_desc;
1852 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1853 BUS_DMASYNC_POSTREAD);
1855 /* Get descriptor status */
1856 status = desc->cmd_status;
1858 if (status & MGE_DMA_OWNED)
1861 sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) %
1863 sc->tx_desc_used_count--;
1865 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1866 BUS_DMASYNC_POSTWRITE);
1867 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1869 m_freem(dw->buffer);
1870 dw->buffer = (struct mbuf*)NULL;
1873 /* Wait for end of transmission */
1876 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1877 if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1878 (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1885 "%s: timeout while waiting for end of transmission\n",
1888 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1889 reg_val &= ~(PORT_SERIAL_ENABLE);
1890 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
/* Suspend method stub: only logs the call; no state is saved here. */
1894 mge_suspend(device_t dev)
1897 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * Translate hardware RX descriptor status into mbuf checksum flags for
 * a received frame, honoring the interface's RXCSUM capability:
 * IPv4 header checksum verified by HW, and TCP/UDP payload checksum
 * verified for non-fragmented packets.
 */
1902 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1903 uint32_t status, uint16_t bufsize)
1907 if (ifp->if_capenable & IFCAP_RXCSUM) {
1908 if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1909 csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
/* L4 checksum is only meaningful on unfragmented TCP/UDP packets. */
1911 if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1912 (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1913 (status & MGE_RX_L4_CSUM_OK)) {
1914 csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
/* 0xFFFF = "checksum already verified" for the upper layers. */
1915 frame->m_pkthdr.csum_data = 0xFFFF;
1918 frame->m_pkthdr.csum_flags = csum_flags;
/*
 * Build the TX-offload command bits for a descriptor from the mbuf's
 * csum_flags: VLAN tagging, IP header size/generation, and TCP/UDP L4
 * checksum generation. Only IPv4 is supported — other ethertypes get a
 * warning (printf body elided) and no offload bits.
 */
1923 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1925 struct mbuf *m0 = dw->buffer;
1926 struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1927 int csum_flags = m0->m_pkthdr.csum_flags;
1932 if (csum_flags != 0) {
/* VLAN-tagged frame: real ethertype is after the 4-byte encap. */
1933 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1934 etype = ntohs(eh->evl_proto);
1935 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1936 csum_flags |= MGE_TX_VLAN_TAGGED;
1938 etype = ntohs(eh->evl_encap_proto);
1939 ehlen = ETHER_HDR_LEN;
1942 if (etype != ETHERTYPE_IP) {
1944 "TCP/IP Offload enabled for unsupported "
/* Hardware needs the IP header length (in 32-bit words) to locate L4. */
1949 ip = (struct ip *)(m0->m_data + ehlen);
1950 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1951 cmd_status |= MGE_TX_NOT_FRAGMENT;
1954 if (csum_flags & CSUM_IP)
1955 cmd_status |= MGE_TX_GEN_IP_CSUM;
1957 if (csum_flags & CSUM_TCP)
1958 cmd_status |= MGE_TX_GEN_L4_CSUM;
1960 if (csum_flags & CSUM_UDP)
1961 cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1963 dw->mge_desc->cmd_status |= cmd_status;
/*
 * Enable or disable the controller's interrupt sources. Enable path
 * unmasks RX queue 0, extend, and RX-error causes plus the extended TX
 * causes; disable path clears all cause and mask registers so no
 * further interrupts can fire.
 */
1967 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1971 MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1972 MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1973 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1974 MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1975 MGE_PORT_INT_EXT_TXBUF0);
/* Disable path: clear pending causes and zero every mask register. */
1977 MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1978 MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1980 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1981 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1983 MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1984 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
/*
 * Table-driven CRC-8 over 'size' bytes of 'data'; used to hash multicast
 * MAC addresses into the "other multicast" filter table. ct[1] == 0x07
 * suggests the standard CRC-8 polynomial x^8+x^2+x+1 (0x07), MSB-first —
 * TODO confirm against the Marvell filter-table specification. The
 * accumulator initialization, loop header and return are elided from
 * this excerpt; only the per-byte update at the end is visible.
 */
1989 mge_crc8(uint8_t *data, int size)
1992 static const uint8_t ct[256] = {
1993 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1994 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1995 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1996 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1997 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1998 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1999 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
2000 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
2001 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
2002 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
2003 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
2004 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
2005 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
2006 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
2007 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
2008 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
2009 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
2010 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
2011 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
2012 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
2013 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
2014 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
2015 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
2016 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
2017 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
2018 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
2019 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
2020 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
2021 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
2022 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
2023 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
2024 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
/* Per-byte update: fold the next input byte through the lookup table. */
2028 crc = ct[crc ^ *(data++)];
/*
 * Program the hardware multicast filter tables. IFF_ALLMULTI fills both
 * the "special" (01:00:5E:00:00:xx) and "other" tables with the accept
 * pattern; otherwise the tables are built from the interface's multicast
 * address list, using the last MAC byte as the special-table index and
 * mge_crc8() as the other-table hash.
 */
2034 mge_setup_multicast(struct mge_softc *sc)
2036 uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
/* Table entry value: target queue in bits 3:1, enable bit 0. */
2037 uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2038 uint32_t smt[MGE_MCAST_REG_NUMBER];
2039 uint32_t omt[MGE_MCAST_REG_NUMBER];
2040 struct ifnet *ifp = sc->ifp;
2041 struct ifmultiaddr *ifma;
2045 if (ifp->if_flags & IFF_ALLMULTI) {
/* Accept everything: replicate the entry value into all four bytes. */
2046 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
2047 smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
2049 memset(smt, 0, sizeof(smt));
2050 memset(omt, 0, sizeof(omt));
2052 if_maddr_rlock(ifp);
2053 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2054 if (ifma->ifma_addr->sa_family != AF_LINK)
2057 mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
/* 01:00:5E:00:00:xx prefix goes to the special table, keyed by xx. */
2058 if (memcmp(mac, special, sizeof(special)) == 0) {
/* Each 32-bit register packs four 8-bit table entries. */
2060 smt[i >> 2] |= v << ((i & 0x03) << 3);
2062 i = mge_crc8(mac, ETHER_ADDR_LEN);
2063 omt[i >> 2] |= v << ((i & 0x03) << 3);
2066 if_maddr_runlock(ifp);
2069 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
2070 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
2071 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
/*
 * Program the RX interrupt-coalescing time into the SDMA config
 * register, clamping to the per-version maximum. The mask is computed
 * by encoding the maximum value, which sets every field bit.
 */
2076 mge_set_rxic(struct mge_softc *sc)
2080 if (sc->rx_ic_time > sc->mge_rx_ipg_max)
2081 sc->rx_ic_time = sc->mge_rx_ipg_max;
2083 reg = MGE_READ(sc, MGE_SDMA_CONFIG);
2084 reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
2085 reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
2086 MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
/*
 * Program the TX interrupt-coalescing time into the TX FIFO urgent
 * threshold register; mirrors mge_set_rxic() (clamp, mask via max
 * encoding, then OR in the requested value).
 */
2090 mge_set_txic(struct mge_softc *sc)
2094 if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
2095 sc->tx_ic_time = sc->mge_tfut_ipg_max;
2097 reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
2098 reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
2099 reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
2100 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
/*
 * sysctl handler shared by the RX and TX coalescing knobs. arg2 selects
 * the direction (MGE_IC_RX/MGE_IC_TX); on a successful write the new
 * value is stored and pushed to hardware under the global lock (the
 * mge_set_rxic/mge_set_txic calls are in elided lines).
 */
2104 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
2106 struct mge_softc *sc = (struct mge_softc *)arg1;
2110 time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
2111 error = sysctl_handle_int(oidp, &time, 0, req);
2115 MGE_GLOBAL_LOCK(sc);
2116 if (arg2 == MGE_IC_RX) {
2117 sc->rx_ic_time = time;
2120 sc->tx_ic_time = time;
2123 MGE_GLOBAL_UNLOCK(sc);
/*
 * Create the per-device sysctl tree: an "int_coal" node with "rx_time"
 * and "tx_time" read-write knobs, both served by mge_sysctl_ic().
 */
2129 mge_add_sysctls(struct mge_softc *sc)
2131 struct sysctl_ctx_list *ctx;
2132 struct sysctl_oid_list *children;
2133 struct sysctl_oid *tree;
2135 ctx = device_get_sysctl_ctx(sc->dev);
2136 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2137 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
2138 CTLFLAG_RD, 0, "MGE Interrupts coalescing");
2139 children = SYSCTL_CHILDREN(tree);
/* arg2 tells the shared handler which direction it is serving. */
2141 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
2142 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
2143 "I", "IC RX time threshold");
2144 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
2145 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
2146 "I", "IC TX time threshold");
/* MDIO write callback: forward to the Marvell GE SMI helper. */
2150 mge_mdio_writereg(device_t dev, int phy, int reg, int value)
2153 mv_write_ge_smi(dev, phy, reg, value);
2160 mge_mdio_readreg(device_t dev, int phy, int reg)
2164 ret = mv_read_ge_smi(dev, phy, reg);