2 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5 * Developed by Semihalf.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of MARVELL nor the names of contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
50 #include <net/ethernet.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
59 #include <netinet/in_systm.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
63 #include <sys/sockio.h>
65 #include <machine/bus.h>
67 #include <machine/resource.h>
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
72 #include <dev/fdt/fdt_common.h>
73 #include <dev/ofw/ofw_bus.h>
74 #include <dev/ofw/ofw_bus_subr.h>
76 #include <dev/mge/if_mgevar.h>
77 #include <arm/mv/mvreg.h>
78 #include <arm/mv/mvvar.h>
80 #include "miibus_if.h"
/*
 * Forward declarations for the driver's internal routines.
 * NOTE(review): every line carries a stale embedded line number from an
 * extraction artifact, and some declarations appear truncated (missing
 * continuation lines); code left byte-identical.
 */
/* newbus device interface entry points */
82 static int mge_probe(device_t dev);
83 static int mge_attach(device_t dev);
84 static int mge_detach(device_t dev);
85 static int mge_shutdown(device_t dev);
86 static int mge_suspend(device_t dev);
87 static int mge_resume(device_t dev);
/* MII bus accessors (SMI register access to the PHY) */
89 static int mge_miibus_readreg(device_t dev, int phy, int reg);
90 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
/* ifmedia callbacks */
92 static int mge_ifmedia_upd(struct ifnet *ifp);
93 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
/* ifnet entry points and watchdog */
95 static void mge_init(void *arg);
96 static void mge_init_locked(void *arg);
97 static void mge_start(struct ifnet *ifp);
98 static void mge_start_locked(struct ifnet *ifp);
99 static void mge_watchdog(struct mge_softc *sc);
100 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
/* chip-version dependent parameter helpers */
102 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
103 static uint32_t mge_rx_ipg(uint32_t val, int ver);
104 static void mge_ver_params(struct mge_softc *sc);
/* interrupt handlers and controller control */
106 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
107 static void mge_intr_rx(void *arg);
108 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
109 static void mge_intr_tx(void *arg);
110 static void mge_intr_tx_locked(struct mge_softc *sc);
111 static void mge_intr_misc(void *arg);
112 static void mge_intr_sum(void *arg);
113 static void mge_intr_err(void *arg);
114 static void mge_stop(struct mge_softc *sc);
115 static void mge_tick(void *msc);
/* MAC address / filtering helpers */
116 static uint32_t mge_set_port_serial_control(uint32_t media);
117 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
118 static void mge_set_mac_address(struct mge_softc *sc);
119 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
121 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
/* busdma descriptor/buffer management */
122 static int mge_allocate_dma(struct mge_softc *sc);
123 static int mge_alloc_desc_dma(struct mge_softc *sc,
124 struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
125 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
126 struct mbuf **mbufp, bus_addr_t *paddr);
127 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
128 static void mge_free_dma(struct mge_softc *sc);
129 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
130 bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
/* checksum offload, multicast and interrupt-coalescing helpers */
131 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
132 uint32_t status, uint16_t bufsize);
133 static void mge_offload_setup_descriptor(struct mge_softc *sc,
134 struct mge_desc_wrapper *dw);
135 static uint8_t mge_crc8(uint8_t *data, int size);
136 static void mge_setup_multicast(struct mge_softc *sc);
137 static void mge_set_rxic(struct mge_softc *sc);
138 static void mge_set_txic(struct mge_softc *sc);
139 static void mge_add_sysctls(struct mge_softc *sc);
140 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
/*
 * newbus method table wiring the mge_* entry points into the device and
 * miibus interfaces.  NOTE(review): the table terminator (e.g. DEVMETHOD_END)
 * is missing from this extraction; code left byte-identical.
 */
142 static device_method_t mge_methods[] = {
143 /* Device interface */
144 DEVMETHOD(device_probe, mge_probe),
145 DEVMETHOD(device_attach, mge_attach),
146 DEVMETHOD(device_detach, mge_detach),
147 DEVMETHOD(device_shutdown, mge_shutdown),
148 DEVMETHOD(device_suspend, mge_suspend),
149 DEVMETHOD(device_resume, mge_resume),
/* MII interface */
151 DEVMETHOD(miibus_readreg, mge_miibus_readreg),
152 DEVMETHOD(miibus_writereg, mge_miibus_writereg),
/*
 * Driver registration: attach mge under simplebus and miibus under mge,
 * and record module dependencies on ether and miibus.
 * NOTE(review): parts of the driver_t initializer are missing from this
 * extraction; code left byte-identical.
 */
156 static driver_t mge_driver = {
159 sizeof(struct mge_softc),
162 static devclass_t mge_devclass;
164 DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
165 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
166 MODULE_DEPEND(mge, ether, 1, 1, 1);
167 MODULE_DEPEND(mge, miibus, 1, 1, 1);
/*
 * Bus resources used by the controller: one memory window and five
 * shareable IRQs (matched to the mge_intrs[] handler table below).
 */
169 static struct resource_spec res_spec[] = {
170 { SYS_RES_MEMORY, 0, RF_ACTIVE },
171 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
172 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
173 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
174 { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
175 { SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
/*
 * Per-IRQ handler/description table; indexed in parallel with the IRQ
 * entries of res_spec[] when interrupts are set up in mge_attach().
 */
180 driver_intr_t *handler;
182 } mge_intrs[MGE_INTR_COUNT] = {
183 { mge_intr_rx, "GbE receive interrupt" },
184 { mge_intr_tx, "GbE transmit interrupt" },
185 { mge_intr_misc,"GbE misc interrupt" },
186 { mge_intr_sum, "GbE summary interrupt" },
187 { mge_intr_err, "GbE error interrupt" },
/*
 * Obtain the station MAC address: try the "local-mac-address" FDT property
 * first; fall back to the address currently programmed in the
 * MGE_MAC_ADDR_{L,H} registers (big-endian byte extraction below).
 * NOTE(review): validity checks between the OF_getprop() call and the
 * bcopy() are missing from this extraction; code left byte-identical.
 */
191 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
193 uint32_t mac_l, mac_h;
198 * Retrieve hw address from the device tree.
200 i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
203 for (i = 0; i < 6; i++)
210 bcopy(lmac, addr, 6);
216 * Fall back -- use the currently programmed address.
218 mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
219 mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
/* addr[0..3] come from the high register, addr[4..5] from the low one */
221 addr[0] = (mac_h & 0xff000000) >> 24;
222 addr[1] = (mac_h & 0x00ff0000) >> 16;
223 addr[2] = (mac_h & 0x0000ff00) >> 8;
224 addr[3] = (mac_h & 0x000000ff);
225 addr[4] = (mac_l & 0x0000ff00) >> 8;
226 addr[5] = (mac_l & 0x000000ff);
/*
 * Encode the TX FIFO urgent threshold IPG field for the given chip
 * version: version-dependent mask (14 vs. 16 bits), shifted left by 4.
 * NOTE(review): the switch/case scaffolding selecting between the two
 * returns is missing from this extraction; code left byte-identical.
 */
230 mge_tfut_ipg(uint32_t val, int ver)
235 return ((val & 0x3fff) << 4);
238 return ((val & 0xffff) << 4);
/*
 * Encode the RX IPG value for the given chip version; the second form
 * splits bit 15 of val into a separate (higher) register bit position.
 * NOTE(review): the version-selection scaffolding is missing from this
 * extraction; code left byte-identical.
 */
243 mge_rx_ipg(uint32_t val, int ver)
248 return ((val & 0x3fff) << 8);
251 return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
/*
 * Set chip-version dependent limits and TX queue configuration values in
 * the softc.  The 88F6281/MV78100 family gets wider IPG maxima and a
 * different arbiter/token configuration than the older (fallback) parts.
 * NOTE(review): branch scaffolding and additional assignments (e.g. the
 * version number itself) are missing from this extraction.
 */
256 mge_ver_params(struct mge_softc *sc)
261 if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
262 d == MV_DEV_MV78100_Z0) {
265 sc->mge_tfut_ipg_max = 0xFFFF;
266 sc->mge_rx_ipg_max = 0xFFFF;
267 sc->mge_tx_arb_cfg = 0xFC0000FF;
268 sc->mge_tx_tok_cfg = 0xFFFF7FFF;
269 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/* fallback values for other SoC variants */
273 sc->mge_tfut_ipg_max = 0x3FFF;
274 sc->mge_rx_ipg_max = 0x3FFF;
275 sc->mge_tx_arb_cfg = 0x000000FF;
276 sc->mge_tx_tok_cfg = 0x3FFFFFFF;
277 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/*
 * Program the interface's link-layer address into MGE_MAC_ADDR_{L,H} and
 * install the matching unicast filter entry for the default RX queue.
 * Caller must hold the global lock (asserted below).
 */
282 mge_set_mac_address(struct mge_softc *sc)
285 uint32_t mac_l, mac_h;
287 MGE_GLOBAL_LOCK_ASSERT(sc);
289 if_mac = (char *)IF_LLADDR(sc->ifp);
/* pack bytes: high word holds if_mac[0..3], low word if_mac[4..5] */
291 mac_l = (if_mac[4] << 8) | (if_mac[5]);
292 mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
293 (if_mac[2] << 8) | (if_mac[3] << 0);
295 MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
296 MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
/* filter on the last address byte only -- hardware matches the rest */
298 mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
/*
 * Fill the unicast DA filter table: the register/byte slot derived from
 * last_byte gets a "pass to queue" entry; the remaining registers appear
 * to be cleared.  NOTE(review): the conditional selecting between the two
 * MGE_WRITE calls inside the loop is missing from this extraction.
 */
302 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
304 uint32_t reg_idx, reg_off, reg_val, i;
/* each UCAST register holds MGE_UCAST_REG_NUMBER 8-bit filter slots */
307 reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
308 reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
309 reg_val = (1 | (queue << 1)) << reg_off;
311 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
313 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
315 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
/*
 * Enable or disable promiscuous reception depending on IFF_PROMISC.
 * Enabling sets PORT_CONFIG_UPM and opens every unicast/multicast filter
 * slot toward 'queue'; disabling clears UPM, zeroes the multicast filters
 * and reinstates the station address via mge_set_mac_address().
 * NOTE(review): some scaffolding (else branch, declarations of reg_val/i)
 * is missing from this extraction; code left byte-identical.
 */
320 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
322 uint32_t port_config;
325 /* Enable or disable promiscuous mode as needed */
326 if (sc->ifp->if_flags & IFF_PROMISC) {
327 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
328 port_config |= PORT_CONFIG_UPM;
329 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
/* replicate the "accept to queue" nibble into all four filter slots */
331 reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
332 (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
334 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
335 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
336 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
339 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
340 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
/* leaving promiscuous mode: clear UPM and the multicast filters */
343 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
344 port_config &= ~PORT_CONFIG_UPM;
345 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
347 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
348 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
349 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
352 mge_set_mac_address(sc);
/*
 * bus_dmamap_load() callback: store the single segment's bus address into
 * the location passed via 'arg'.  Asserts exactly one segment was mapped.
 * NOTE(review): the cast of 'arg' to the paddr pointer is missing from
 * this extraction; code left byte-identical.
 */
357 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
361 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
364 *paddr = segs->ds_addr;
/*
 * Allocate a fresh RX cluster mbuf, unload any previous mapping on 'map',
 * DMA-map the new buffer (must resolve to exactly one segment -- panics
 * otherwise) and return its bus address via *paddr.
 * NOTE(review): storing new_mbuf into *mbufp and the return statements
 * are missing from this extraction; code left byte-identical.
 */
368 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
371 struct mbuf *new_mbuf;
372 bus_dma_segment_t seg[1];
376 KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
378 new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
379 if (new_mbuf == NULL)
/* use the whole cluster as the receive buffer */
381 new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
/* tear down the mapping of the previous buffer, if any */
384 bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
385 bus_dmamap_unload(tag, map);
388 error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
390 KASSERT(nsegs == 1, ("Too many segments returned!"));
391 if (nsegs != 1 || error)
392 panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
394 bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
397 (*paddr) = seg->ds_addr;
/*
 * Allocate and map DMA-coherent memory for 'size' descriptors in 'tab',
 * chain them into a ring via next_desc (built back-to-front, then the
 * last entry is pointed at the front), create the mbuf busdma tag in
 * *buffer_tag and a dmamap per descriptor.
 * NOTE(review): error-path returns, some arguments of the allocation
 * calls and the 'dw = &tab[i]' assignments are missing from this
 * extraction; code left byte-identical.
 */
402 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
403 uint32_t size, bus_dma_tag_t *buffer_tag)
405 struct mge_desc_wrapper *dw;
406 bus_addr_t desc_paddr;
/* walk backwards so each descriptor can point at its successor */
410 for (i = size - 1; i >= 0; i--) {
412 error = bus_dmamem_alloc(sc->mge_desc_dtag,
413 (void**)&(dw->mge_desc),
414 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
418 if_printf(sc->ifp, "failed to allocate DMA memory\n");
423 error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
424 dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
425 &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
428 if_printf(sc->ifp, "can't load descriptor\n");
429 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
435 /* Chain descriptors */
436 dw->mge_desc->next_desc = desc_paddr;
437 desc_paddr = dw->mge_desc_paddr;
/* close the ring: last descriptor points back at the first */
439 tab[size - 1].mge_desc->next_desc = desc_paddr;
441 /* Allocate a busdma tag for mbufs. */
442 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
443 8, 0, /* alignment, boundary */
444 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
445 BUS_SPACE_MAXADDR, /* highaddr */
446 NULL, NULL, /* filtfunc, filtfuncarg */
447 MCLBYTES, 1, /* maxsize, nsegments */
448 MCLBYTES, 0, /* maxsegsz, flags */
449 NULL, NULL, /* lockfunc, lockfuncarg */
450 buffer_tag); /* dmat */
452 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
456 /* Create TX busdma maps */
457 for (i = 0; i < size; i++) {
459 error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
461 if_printf(sc->ifp, "failed to create map for mbuf\n");
/* start with no mbuf attached */
465 dw->buffer = (struct mbuf*)NULL;
466 dw->mge_desc->buffer = (bus_addr_t)NULL;
/*
 * Top-level DMA setup: create the descriptor busdma tag, allocate the TX
 * and RX descriptor rings, attach an initial cluster mbuf to every RX
 * descriptor and record the ring start addresses in the softc.
 * NOTE(review): return statements and the tag arguments to the
 * mge_alloc_desc_dma() calls are missing from this extraction.
 */
473 mge_allocate_dma(struct mge_softc *sc)
476 struct mge_desc_wrapper *dw;
479 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
480 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
481 16, 0, /* alignment, boundary */
482 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
483 BUS_SPACE_MAXADDR, /* highaddr */
484 NULL, NULL, /* filtfunc, filtfuncarg */
485 sizeof(struct mge_desc), 1, /* maxsize, nsegments */
486 sizeof(struct mge_desc), 0, /* maxsegsz, flags */
487 NULL, NULL, /* lockfunc, lockfuncarg */
488 &sc->mge_desc_dtag); /* dmat */
491 mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
493 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
/* pre-fill every RX descriptor with a receive buffer */
496 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
497 dw = &(sc->mge_rx_desc[i]);
498 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
499 &dw->mge_desc->buffer);
502 sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
503 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
/*
 * Release a descriptor ring: for each entry, unload/destroy the buffer
 * dmamap (freeing the attached mbuf when free_mbufs is set -- the m_freem
 * call is not visible here), then unload and free the descriptor memory.
 * NOTE(review): loop-body scaffolding ('dw = &tab[i]', mbuf free, map
 * destruction) is missing from this extraction; code left byte-identical.
 */
509 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
510 uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
512 struct mge_desc_wrapper *dw;
515 for (i = 0; i < size; i++) {
519 if (dw->buffer_dmap) {
521 bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
522 BUS_DMASYNC_POSTREAD);
523 bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
525 bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
529 /* Free RX descriptors */
531 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
532 BUS_DMASYNC_POSTREAD);
533 bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
534 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
/*
 * Tear down all DMA state: free both descriptor rings (RX also frees its
 * mbufs; TX does not -- they were freed at completion), then destroy the
 * mbuf tags and finally the descriptor tag.
 */
541 mge_free_dma(struct mge_softc *sc)
543 /* Free desciptors and mbufs */
544 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
545 mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
547 /* Destroy mbuf dma tag */
548 bus_dma_tag_destroy(sc->mge_tx_dtag);
549 bus_dma_tag_destroy(sc->mge_rx_dtag);
550 /* Destroy descriptors tag */
551 bus_dma_tag_destroy(sc->mge_desc_dtag);
/*
 * Rebuild the RX ring after a resource error: free and reallocate all RX
 * descriptors and buffers, reset the current index, point the hardware
 * at the ring start and re-enable the default RX queue.
 * Caller must hold the receive lock (asserted below).
 */
555 mge_reinit_rx(struct mge_softc *sc)
557 struct mge_desc_wrapper *dw;
560 MGE_RECEIVE_LOCK_ASSERT(sc);
562 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
564 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
567 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
568 dw = &(sc->mge_rx_desc[i]);
569 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
570 &dw->mge_desc->buffer);
573 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
574 sc->rx_desc_curr = 0;
/* tell the hardware where the (new) ring begins */
576 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
579 /* Enable RX queue */
580 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
583 #ifdef DEVICE_POLLING
584 static poll_handler_t mge_poll;
/*
 * DEVICE_POLLING handler: bail out if the interface is down; on
 * POLL_AND_CHECK_STATUS also read/ack the interrupt cause registers and
 * (per the visible check) handle RX resource errors; then run the TX and
 * RX service routines under the global lock.
 * NOTE(review): lock acquisition, the mge_reinit_rx() call for the error
 * path and the return of rx_npkts are missing from this extraction.
 */
587 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
589 struct mge_softc *sc = ifp->if_softc;
590 uint32_t int_cause, int_cause_ext;
595 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
596 MGE_GLOBAL_UNLOCK(sc);
600 if (cmd == POLL_AND_CHECK_STATUS) {
601 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
602 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
604 /* Check for resource error */
605 if (int_cause & MGE_PORT_INT_RXERRQ0)
/* ack whatever cause bits were observed */
608 if (int_cause || int_cause_ext) {
609 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
610 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
614 mge_intr_tx_locked(sc);
615 rx_npkts = mge_intr_rx_locked(sc, count);
617 MGE_GLOBAL_UNLOCK(sc);
620 #endif /* DEVICE_POLLING */
/*
 * Device attach: read FDT properties (node, PHY address), initialize the
 * TX/RX mutexes, allocate bus resources and DMA state, create and
 * configure the ifnet (checksum offload + VLAN MTU, optional polling),
 * attach the PHY via mii_attach(), program the PHY address into
 * MGE_REG_PHYDEV, and hook up interrupt handlers.
 * NOTE(review): error-handling paths, mge_ver_params()/mge_add_sysctls()
 * calls and the final return are missing from this extraction; the
 * visible loop installs only the first 2 of the 5 declared handlers --
 * presumably version-dependent, confirm against the full source.
 */
623 mge_attach(device_t dev)
625 struct mge_softc *sc;
626 struct mii_softc *miisc;
628 uint8_t hwaddr[ETHER_ADDR_LEN];
631 sc = device_get_softc(dev);
633 sc->node = ofw_bus_get_node(dev);
635 /* Set chip version-dependent parameters */
638 /* Get phy address and used softc from fdt */
639 if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) != 0)
642 /* Initialize mutexes */
643 mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
644 mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
646 /* Allocate IO and IRQ resources */
647 error = bus_alloc_resources(dev, res_spec, sc->res);
649 device_printf(dev, "could not allocate resources\n");
654 /* Allocate DMA, buffers, buffer descriptors */
655 error = mge_allocate_dma(sc);
661 sc->tx_desc_curr = 0;
662 sc->rx_desc_curr = 0;
663 sc->tx_desc_used_idx = 0;
664 sc->tx_desc_used_count = 0;
666 /* Configure defaults for interrupts coalescing */
667 sc->rx_ic_time = 768;
668 sc->tx_ic_time = 768;
671 /* Allocate network interface */
672 ifp = sc->ifp = if_alloc(IFT_ETHER);
674 device_printf(dev, "if_alloc() failed\n");
679 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
681 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
682 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
683 ifp->if_capenable = ifp->if_capabilities;
684 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
686 #ifdef DEVICE_POLLING
687 /* Advertise that polling is supported */
688 ifp->if_capabilities |= IFCAP_POLLING;
691 ifp->if_init = mge_init;
692 ifp->if_start = mge_start;
693 ifp->if_ioctl = mge_ioctl;
695 ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
696 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
697 IFQ_SET_READY(&ifp->if_snd);
699 mge_get_mac_address(sc, hwaddr);
700 ether_ifattach(ifp, hwaddr);
701 callout_init(&sc->wd_callout, 0);
/* attach the PHY(s) found at 'phy' on this MII bus */
704 error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
705 mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
707 device_printf(dev, "attaching PHYs failed\n");
711 sc->mii = device_get_softc(sc->miibus);
713 /* Tell the MAC where to find the PHY so autoneg works */
714 miisc = LIST_FIRST(&sc->mii->mii_phys);
715 MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
717 /* Attach interrupt handlers */
718 for (i = 0; i < 2; ++i) {
719 error = bus_setup_intr(dev, sc->res[1 + i],
720 INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
721 sc, &sc->ih_cookie[i]);
723 device_printf(dev, "could not setup %s\n",
724 mge_intrs[i].description);
/*
 * Device detach: stop the controller, drain the watchdog callout, tear
 * down the installed interrupt handlers, detach the ifnet, free DMA
 * resources, release bus resources and destroy the mutexes.
 * NOTE(review): mge_stop()/mge_free_dma() calls and the return are
 * missing from this extraction; code left byte-identical.
 */
734 mge_detach(device_t dev)
736 struct mge_softc *sc;
739 sc = device_get_softc(dev);
741 /* Stop controller and free TX queue */
745 /* Wait for stopping ticks */
746 callout_drain(&sc->wd_callout);
748 /* Stop and release all interrupts */
749 for (i = 0; i < 2; ++i) {
750 if (!sc->ih_cookie[i])
753 error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
755 device_printf(dev, "could not release %s\n",
756 mge_intrs[i].description);
759 /* Detach network interface */
761 ether_ifdetach(sc->ifp);
765 /* Free DMA resources */
768 /* Free IO memory handler */
769 bus_release_resources(dev, res_spec, sc->res);
771 /* Destroy mutexes */
772 mtx_destroy(&sc->receive_lock);
773 mtx_destroy(&sc->transmit_lock);
/*
 * ifmedia status callback: report the current media state from the MII
 * layer under the transmit lock.  NOTE(review): the mii_pollstat() call
 * between locking and reading appears to be missing from this extraction.
 */
779 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
781 struct mge_softc *sc = ifp->if_softc;
782 struct mii_data *mii;
784 MGE_TRANSMIT_LOCK(sc);
789 ifmr->ifm_active = mii->mii_media_active;
790 ifmr->ifm_status = mii->mii_media_status;
792 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Translate an ifmedia word into the port serial control register value:
 * base bits (reserved bit 9, force-link-fail, 1552-byte MRU) plus
 * speed/autoneg bits per media subtype; full-duplex flag is OR'd in if
 * requested (the IFM_FDX test itself is not visible here).
 * NOTE(review): switch case labels and closing braces are missing from
 * this extraction; code left byte-identical.
 */
796 mge_set_port_serial_control(uint32_t media)
798 uint32_t port_config;
800 port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
801 PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
803 if (IFM_TYPE(media) == IFM_ETHER) {
804 switch(IFM_SUBTYPE(media)) {
/* gigabit */
808 port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
809 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
810 PORT_SERIAL_SPEED_AUTONEG);
/* 100 Mbit */
813 port_config |= (PORT_SERIAL_MII_SPEED_100 |
814 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
815 PORT_SERIAL_SPEED_AUTONEG);
/* default: autonegotiate speed */
818 port_config |= (PORT_SERIAL_AUTONEG |
819 PORT_SERIAL_AUTONEG_FC |
820 PORT_SERIAL_SPEED_AUTONEG);
824 port_config |= PORT_SERIAL_FULL_DUPLEX;
826 return (port_config);
/*
 * ifmedia change callback: when the interface is up, record the requested
 * media in the softc and push it to the PHY via mii_mediachg().
 * NOTE(review): the matching MGE_GLOBAL_LOCK() and the return are missing
 * from this extraction; code left byte-identical.
 */
830 mge_ifmedia_upd(struct ifnet *ifp)
832 struct mge_softc *sc = ifp->if_softc;
834 if (ifp->if_flags & IFF_UP) {
837 sc->mge_media_status = sc->mii->mii_media.ifm_media;
838 mii_mediachg(sc->mii);
841 MGE_GLOBAL_UNLOCK(sc);
/* (fragment -- the enclosing function's definition line is missing from
 * this extraction; presumably mge_init(): take the global lock, run
 * mge_init_locked(), drop the lock -- confirm against the full source) */
850 struct mge_softc *sc = arg;
854 mge_init_locked(arg);
856 MGE_GLOBAL_UNLOCK(sc);
/*
 * Bring the controller up (global lock held, asserted below): stop it,
 * disable interrupts, program MAC address and multicast filters, set up
 * TX queue arbiter/token registers (clearing unused queues 1..6), port
 * and SDMA configuration, descriptor ring pointers and indices, mark all
 * RX descriptors DMA-owned, enable the RX queue and the port, wait for
 * link-up, configure interrupt coalescing, enable interrupts (unless
 * polling is active), mark the ifnet running and start the watchdog.
 * NOTE(review): the mge_stop()/mge_set_rxic()/mge_set_txic() calls, the
 * link-up wait loop body and the polling #else are missing from this
 * extraction; code left byte-identical.
 */
860 mge_init_locked(void *arg)
862 struct mge_softc *sc = arg;
863 struct mge_desc_wrapper *dw;
864 volatile uint32_t reg_val;
868 MGE_GLOBAL_LOCK_ASSERT(sc);
873 /* Disable interrupts */
874 mge_intrs_ctrl(sc, 0);
876 /* Set MAC address */
877 mge_set_mac_address(sc);
879 /* Setup multicast filters */
880 mge_setup_multicast(sc);
/* version-2 parts need RGMII and fixed-priority TX arbitration enabled */
882 if (sc->mge_ver == 2) {
883 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
884 MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
887 /* Initialize TX queue configuration registers */
888 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
889 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
890 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
892 /* Clear TX queue configuration registers for unused queues */
893 for (i = 1; i < 7; i++) {
894 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
895 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
896 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
899 /* Set default MTU */
900 MGE_WRITE(sc, sc->mge_mtu, 0);
902 /* Port configuration */
903 MGE_WRITE(sc, MGE_PORT_CONFIG,
904 PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
905 PORT_CONFIG_ARO_RXQ(0));
906 MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);
908 /* Setup port configuration */
909 reg_val = mge_set_port_serial_control(sc->mge_media_status);
910 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
912 /* Setup SDMA configuration */
913 MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
914 MGE_SDMA_TX_BYTE_SWAP |
915 MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
916 MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
918 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
/* point the hardware at the descriptor rings */
920 MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
921 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
924 /* Reset descriptor indexes */
925 sc->tx_desc_curr = 0;
926 sc->rx_desc_curr = 0;
927 sc->tx_desc_used_idx = 0;
928 sc->tx_desc_used_count = 0;
930 /* Enable RX descriptors */
931 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
932 dw = &sc->mge_rx_desc[i];
933 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
934 dw->mge_desc->buff_size = MCLBYTES;
935 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
936 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
939 /* Enable RX queue */
940 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
/* enable the port serial interface */
943 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
944 reg_val |= PORT_SERIAL_ENABLE;
945 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
/* wait (bounded, per the timeout message) for the link to come up */
948 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
949 if (reg_val & MGE_STATUS_LINKUP)
953 if_printf(sc->ifp, "Timeout on link-up\n");
958 /* Setup interrupts coalescing */
962 /* Enable interrupts */
963 #ifdef DEVICE_POLLING
965 * * ...only if polling is not turned on. Disable interrupts explicitly
966 * if polling is enabled.
968 if (sc->ifp->if_capenable & IFCAP_POLLING)
969 mge_intrs_ctrl(sc, 0);
971 #endif /* DEVICE_POLLING */
972 mge_intrs_ctrl(sc, 1);
974 /* Activate network interface */
975 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
976 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
979 /* Schedule watchdog timeout */
980 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/* Error interrupt handler -- currently only logs that it fired. */
984 mge_intr_err(void *arg)
986 struct mge_softc *sc = arg;
990 if_printf(ifp, "%s\n", __FUNCTION__);
/* Miscellaneous interrupt handler -- currently only logs that it fired. */
994 mge_intr_misc(void *arg)
996 struct mge_softc *sc = arg;
1000 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * RX interrupt handler: skip entirely if polling owns the device; read
 * the cause registers, handle RX resource errors (reinit path -- the
 * mge_reinit_rx() call itself is not visible here), mask down to the RX
 * queue 0 / RX overrun bits, ack them and service the ring without a
 * packet budget (count = -1).
 */
1004 mge_intr_rx(void *arg) {
1005 struct mge_softc *sc = arg;
1006 uint32_t int_cause, int_cause_ext;
1008 MGE_RECEIVE_LOCK(sc);
1010 #ifdef DEVICE_POLLING
1011 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1012 MGE_RECEIVE_UNLOCK(sc);
1017 /* Get interrupt cause */
1018 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1019 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1021 /* Check for resource error */
1022 if (int_cause & MGE_PORT_INT_RXERRQ0) {
/* ack the error bit while preserving the rest of the cause word */
1024 MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1025 int_cause & ~MGE_PORT_INT_RXERRQ0);
1028 int_cause &= MGE_PORT_INT_RXQ0;
1029 int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1031 if (int_cause || int_cause_ext) {
1032 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1033 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1034 mge_intr_rx_locked(sc, -1);
1037 MGE_RECEIVE_UNLOCK(sc);
/*
 * Service the RX ring (receive lock held, asserted below): for each
 * DMA-completed descriptor, copy the frame out with m_devget() (dropping
 * the trailing CRC), strip the 2-byte alignment pad, run checksum-offload
 * post-processing and hand the mbuf to if_input with the lock dropped.
 * The descriptor is then recycled (re-marked MGE_DMA_OWNED).  'count' is
 * the polling budget; -1 means unbounded.
 * NOTE(review): the error-summary test reads '~(status & MGE_ERR_SUMMARY)'
 * which is always non-zero for a 32-bit status -- looks like it should be
 * '!(status & MGE_ERR_SUMMARY)'; confirm against the full source before
 * changing.  Several loop/return lines are missing from this extraction.
 */
1042 mge_intr_rx_locked(struct mge_softc *sc, int count)
1044 struct ifnet *ifp = sc->ifp;
1047 struct mge_desc_wrapper* dw;
1051 MGE_RECEIVE_LOCK_ASSERT(sc);
1053 while (count != 0) {
1054 dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1055 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1056 BUS_DMASYNC_POSTREAD);
1059 status = dw->mge_desc->cmd_status;
1060 bufsize = dw->mge_desc->buff_size;
/* hardware still owns this descriptor: ring is drained */
1061 if ((status & MGE_DMA_OWNED) != 0)
1064 if (dw->mge_desc->byte_count &&
1065 ~(status & MGE_ERR_SUMMARY)) {
1067 bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1068 BUS_DMASYNC_POSTREAD);
1070 mb = m_devget(dw->buffer->m_data,
1071 dw->mge_desc->byte_count - ETHER_CRC_LEN,
1075 /* Give up if no mbufs */
/* strip the 2-byte pad the hardware prepends for IP alignment */
1079 mb->m_pkthdr.len -= 2;
1082 mge_offload_process_frame(ifp, mb, status,
/* drop the lock across the stack input call */
1085 MGE_RECEIVE_UNLOCK(sc);
1086 (*ifp->if_input)(ifp, mb);
1087 MGE_RECEIVE_LOCK(sc);
/* recycle the descriptor back to the hardware */
1091 dw->mge_desc->byte_count = 0;
1092 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1093 sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
1094 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1095 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Summary interrupt handler -- currently only logs that it fired. */
1105 mge_intr_sum(void *arg)
1107 struct mge_softc *sc = arg;
1111 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * TX interrupt handler: skip if polling owns the device; ack the TX
 * buffer bit in the extended cause register, then reap completed
 * transmissions under the transmit lock.
 */
1115 mge_intr_tx(void *arg)
1117 struct mge_softc *sc = arg;
1118 uint32_t int_cause_ext;
1120 MGE_TRANSMIT_LOCK(sc);
1122 #ifdef DEVICE_POLLING
1123 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1124 MGE_TRANSMIT_UNLOCK(sc);
1129 /* Ack the interrupt */
1130 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1131 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
1132 int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);
1134 mge_intr_tx_locked(sc);
1136 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Reap completed TX descriptors (transmit lock held, asserted below):
 * walk from tx_desc_used_idx while descriptors are no longer DMA-owned,
 * update collision statistics from the error bits, unload and free each
 * transmitted mbuf, then clear IFF_DRV_OACTIVE and kick mge_start_locked()
 * to push any queued packets.
 * NOTE(review): the watchdog-disable statement and a packet-counter
 * update appear to be missing from this extraction.
 */
1141 mge_intr_tx_locked(struct mge_softc *sc)
1143 struct ifnet *ifp = sc->ifp;
1144 struct mge_desc_wrapper *dw;
1145 struct mge_desc *desc;
1149 MGE_TRANSMIT_LOCK_ASSERT(sc);
1151 /* Disable watchdog */
1154 while (sc->tx_desc_used_count) {
1155 /* Get the descriptor */
1156 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1157 desc = dw->mge_desc;
1158 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1159 BUS_DMASYNC_POSTREAD);
1161 /* Get descriptor status */
1162 status = desc->cmd_status;
/* hardware still transmitting this one: stop reaping */
1164 if (status & MGE_DMA_OWNED)
1167 sc->tx_desc_used_idx =
1168 (++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;
1169 sc->tx_desc_used_count--;
1171 /* Update collision statistics */
1172 if (status & MGE_ERR_SUMMARY) {
1173 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1174 ifp->if_collisions++;
/* retry-limit exceeded counts as 16 collisions */
1175 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1176 ifp->if_collisions += 16;
1179 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1180 BUS_DMASYNC_POSTWRITE);
1181 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1182 m_freem(dw->buffer);
1183 dw->buffer = (struct mbuf*)NULL;
1190 /* Now send anything that was pending */
1191 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1192 mge_start_locked(ifp);
/*
 * ifnet ioctl handler.  Visible cases: interface flag changes (promisc /
 * allmulti / up-down transitions), multicast list updates, capability
 * toggles (HWCSUM and, under DEVICE_POLLING, IFCAP_POLLING with
 * ether_poll_register/deregister bracketing the interrupt switch), and
 * media ioctls (rejecting 1000baseT half-duplex).  Everything else falls
 * through to ether_ioctl().
 * NOTE(review): case labels (SIOCSIFFLAGS etc.), several break/return
 * statements and the mge_stop() call on down-transition are missing from
 * this extraction; code left byte-identical.
 */
1197 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1199 struct mge_softc *sc = ifp->if_softc;
1200 struct ifreq *ifr = (struct ifreq *)data;
1208 MGE_GLOBAL_LOCK(sc);
1210 if (ifp->if_flags & IFF_UP) {
1211 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* only react to the flags that actually changed */
1212 flags = ifp->if_flags ^ sc->mge_if_flags;
1213 if (flags & IFF_PROMISC)
1214 mge_set_prom_mode(sc,
1215 MGE_RX_DEFAULT_QUEUE);
1217 if (flags & IFF_ALLMULTI)
1218 mge_setup_multicast(sc);
1220 mge_init_locked(sc);
1222 else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1225 sc->mge_if_flags = ifp->if_flags;
1226 MGE_GLOBAL_UNLOCK(sc);
/* multicast list changed */
1230 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1231 MGE_GLOBAL_LOCK(sc);
1232 mge_setup_multicast(sc);
1233 MGE_GLOBAL_UNLOCK(sc);
/* capability toggles */
1237 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1238 if (mask & IFCAP_HWCSUM) {
1239 ifp->if_capenable &= ~IFCAP_HWCSUM;
1240 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1241 if (ifp->if_capenable & IFCAP_TXCSUM)
1242 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1244 ifp->if_hwassist = 0;
1246 #ifdef DEVICE_POLLING
1247 if (mask & IFCAP_POLLING) {
1248 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1249 error = ether_poll_register(mge_poll, ifp);
/* polling on: mask interrupts */
1253 MGE_GLOBAL_LOCK(sc);
1254 mge_intrs_ctrl(sc, 0);
1255 ifp->if_capenable |= IFCAP_POLLING;
1256 MGE_GLOBAL_UNLOCK(sc);
/* polling off: re-enable interrupts */
1258 error = ether_poll_deregister(ifp);
1259 MGE_GLOBAL_LOCK(sc);
1260 mge_intrs_ctrl(sc, 1);
1261 ifp->if_capenable &= ~IFCAP_POLLING;
1262 MGE_GLOBAL_UNLOCK(sc);
1267 case SIOCGIFMEDIA: /* fall through */
1269 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1270 && !(ifr->ifr_media & IFM_FDX)) {
1271 device_printf(sc->dev,
1272 "1000baseTX half-duplex unsupported\n");
1275 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1278 error = ether_ioctl(ifp, command, data);
/*
 * Read a PHY register over SMI: issue the read command (note: registers
 * are accessed through phy_sc, the softc that owns the SMI block), poll
 * for MGE_SMI_READVALID with bounded retries, warn on timeout, and return
 * the low 16 data bits.  NOTE(review): the 'while (--retries &&' line of
 * the poll loop is split/missing in this extraction.
 */
1284 mge_miibus_readreg(device_t dev, int phy, int reg)
1286 struct mge_softc *sc;
1289 sc = device_get_softc(dev);
1291 MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
1292 (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1294 retries = MGE_SMI_READ_RETRIES;
1296 !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
1297 DELAY(MGE_SMI_READ_DELAY);
1300 device_printf(dev, "Timeout while reading from PHY\n");
1302 return (MGE_READ(sc->phy_sc, MGE_REG_SMI) & 0xffff);
/*
 * Write a PHY register over SMI: issue the write command through phy_sc,
 * then poll until MGE_SMI_BUSY clears (bounded retries), warning on
 * timeout.  NOTE(review): the return statement is missing from this
 * extraction; code left byte-identical.
 */
1306 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1308 struct mge_softc *sc;
1311 sc = device_get_softc(dev);
1313 MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
1314 (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1316 retries = MGE_SMI_WRITE_RETRIES;
1317 while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
1318 DELAY(MGE_SMI_WRITE_DELAY);
1321 device_printf(dev, "Timeout while writing to PHY\n");
/*
 * Probe: match the FDT compatible string "mrvl,ge" and set the device
 * description.  NOTE(review): the failure return (ENXIO) is missing from
 * this extraction.
 */
1326 mge_probe(device_t dev)
1329 if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1332 device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1333 return (BUS_PROBE_DEFAULT);
/* Resume stub -- only logs that it was called. */
1337 mge_resume(device_t dev)
1340 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * Shutdown: under the global lock, deregister polling if active and stop
 * the controller.  NOTE(review): the mge_stop() call and return are
 * missing from this extraction; code left byte-identical.
 */
1345 mge_shutdown(device_t dev)
1347 struct mge_softc *sc = device_get_softc(dev);
1349 MGE_GLOBAL_LOCK(sc);
1351 #ifdef DEVICE_POLLING
1352 if (sc->ifp->if_capenable & IFCAP_POLLING)
1353 ether_poll_deregister(sc->ifp);
1358 MGE_GLOBAL_UNLOCK(sc);
/*
 * Map an mbuf chain for transmission onto the next free TX descriptor.
 * The DMA load must produce exactly one segment (tag created with
 * nsegments = 1); on failure the map is unloaded and a non-zero value is
 * returned so the caller can requeue the packet.  On success the
 * descriptor is filled (LAST|FIRST|CRC|interrupt|padding + visible flags
 * truncated here), checksum-offload bits are added, the descriptor is
 * synced and the ring indices advance.
 * NOTE(review): the MGE_DMA_OWNED flag and the 'attach m0 to dw->buffer'
 * step are among the lines missing from this extraction.
 */
1364 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1366 struct mge_desc_wrapper *dw = NULL;
1368 bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1376 /* Check for free descriptors */
1377 if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1378 /* No free descriptors */
1382 /* Fetch unused map */
1383 desc_no = sc->tx_desc_curr;
1384 dw = &sc->mge_tx_desc[desc_no];
1385 mapp = dw->buffer_dmap;
1387 /* Create mapping in DMA memory */
1388 error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1390 if (error != 0 || nsegs != 1 ) {
1391 bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1392 return ((error != 0) ? error : -1);
1395 bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1397 /* Everything is ok, now we can send buffers */
1398 for (seg = 0; seg < nsegs; seg++) {
1399 dw->mge_desc->byte_count = segs[seg].ds_len;
1400 dw->mge_desc->buffer = segs[seg].ds_addr;
1402 dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
1403 MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
/* add IP/TCP/UDP checksum offload flags as requested by the mbuf */
1407 mge_offload_setup_descriptor(sc, dw);
1410 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1411 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1413 sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
1414 sc->tx_desc_used_count++;
/*
 * Periodic (1 Hz) callout body.  NOTE(review): the function signature
 * (presumably mge_tick(void *msc)) and the TX-timeout check that the
 * comment at 1423 refers to are on elided lines.
 */
1421 struct mge_softc *sc = msc;
1423 /* Check for TX timeout */
1428 /* Check for media type change */
/* Re-apply media settings if the PHY reported a different media word. */
1429 if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1430 mge_ifmedia_upd(sc->ifp);
1432 /* Schedule another timeout one second from now */
1433 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * TX watchdog: called from the tick handler.  If the countdown timer is
 * idle (0) or has not yet expired after decrementing, do nothing;
 * otherwise report a timeout and reinitialize the controller.
 * NOTE(review): the early-return after the unlock at 1446 and the ifp
 * assignment are on elided lines.
 */
1437 mge_watchdog(struct mge_softc *sc)
1443 MGE_GLOBAL_LOCK(sc);
1445 if (sc->wd_timer == 0 || --sc->wd_timer) {
1446 MGE_GLOBAL_UNLOCK(sc);
1451 if_printf(ifp, "watchdog timeout\n");
/* Full reinit recovers the hardware after a stuck transmitter. */
1454 mge_init_locked(sc);
1456 MGE_GLOBAL_UNLOCK(sc);
/*
 * if_start entry point: take the transmit lock and defer to the locked
 * worker.
 */
1460 mge_start(struct ifnet *ifp)
1462 struct mge_softc *sc = ifp->if_softc;
1464 MGE_TRANSMIT_LOCK(sc);
1466 mge_start_locked(ifp);
1468 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Drain the interface send queue (transmit lock held): dequeue packets,
 * defragment, encapsulate into TX descriptors, and finally kick the TX
 * queue.  On a failed encapsulation the packet is pushed back and the
 * interface marked OACTIVE until descriptors free up.
 * NOTE(review): the extract is elided here -- the running/oactive early
 * return, the dequeue loop construct, the m_defrag result handling and
 * the queued-packet accounting sit on missing lines.
 */
1472 mge_start_locked(struct ifnet *ifp)
1474 struct mge_softc *sc;
1475 struct mbuf *m0, *mtmp;
1476 uint32_t reg_val, queued = 0;
1480 MGE_TRANSMIT_LOCK_ASSERT(sc);
1482 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1487 /* Get packet from the queue */
1488 IF_DEQUEUE(&ifp->if_snd, m0);
/* Hardware takes one segment per packet; collapse chains first. */
1492 mtmp = m_defrag(m0, M_NOWAIT);
1496 if (mge_encap(sc, m0)) {
/* No descriptor available: requeue and throttle the stack. */
1497 IF_PREPEND(&ifp->if_snd, m0);
1498 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1506 /* Enable transmitter and watchdog timer */
1507 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1508 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
/*
 * Stop the controller: cancel the tick callout, mask interrupts, disable
 * the RX/TX queues, reclaim and free every pending TX mbuf, wait for the
 * transmitter to drain, then disable the port serial interface.
 *
 * Fix: the used-descriptor index advance used
 *     sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) % ...
 * which modifies tx_desc_used_idx twice without a sequence point --
 * undefined behavior in C (CERT EXP30-C).  Rewritten as a single
 * well-defined assignment with identical intended result.
 *
 * NOTE(review): this extract is elided (ifp assignment, loop construct of
 * the drain wait, its timeout counter, and several braces are on missing
 * lines) -- everything else is kept byte-identical.
 */
1514 mge_stop(struct mge_softc *sc)
1517 volatile uint32_t reg_val, status;
1518 struct mge_desc_wrapper *dw;
1519 struct mge_desc *desc;
/* Nothing to do if the interface is already down. */
1524 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1527 /* Stop tick engine */
1528 callout_stop(&sc->wd_callout);
1530 /* Disable interface */
1531 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1534 /* Disable interrupts */
1535 mge_intrs_ctrl(sc, 0);
1537 /* Disable Rx and Tx */
1538 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1539 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1540 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1542 /* Remove pending data from TX queue */
1543 while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1544 sc->tx_desc_used_count) {
1545 /* Get the descriptor */
1546 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1547 desc = dw->mge_desc;
1548 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1549 BUS_DMASYNC_POSTREAD);
1551 /* Get descriptor status */
1552 status = desc->cmd_status;
/* Descriptor still owned by DMA engine: stop reclaiming here. */
1554 if (status & MGE_DMA_OWNED)
1557 sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
1559 sc->tx_desc_used_count--;
1561 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1562 BUS_DMASYNC_POSTWRITE);
1563 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1565 m_freem(dw->buffer);
1566 dw->buffer = (struct mbuf*)NULL;
1569 /* Wait for end of transmission */
1572 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1573 if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1574 (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1580 if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
/* Finally turn the port serial interface off. */
1583 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1584 reg_val &= ~(PORT_SERIAL_ENABLE);
1585 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
/*
 * Newbus suspend method.  Only logs the call; no hardware state is saved
 * here.  NOTE(review): the return statement is on an elided line.
 */
1589 mge_suspend(device_t dev)
1592 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * Translate the RX descriptor's hardware checksum-offload status bits
 * into mbuf csum_flags for a received frame, when IFCAP_RXCSUM is
 * enabled: a verified IP header yields CSUM_IP_CHECKED|CSUM_IP_VALID,
 * and an unfragmented TCP/UDP packet with a good L4 checksum yields
 * CSUM_DATA_VALID|CSUM_PSEUDO_HDR with csum_data forced to 0xFFFF so the
 * stack skips its own verification.
 * NOTE(review): the declaration/initialization of csum_flags is on an
 * elided line.
 */
1597 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1598 uint32_t status, uint16_t bufsize)
1602 if (ifp->if_capenable & IFCAP_RXCSUM) {
1603 if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1604 csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
/* L4 checksum is only trusted on non-fragmented TCP/UDP packets. */
1606 if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1607 (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1608 (status & MGE_RX_L4_CSUM_OK)) {
1609 csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1610 frame->m_pkthdr.csum_data = 0xFFFF;
1613 frame->m_pkthdr.csum_flags = csum_flags;
/*
 * Build the TX checksum-offload command bits for the descriptor wrapped
 * by dw, from the mbuf's csum_flags: parse the Ethernet (and optional
 * VLAN) header to locate the IP header, encode the IP header length, and
 * request IP/TCP/UDP checksum generation as asked for.  Offload is only
 * supported for ETHERTYPE_IP frames; other ethertypes are reported and
 * (presumably, on elided lines) skipped.
 * NOTE(review): declarations of cmd_status/etype/ehlen/ip and the closing
 * of the non-IP branch are on elided lines.
 */
1618 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1620 struct mbuf *m0 = dw->buffer;
1621 struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1622 int csum_flags = m0->m_pkthdr.csum_flags;
/* VLAN-tagged frames carry the real ethertype in evl_proto. */
1628 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1629 etype = ntohs(eh->evl_proto);
1630 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1631 csum_flags |= MGE_TX_VLAN_TAGGED;
1633 etype = ntohs(eh->evl_encap_proto);
1634 ehlen = ETHER_HDR_LEN;
1637 if (etype != ETHERTYPE_IP) {
1639 "TCP/IP Offload enabled for unsupported "
/* Hardware needs the IP header length (in 32-bit words) in the command. */
1644 ip = (struct ip *)(m0->m_data + ehlen);
1645 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1647 if ((m0->m_flags & M_FRAG) == 0)
1648 cmd_status |= MGE_TX_NOT_FRAGMENT;
1651 if (csum_flags & CSUM_IP)
1652 cmd_status |= MGE_TX_GEN_IP_CSUM;
1654 if (csum_flags & CSUM_TCP)
1655 cmd_status |= MGE_TX_GEN_L4_CSUM;
1657 if (csum_flags & CSUM_UDP)
1658 cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
/* Merge the offload bits into the descriptor's command/status word. */
1660 dw->mge_desc->cmd_status |= cmd_status;
/*
 * Enable or disable the controller's interrupt sources.  Enable path
 * unmasks RX queue 0, RX error and the extended-cause summary, plus the
 * extended TX error/underrun/buffer interrupts.  Disable path clears the
 * cause registers and masks everything.
 * NOTE(review): the if/else structure around the two paths is on elided
 * lines; the grouping below is inferred from the visible register writes.
 */
1664 mge_intrs_ctrl(struct mge_softc *sc, int enable)
/* Enable: unmask the interrupts this driver services. */
1668 MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1669 MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1670 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1671 MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1672 MGE_PORT_INT_EXT_TXBUF0);
/* Disable: acknowledge pending causes, then mask all sources. */
1674 MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1675 MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1677 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1678 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1680 MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1681 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
/*
 * mge_crc8 - compute CRC-8 over a byte buffer.
 *
 * Table-driven CRC-8 with polynomial 0x07, initial value 0x00, no bit
 * reflection and no final XOR (the table below is the standard poly-0x07
 * table: ct[0x01] == 0x07).  Used by mge_setup_multicast() to hash a
 * multicast MAC address into the "other" multicast filter table.
 *
 * @param data  input bytes (read-only; const-qualified, the original took
 *              a non-const pointer but never wrote through it)
 * @param size  number of bytes to process (<= 0 yields 0x00)
 * @return      the 8-bit CRC of the buffer
 */
static uint8_t
mge_crc8(const uint8_t *data, int size)
{
	uint8_t crc = 0;
	static const uint8_t ct[256] = {
		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
	};

	while (size-- > 0)
		crc = ct[crc ^ *(data++)];

	return (crc);
}
1731 mge_setup_multicast(struct mge_softc *sc)
1733 uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
1734 uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
1735 uint32_t smt[MGE_MCAST_REG_NUMBER];
1736 uint32_t omt[MGE_MCAST_REG_NUMBER];
1737 struct ifnet *ifp = sc->ifp;
1738 struct ifmultiaddr *ifma;
1742 if (ifp->if_flags & IFF_ALLMULTI) {
1743 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
1744 smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
1746 memset(smt, 0, sizeof(smt));
1747 memset(omt, 0, sizeof(omt));
1749 if_maddr_rlock(ifp);
1750 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1751 if (ifma->ifma_addr->sa_family != AF_LINK)
1754 mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1755 if (memcmp(mac, special, sizeof(special)) == 0) {
1757 smt[i >> 2] |= v << ((i & 0x03) << 3);
1759 i = mge_crc8(mac, ETHER_ADDR_LEN);
1760 omt[i >> 2] |= v << ((i & 0x03) << 3);
1763 if_maddr_runlock(ifp);
1766 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
1767 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
1768 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
/*
 * Program the RX interrupt-coalescing time into the SDMA config
 * register: clamp rx_ic_time to the hardware maximum, clear the
 * version-specific IPG field, and write the new value.
 * NOTE(review): the declaration of reg is on an elided line.
 */
1773 mge_set_rxic(struct mge_softc *sc)
1777 if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1778 sc->rx_ic_time = sc->mge_rx_ipg_max;
/* mge_rx_ipg(max, ver) doubles as the field mask for this chip version. */
1780 reg = MGE_READ(sc, MGE_SDMA_CONFIG);
1781 reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1782 reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1783 MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
/*
 * Program the TX interrupt-coalescing time into the TX FIFO urgent
 * threshold register; mirrors mge_set_rxic() for the transmit side.
 * NOTE(review): the declaration of reg is on an elided line.
 */
1787 mge_set_txic(struct mge_softc *sc)
1791 if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1792 sc->tx_ic_time = sc->mge_tfut_ipg_max;
/* mge_tfut_ipg(max, ver) doubles as the field mask for this chip version. */
1794 reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
1795 reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1796 reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1797 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
/*
 * Sysctl handler shared by the RX and TX interrupt-coalescing knobs.
 * arg1 is the softc, arg2 selects MGE_IC_RX or MGE_IC_TX.  Reports the
 * current time threshold and, on a write, stores the new value under the
 * global lock.  NOTE(review): the error/newptr early-return after
 * sysctl_handle_int(), the calls that push the new value to hardware
 * (presumably mge_set_rxic()/mge_set_txic()), and the final return are
 * on elided lines.
 */
1801 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1803 struct mge_softc *sc = (struct mge_softc *)arg1;
/* Export the currently configured value for the selected direction. */
1807 time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1808 error = sysctl_handle_int(oidp, &time, 0, req);
1812 MGE_GLOBAL_LOCK(sc);
1813 if (arg2 == MGE_IC_RX) {
1814 sc->rx_ic_time = time;
1817 sc->tx_ic_time = time;
1820 MGE_GLOBAL_UNLOCK(sc);
1826 mge_add_sysctls(struct mge_softc *sc)
1828 struct sysctl_ctx_list *ctx;
1829 struct sysctl_oid_list *children;
1830 struct sysctl_oid *tree;
1832 ctx = device_get_sysctl_ctx(sc->dev);
1833 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1834 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1835 CTLFLAG_RD, 0, "MGE Interrupts coalescing");
1836 children = SYSCTL_CHILDREN(tree);
1838 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1839 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1840 "I", "IC RX time threshold");
1841 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1842 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1843 "I", "IC TX time threshold");