2 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5 * Developed by Semihalf.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of MARVELL nor the names of contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
50 #include <net/ethernet.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
59 #include <netinet/in_systm.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
63 #include <sys/sockio.h>
65 #include <machine/bus.h>
67 #include <machine/resource.h>
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
73 #define MII_ADDR_BASE 8
76 #include <dev/mge/if_mgevar.h>
77 #include <arm/mv/mvreg.h>
78 #include <arm/mv/mvvar.h>
80 #include "miibus_if.h"
82 /* PHY registers are in the address space of the first mge unit */
83 static struct mge_softc *sc_mge0 = NULL;
85 static int mge_probe(device_t dev);
86 static int mge_attach(device_t dev);
87 static int mge_detach(device_t dev);
88 static int mge_shutdown(device_t dev);
89 static int mge_suspend(device_t dev);
90 static int mge_resume(device_t dev);
92 static int mge_miibus_readreg(device_t dev, int phy, int reg);
93 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
95 static int mge_ifmedia_upd(struct ifnet *ifp);
96 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
98 static void mge_init(void *arg);
99 static void mge_init_locked(void *arg);
100 static void mge_start(struct ifnet *ifp);
101 static void mge_start_locked(struct ifnet *ifp);
102 static void mge_watchdog(struct mge_softc *sc);
103 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
105 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
106 static uint32_t mge_rx_ipg(uint32_t val, int ver);
107 static void mge_ver_params(struct mge_softc *sc);
109 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
110 static void mge_intr_rx(void *arg);
111 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
112 static void mge_intr_tx(void *arg);
113 static void mge_intr_tx_locked(struct mge_softc *sc);
114 static void mge_intr_misc(void *arg);
115 static void mge_intr_sum(void *arg);
116 static void mge_intr_err(void *arg);
117 static void mge_stop(struct mge_softc *sc);
118 static void mge_tick(void *msc);
119 static uint32_t mge_set_port_serial_control(uint32_t media);
120 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
121 static void mge_set_mac_address(struct mge_softc *sc);
122 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
124 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
125 static int mge_allocate_dma(struct mge_softc *sc);
126 static int mge_alloc_desc_dma(struct mge_softc *sc,
127 struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
128 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
129 struct mbuf **mbufp, bus_addr_t *paddr);
130 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
131 static void mge_free_dma(struct mge_softc *sc);
132 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
133 bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
134 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
135 uint32_t status, uint16_t bufsize);
136 static void mge_offload_setup_descriptor(struct mge_softc *sc,
137 struct mge_desc_wrapper *dw);
138 static uint8_t mge_crc8(uint8_t *data, int size);
139 static void mge_setup_multicast(struct mge_softc *sc);
140 static void mge_set_rxic(struct mge_softc *sc);
141 static void mge_set_txic(struct mge_softc *sc);
142 static void mge_add_sysctls(struct mge_softc *sc);
143 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
145 static device_method_t mge_methods[] = {
146 /* Device interface */
147 DEVMETHOD(device_probe, mge_probe),
148 DEVMETHOD(device_attach, mge_attach),
149 DEVMETHOD(device_detach, mge_detach),
150 DEVMETHOD(device_shutdown, mge_shutdown),
151 DEVMETHOD(device_suspend, mge_suspend),
152 DEVMETHOD(device_resume, mge_resume),
154 DEVMETHOD(miibus_readreg, mge_miibus_readreg),
155 DEVMETHOD(miibus_writereg, mge_miibus_writereg),
159 static driver_t mge_driver = {
162 sizeof(struct mge_softc),
165 static devclass_t mge_devclass;
167 DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0);
168 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
169 MODULE_DEPEND(mge, ether, 1, 1, 1);
170 MODULE_DEPEND(mge, miibus, 1, 1, 1);
172 static struct resource_spec res_spec[] = {
173 { SYS_RES_MEMORY, 0, RF_ACTIVE },
174 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
175 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
176 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
177 { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
178 { SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
183 driver_intr_t *handler;
185 } mge_intrs[MGE_INTR_COUNT] = {
186 { mge_intr_rx, "GbE receive interrupt" },
187 { mge_intr_tx, "GbE transmit interrupt" },
188 { mge_intr_misc,"GbE misc interrupt" },
189 { mge_intr_sum, "GbE summary interrupt" },
190 { mge_intr_err, "GbE error interrupt" },
/*
 * Read the MAC address currently programmed in the MGE_MAC_ADDR_H/L
 * registers into addr[0..5]; addr[0] is the most-significant byte
 * (taken from bits 31:24 of MAC_ADDR_H).
 * NOTE(review): this excerpt has gaps in the embedded line numbering —
 * some original lines are not visible here; code left untouched.
 */
194 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
196 uint32_t mac_l, mac_h;
198 /* XXX use currently programmed MAC address; eventually this info will
199 * be provided by the loader */
201 mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
202 mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
/* Unpack big-endian-style: H holds bytes 0-3, L holds bytes 4-5. */
204 addr[0] = (mac_h & 0xff000000) >> 24;
205 addr[1] = (mac_h & 0x00ff0000) >> 16;
206 addr[2] = (mac_h & 0x0000ff00) >> 8;
207 addr[3] = (mac_h & 0x000000ff);
208 addr[4] = (mac_l & 0x0000ff00) >> 8;
209 addr[5] = (mac_l & 0x000000ff);
/*
 * Encode a TX FIFO urgent threshold value for the register layout of the
 * given controller version: both variants shift the value left by 4, but
 * mask to 14 bits vs. 16 bits.
 * NOTE(review): the selection structure (switch/if on 'ver') is not
 * visible in this excerpt — presumably ver 1 takes the 0x3fff mask and
 * ver 2 the 0xffff mask; confirm against the full source.
 */
213 mge_tfut_ipg(uint32_t val, int ver)
218 return ((val & 0x3fff) << 4);
221 return ((val & 0xffff) << 4);
/*
 * Encode an RX interrupt-coalescing IPG value for the given controller
 * version; the second form splits bit 15 off into a higher register bit.
 * NOTE(review): the version dispatch (switch/if on 'ver') is missing
 * from this excerpt; code left untouched.
 */
226 mge_rx_ipg(uint32_t val, int ver)
231 return ((val & 0x3fff) << 8);
234 return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
/*
 * Set chip-version-dependent softc parameters (IPG maxima, TX arbiter /
 * token-bucket configuration). The 88F6281 / MV78100 / MV78100_Z0 family
 * gets the wider (v2) limits; the else branch holds the older defaults.
 */
239 mge_ver_params(struct mge_softc *sc)
244 if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
245 d == MV_DEV_MV78100_Z0) {
248 sc->mge_tfut_ipg_max = 0xFFFF;
249 sc->mge_rx_ipg_max = 0xFFFF;
250 sc->mge_tx_arb_cfg = 0xFC0000FF;
251 sc->mge_tx_tok_cfg = 0xFFFF7FFF;
252 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/* Older controller revisions: narrower 14-bit IPG limits. */
256 sc->mge_tfut_ipg_max = 0x3FFF;
257 sc->mge_rx_ipg_max = 0x3FFF;
258 sc->mge_tx_arb_cfg = 0x000000FF;
259 sc->mge_tx_tok_cfg = 0x3FFFFFFF;
260 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/*
 * Program the interface's link-level address into the MAC_ADDR_H/L
 * registers and install a unicast filter entry for its last byte.
 * Caller must hold both driver locks (asserted below).
 * NOTE(review): if_mac is plain 'char *'; bytes >= 0x80 sign-extend
 * before the shifts here — harmless for the low register fields but
 * worth confirming (unsigned char would be cleaner).
 */
265 mge_set_mac_address(struct mge_softc *sc)
268 uint32_t mac_l, mac_h;
270 MGE_GLOBAL_LOCK_ASSERT(sc);
272 if_mac = (char *)IF_LLADDR(sc->ifp);
/* Pack bytes 4-5 into the low register, bytes 0-3 into the high one. */
274 mac_l = (if_mac[4] << 8) | (if_mac[5]);
275 mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
276 (if_mac[2] << 8) | (if_mac[3] << 0);
278 MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
279 MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
281 mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
/*
 * Point the hardware unicast DA filter at 'queue' for addresses whose
 * last byte is 'last_byte': the matching filter register slot gets the
 * pass+queue nibble, every other slot is cleared.
 * NOTE(review): the loop's inner if/else (select reg_val vs. 0 by
 * comparing i against reg_idx) is not fully visible in this excerpt.
 */
285 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
287 uint32_t reg_idx, reg_off, reg_val, i;
290 reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
291 reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
/* Bit 0 = pass, bits 3:1 = destination RX queue, within an 8-bit slot. */
292 reg_val = (1 | (queue << 1)) << reg_off;
294 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
296 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
298 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
/*
 * Switch promiscuous mode on or off based on IFF_PROMISC:
 *  - on:  set PORT_CONFIG_UPM and open every multicast/unicast DA filter
 *         entry so all frames land on 'queue';
 *  - off: clear UPM, wipe the multicast filters and reprogram the
 *         station address via mge_set_mac_address().
 */
303 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
305 uint32_t port_config;
308 /* Enable or disable promiscuous mode as needed */
309 if (sc->ifp->if_flags & IFF_PROMISC) {
310 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
311 port_config |= PORT_CONFIG_UPM;
312 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
/* Replicate the pass+queue nibble into all four byte slots. */
314 reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
315 (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
317 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
318 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
319 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
322 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
323 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
/* Leaving promiscuous mode: restore normal filtering. */
326 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
327 port_config &= ~PORT_CONFIG_UPM;
328 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
330 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
331 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
332 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
335 mge_set_mac_address(sc);
/*
 * bus_dmamap_load() callback: store the single segment's bus address
 * into the bus_addr_t the caller passed as 'arg' (via *paddr).
 */
340 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
344 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
347 *paddr = segs->ds_addr;
351 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
354 struct mbuf *new_mbuf;
355 bus_dma_segment_t seg[1];
359 KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
361 new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
362 if (new_mbuf == NULL)
364 new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
367 bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
368 bus_dmamap_unload(tag, map);
371 error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
373 KASSERT(nsegs == 1, ("Too many segments returned!"));
374 if (nsegs != 1 || error)
375 panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
377 bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
380 (*paddr) = seg->ds_addr;
/*
 * Allocate DMA-safe memory for 'size' descriptors in 'tab', chain them
 * into a circular list (each descriptor's next_desc points at the next,
 * with the last wrapping to the first), create a busdma tag for mbuf
 * buffers in *buffer_tag, and create one buffer map per descriptor.
 * NOTE(review): several lines (error unwinding, loop bodies' dw
 * assignment) are missing from this excerpt; code left untouched.
 */
385 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
386 uint32_t size, bus_dma_tag_t *buffer_tag)
388 struct mge_desc_wrapper *dw;
389 bus_addr_t desc_paddr;
/* Walk backwards so each descriptor can link to the one after it. */
393 for (i = size - 1; i >= 0; i--) {
395 error = bus_dmamem_alloc(sc->mge_desc_dtag,
396 (void**)&(dw->mge_desc),
397 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
401 if_printf(sc->ifp, "failed to allocate DMA memory\n");
406 error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
407 dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
408 &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
411 if_printf(sc->ifp, "can't load descriptor\n");
412 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
418 /* Chain descriptors */
419 dw->mge_desc->next_desc = desc_paddr;
420 desc_paddr = dw->mge_desc_paddr;
/* Close the ring: last descriptor points back at the first. */
422 tab[size - 1].mge_desc->next_desc = desc_paddr;
424 /* Allocate a busdma tag for mbufs. */
425 error = bus_dma_tag_create(NULL, /* parent */
426 8, 0, /* alignment, boundary */
427 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
428 BUS_SPACE_MAXADDR, /* highaddr */
429 NULL, NULL, /* filtfunc, filtfuncarg */
430 MCLBYTES, 1, /* maxsize, nsegments */
431 MCLBYTES, 0, /* maxsegsz, flags */
432 NULL, NULL, /* lockfunc, lockfuncarg */
433 buffer_tag); /* dmat */
435 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
439 /* Create TX busdma maps */
440 for (i = 0; i < size; i++) {
442 error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
444 if_printf(sc->ifp, "failed to create map for mbuf\n");
/* Start each slot empty; mge_new_rxbuf()/mge_encap() fill it later. */
448 dw->buffer = (struct mbuf*)NULL;
449 dw->mge_desc->buffer = (bus_addr_t)NULL;
/*
 * Set up all DMA state for the interface: create the descriptor busdma
 * tag, allocate the TX and RX descriptor rings via mge_alloc_desc_dma(),
 * attach an mbuf cluster to every RX descriptor, and record the bus
 * addresses of both ring heads in the softc.
 */
456 mge_allocate_dma(struct mge_softc *sc)
459 struct mge_desc_wrapper *dw;
463 num = MGE_TX_DESC_NUM + MGE_RX_DESC_NUM;
465 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
466 error = bus_dma_tag_create(NULL, /* parent */
467 16, 0, /* alignment, boundary */
468 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
469 BUS_SPACE_MAXADDR, /* highaddr */
470 NULL, NULL, /* filtfunc, filtfuncarg */
471 sizeof(struct mge_desc), 1, /* maxsize, nsegments */
472 sizeof(struct mge_desc), 0, /* maxsegsz, flags */
473 NULL, NULL, /* lockfunc, lockfuncarg */
474 &sc->mge_desc_dtag); /* dmat */
477 mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
479 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
/* Give every RX descriptor a freshly mapped mbuf cluster. */
482 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
483 dw = &(sc->mge_rx_desc[i]);
484 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
485 &dw->mge_desc->buffer);
488 sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
489 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
/*
 * Release a descriptor ring: for each wrapper, unload/destroy the buffer
 * map (freeing the mbuf first when free_mbufs is set — RX caller passes
 * 1, TX caller 0), then unload and free the descriptor's DMA memory.
 */
495 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
496 uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
498 struct mge_desc_wrapper *dw;
501 for (i = 0; i < size; i++) {
505 if (dw->buffer_dmap) {
507 bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
508 BUS_DMASYNC_POSTREAD);
509 bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
511 bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
515 /* Free RX descriptors */
517 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
518 BUS_DMASYNC_POSTREAD);
519 bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
520 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
527 mge_free_dma(struct mge_softc *sc)
529 /* Free desciptors and mbufs */
530 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
531 mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
533 /* Destroy mbuf dma tag */
534 bus_dma_tag_destroy(sc->mge_tx_dtag);
535 bus_dma_tag_destroy(sc->mge_rx_dtag);
536 /* Destroy descriptors tag */
537 bus_dma_tag_destroy(sc->mge_desc_dtag);
/*
 * Rebuild the RX ring from scratch (used after an RX resource error):
 * free the old descriptors and mbufs, reallocate the ring, repopulate
 * every slot with a fresh cluster, point the hardware at the new ring
 * head, and re-enable the default RX queue. Runs under the receive lock.
 */
541 mge_reinit_rx(struct mge_softc *sc)
543 struct mge_desc_wrapper *dw;
546 MGE_RECEIVE_LOCK(sc);
548 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
550 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
553 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
554 dw = &(sc->mge_rx_desc[i]);
555 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
556 &dw->mge_desc->buffer);
559 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
560 sc->rx_desc_curr = 0;
562 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
565 /* Enable RX queue */
566 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
568 MGE_RECEIVE_UNLOCK(sc);
571 #ifdef DEVICE_POLLING
572 static poll_handler_t mge_poll;
/*
 * DEVICE_POLLING handler: bail out if the interface is down; on
 * POLL_AND_CHECK_STATUS also read and acknowledge the interrupt cause
 * registers (handling an RX resource error); then service TX completions
 * and up to 'count' RX frames. Returns the number of RX packets handled.
 */
575 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
577 struct mge_softc *sc = ifp->if_softc;
578 uint32_t int_cause, int_cause_ext;
583 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
584 MGE_GLOBAL_UNLOCK(sc);
588 if (cmd == POLL_AND_CHECK_STATUS) {
589 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
590 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
592 /* Check for resource error */
593 if (int_cause & MGE_PORT_INT_RXERRQ0)
/* Ack: causes are write-0-to-clear, hence the complemented writes. */
596 if (int_cause || int_cause_ext) {
597 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
598 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
602 mge_intr_tx_locked(sc);
603 rx_npkts = mge_intr_rx_locked(sc, count);
605 MGE_GLOBAL_UNLOCK(sc);
608 #endif /* DEVICE_POLLING */
/*
 * device_attach: initialize locks, allocate bus resources and DMA rings,
 * create and configure the ifnet (checksum offload, VLAN MTU, optional
 * polling), read the hardware MAC address, attach ethernet and the MII
 * PHY, and hook up the interrupt handlers.
 * NOTE(review): many lines (error unwinding, sc_mge0 assignment for
 * unit 0, i/error declarations) are missing from this excerpt.
 */
611 mge_attach(device_t dev)
613 struct mge_softc *sc;
615 uint8_t hwaddr[ETHER_ADDR_LEN];
618 sc = device_get_softc(dev);
/* Unit 0 owns the SMI registers used for all PHY accesses. */
621 if (device_get_unit(dev) == 0)
624 /* Set chip version-dependent parameters */
627 /* Initialize mutexes */
628 mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
629 mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
631 /* Allocate IO and IRQ resources */
632 error = bus_alloc_resources(dev, res_spec, sc->res);
634 device_printf(dev, "could not allocate resources\n");
639 /* Allocate DMA, buffers, buffer descriptors */
640 error = mge_allocate_dma(sc);
646 sc->tx_desc_curr = 0;
647 sc->rx_desc_curr = 0;
648 sc->tx_desc_used_idx = 0;
649 sc->tx_desc_used_count = 0;
651 /* Configure defaults for interrupts coalescing */
652 sc->rx_ic_time = 768;
653 sc->tx_ic_time = 768;
656 /* Allocate network interface */
657 ifp = sc->ifp = if_alloc(IFT_ETHER);
659 device_printf(dev, "if_alloc() failed\n");
664 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
666 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
667 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
668 ifp->if_capenable = ifp->if_capabilities;
669 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
671 #ifdef DEVICE_POLLING
672 /* Advertise that polling is supported */
673 ifp->if_capabilities |= IFCAP_POLLING;
676 ifp->if_init = mge_init;
677 ifp->if_start = mge_start;
678 ifp->if_ioctl = mge_ioctl;
680 ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
681 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
682 IFQ_SET_READY(&ifp->if_snd);
684 mge_get_mac_address(sc, hwaddr);
685 ether_ifattach(ifp, hwaddr);
686 callout_init(&sc->wd_callout, 0);
689 error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd, mge_ifmedia_sts);
691 device_printf(dev, "MII failed to find PHY\n");
697 sc->mii = device_get_softc(sc->miibus);
699 /* Attach interrupt handlers */
/* NOTE(review): only the first 2 of the MGE_INTR_COUNT handlers are
 * hooked here — presumably intentional for this hardware; confirm. */
700 for (i = 0; i < 2; ++i) {
701 error = bus_setup_intr(dev, sc->res[1 + i],
702 INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
703 sc, &sc->ih_cookie[i]);
705 device_printf(dev, "could not setup %s\n",
706 mge_intrs[i].description);
707 ether_ifdetach(sc->ifp);
/*
 * device_detach: stop the controller, drain the watchdog callout,
 * tear down the interrupt handlers installed by mge_attach() (same
 * first-2 set), detach the ifnet, and release DMA, bus resources and
 * mutexes in reverse order of acquisition.
 */
716 mge_detach(device_t dev)
718 struct mge_softc *sc;
721 sc = device_get_softc(dev);
723 /* Stop controller and free TX queue */
727 /* Wait for stopping ticks */
728 callout_drain(&sc->wd_callout);
730 /* Stop and release all interrupts */
731 for (i = 0; i < 2; ++i) {
732 error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
734 device_printf(dev, "could not release %s\n",
735 mge_intrs[i].description);
738 /* Detach network interface */
740 ether_ifdetach(sc->ifp);
744 /* Free DMA resources */
747 /* Free IO memory handler */
748 bus_release_resources(dev, res_spec, sc->res);
750 /* Destroy mutexes */
751 mtx_destroy(&sc->receive_lock);
752 mtx_destroy(&sc->transmit_lock);
/*
 * ifmedia status callback: report the current MII media/status words
 * under the transmit lock.
 */
758 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
760 struct mge_softc *sc = ifp->if_softc;
761 struct mii_data *mii;
763 MGE_TRANSMIT_LOCK(sc);
768 ifmr->ifm_active = mii->mii_media_active;
769 ifmr->ifm_status = mii->mii_media_status;
771 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Translate an ifmedia word into a PORT_SERIAL_CTRL register value:
 * base bits (reserved bit 9, force-link-fail, 1552-byte MRU) plus
 * speed/autoneg bits per media subtype, plus full duplex when requested.
 * NOTE(review): the case labels (presumably IFM_1000_T / IFM_100_TX /
 * default-autoneg) and the duplex condition are missing from this
 * excerpt; code left untouched.
 */
775 mge_set_port_serial_control(uint32_t media)
777 uint32_t port_config;
779 port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
780 PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
782 if (IFM_TYPE(media) == IFM_ETHER) {
783 switch(IFM_SUBTYPE(media)) {
787 port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
788 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
789 PORT_SERIAL_SPEED_AUTONEG);
792 port_config |= (PORT_SERIAL_MII_SPEED_100 |
793 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
794 PORT_SERIAL_SPEED_AUTONEG);
797 port_config |= (PORT_SERIAL_AUTONEG |
798 PORT_SERIAL_AUTONEG_FC |
799 PORT_SERIAL_SPEED_AUTONEG);
803 port_config |= PORT_SERIAL_FULL_DUPLEX;
805 return (port_config);
/*
 * ifmedia change callback: when the interface is up, remember the
 * selected media word in the softc and push the change to the PHY.
 */
809 mge_ifmedia_upd(struct ifnet *ifp)
811 struct mge_softc *sc = ifp->if_softc;
813 if (ifp->if_flags & IFF_UP) {
816 sc->mge_media_status = sc->mii->mii_media.ifm_media;
817 mii_mediachg(sc->mii);
820 MGE_GLOBAL_UNLOCK(sc);
829 struct mge_softc *sc = arg;
833 mge_init_locked(arg);
835 MGE_GLOBAL_UNLOCK(sc);
/*
 * Core (re)initialization, callers hold both locks: program MAC address
 * and filters, TX queue arbitration, MTU, port/SDMA configuration and
 * ring head pointers; arm all RX descriptors; enable the RX queue and
 * the port; wait for link; set up coalescing and interrupts (unless
 * polling is active); mark the interface running and start the watchdog.
 * NOTE(review): several lines (mge_ver checks, link-up wait loop body,
 * coalescing calls) are missing from this excerpt; code left untouched.
 */
839 mge_init_locked(void *arg)
841 struct mge_softc *sc = arg;
842 struct mge_desc_wrapper *dw;
843 volatile uint32_t reg_val;
847 MGE_GLOBAL_LOCK_ASSERT(sc);
852 /* Disable interrupts */
853 mge_intrs_ctrl(sc, 0);
855 /* Set MAC address */
856 mge_set_mac_address(sc);
858 /* Setup multicast filters */
859 mge_setup_multicast(sc);
861 if (sc->mge_ver == 2) {
862 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
863 MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
866 /* Initialize TX queue configuration registers */
867 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
868 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
869 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
871 /* Clear TX queue configuration registers for unused queues */
872 for (i = 1; i < 7; i++) {
873 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
874 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
875 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
878 /* Set default MTU */
879 MGE_WRITE(sc, sc->mge_mtu, 0);
881 /* Port configuration */
882 MGE_WRITE(sc, MGE_PORT_CONFIG,
883 PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
884 PORT_CONFIG_ARO_RXQ(0));
885 MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);
887 /* Setup port configuration */
888 reg_val = mge_set_port_serial_control(sc->mge_media_status);
889 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
891 /* Setup SDMA configuration */
892 MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
893 MGE_SDMA_TX_BYTE_SWAP |
894 MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
895 MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
897 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
/* Point the hardware at the start of both descriptor rings. */
899 MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
900 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
903 /* Reset descriptor indexes */
904 sc->tx_desc_curr = 0;
905 sc->rx_desc_curr = 0;
906 sc->tx_desc_used_idx = 0;
907 sc->tx_desc_used_count = 0;
909 /* Enable RX descriptors */
910 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
911 dw = &sc->mge_rx_desc[i];
912 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
913 dw->mge_desc->buff_size = MCLBYTES;
914 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
915 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
918 /* Enable RX queue */
919 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
/* Bring the port up. */
922 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
923 reg_val |= PORT_SERIAL_ENABLE;
924 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
/* Wait for link-up (loop structure not visible in this excerpt). */
927 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
928 if (reg_val & MGE_STATUS_LINKUP)
932 if_printf(sc->ifp, "Timeout on link-up\n");
937 /* Setup interrupts coalescing */
941 /* Enable interrupts */
942 #ifdef DEVICE_POLLING
944 * ...only if polling is not turned on. Disable interrupts explicitly
945 * if polling is enabled.
947 if (sc->ifp->if_capenable & IFCAP_POLLING)
948 mge_intrs_ctrl(sc, 0);
950 #endif /* DEVICE_POLLING */
951 mge_intrs_ctrl(sc, 1);
953 /* Activate network interface */
954 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
955 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
958 /* Schedule watchdog timeout */
959 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/* Error interrupt handler — currently just logs that it fired. */
963 mge_intr_err(void *arg)
965 struct mge_softc *sc = arg;
969 if_printf(ifp, "%s\n", __FUNCTION__);
/* Miscellaneous interrupt handler — currently just logs that it fired. */
973 mge_intr_misc(void *arg)
975 struct mge_softc *sc = arg;
979 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * RX interrupt handler: skip entirely when polling owns the device;
 * otherwise read/ack the cause registers (handling RX resource errors),
 * and process all pending frames via mge_intr_rx_locked(sc, -1).
 */
983 mge_intr_rx(void *arg) {
984 struct mge_softc *sc = arg;
985 uint32_t int_cause, int_cause_ext;
987 MGE_RECEIVE_LOCK(sc);
989 #ifdef DEVICE_POLLING
990 if (sc->ifp->if_capenable & IFCAP_POLLING) {
991 MGE_RECEIVE_UNLOCK(sc);
996 /* Get interrupt cause */
997 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
998 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1000 /* Check for resource error */
1001 if (int_cause & MGE_PORT_INT_RXERRQ0) {
/* Ack the resource-error bit (write-0-to-clear semantics). */
1003 MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1004 int_cause & ~MGE_PORT_INT_RXERRQ0);
/* Only the RXQ0 / RX-out-of-resources bits matter from here on. */
1007 int_cause &= MGE_PORT_INT_RXQ0;
1008 int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1010 if (int_cause || int_cause_ext) {
1011 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1012 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1013 mge_intr_rx_locked(sc, -1);
1016 MGE_RECEIVE_UNLOCK(sc);
/*
 * Process up to 'count' received frames (count < 0 means "until the
 * hardware owns the next descriptor"): copy each completed frame into a
 * new mbuf chain with m_devget(), strip the CRC and the 2-byte HW
 * prefix, apply checksum-offload results, hand it to if_input() with
 * the RX lock dropped, then recycle the descriptor back to the DMA
 * engine. Caller must hold the receive lock.
 */
1021 mge_intr_rx_locked(struct mge_softc *sc, int count)
1023 struct ifnet *ifp = sc->ifp;
1026 struct mge_desc_wrapper* dw;
1030 MGE_RECEIVE_LOCK_ASSERT(sc);
1032 while (count != 0) {
1033 dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1034 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1035 BUS_DMASYNC_POSTREAD);
1038 status = dw->mge_desc->cmd_status;
1039 bufsize = dw->mge_desc->buff_size;
/* Stop at the first descriptor still owned by the DMA engine. */
1040 if ((status & MGE_DMA_OWNED) != 0)
/* NOTE(review): '~(status & MGE_ERR_SUMMARY)' is bitwise NOT of a
 * masked value and is nonzero for every input, so this condition never
 * filters errored frames — looks like it should be '!'; confirm. */
1043 if (dw->mge_desc->byte_count &&
1044 ~(status & MGE_ERR_SUMMARY)) {
1046 bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1047 BUS_DMASYNC_POSTREAD);
1049 mb = m_devget(dw->buffer->m_data,
1050 dw->mge_desc->byte_count - ETHER_CRC_LEN,
1054 /* Give up if no mbufs */
/* Drop the 2-byte alignment prefix the hardware prepends. */
1058 mb->m_pkthdr.len -= 2;
1061 mge_offload_process_frame(ifp, mb, status,
/* if_input may sleep/recurse; call it without the RX lock. */
1064 MGE_RECEIVE_UNLOCK(sc);
1065 (*ifp->if_input)(ifp, mb);
1066 MGE_RECEIVE_LOCK(sc);
/* Return the descriptor to the hardware and advance the ring index. */
1070 dw->mge_desc->byte_count = 0;
1071 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1072 sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
1073 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1074 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Summary interrupt handler — currently just logs that it fired. */
1084 mge_intr_sum(void *arg)
1086 struct mge_softc *sc = arg;
1090 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * TX interrupt handler: skip when polling owns the device; otherwise
 * acknowledge the TXBUF0 cause bit and reap finished transmissions.
 */
1094 mge_intr_tx(void *arg)
1096 struct mge_softc *sc = arg;
1097 uint32_t int_cause_ext;
1099 MGE_TRANSMIT_LOCK(sc);
1101 #ifdef DEVICE_POLLING
1102 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1103 MGE_TRANSMIT_UNLOCK(sc);
1108 /* Ack the interrupt */
1109 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1110 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
1111 int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);
1113 mge_intr_tx_locked(sc);
1115 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Reap completed TX descriptors: walk from tx_desc_used_idx until a
 * descriptor is still DMA-owned, updating collision statistics, freeing
 * each transmitted mbuf and its DMA mapping, then clear OACTIVE and
 * restart transmission. Caller must hold the transmit lock.
 */
1120 mge_intr_tx_locked(struct mge_softc *sc)
1122 struct ifnet *ifp = sc->ifp;
1123 struct mge_desc_wrapper *dw;
1124 struct mge_desc *desc;
1128 MGE_TRANSMIT_LOCK_ASSERT(sc);
1130 /* Disable watchdog */
1133 while (sc->tx_desc_used_count) {
1134 /* Get the descriptor */
1135 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1136 desc = dw->mge_desc;
1137 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1138 BUS_DMASYNC_POSTREAD);
1140 /* Get descriptor status */
1141 status = desc->cmd_status;
/* Stop at the first descriptor the hardware hasn't finished. */
1143 if (status & MGE_DMA_OWNED)
/* NOTE(review): '(++x) % N' assigned back to x modifies x twice
 * without a sequence point (UB in C); also note the stray ';;'. */
1146 sc->tx_desc_used_idx =
1147 (++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;;
1148 sc->tx_desc_used_count--;
1150 /* Update collision statistics */
1151 if (status & MGE_ERR_SUMMARY) {
1152 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1153 ifp->if_collisions++;
1154 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1155 ifp->if_collisions += 16;
/* Release the mbuf and its DMA mapping for this slot. */
1158 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1159 BUS_DMASYNC_POSTWRITE);
1160 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1161 m_freem(dw->buffer);
1162 dw->buffer = (struct mbuf*)NULL;
1169 /* Now send anything that was pending */
1170 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1171 mge_start_locked(ifp);
/*
 * ifnet ioctl handler. Visible cases: SIOCSIFFLAGS (promisc/allmulti
 * deltas or full reinit, tracked in sc->mge_if_flags), multicast list
 * updates, SIOCSIFCAP (HWCSUM and DEVICE_POLLING toggles), and
 * SIOC[GS]IFMEDIA (rejecting 1000baseT half-duplex); everything else
 * falls through to ether_ioctl().
 * NOTE(review): the switch statement and several case labels are not
 * visible in this excerpt; code left untouched.
 */
1176 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1178 struct mge_softc *sc = ifp->if_softc;
1179 struct ifreq *ifr = (struct ifreq *)data;
1187 MGE_GLOBAL_LOCK(sc);
1189 if (ifp->if_flags & IFF_UP) {
1190 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Already running: apply only the flag bits that changed. */
1191 flags = ifp->if_flags ^ sc->mge_if_flags;
1192 if (flags & IFF_PROMISC)
1193 mge_set_prom_mode(sc,
1194 MGE_RX_DEFAULT_QUEUE);
1196 if (flags & IFF_ALLMULTI)
1197 mge_setup_multicast(sc);
1199 mge_init_locked(sc);
1201 else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1204 sc->mge_if_flags = ifp->if_flags;
1205 MGE_GLOBAL_UNLOCK(sc);
1209 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1210 MGE_GLOBAL_LOCK(sc);
1211 mge_setup_multicast(sc);
1212 MGE_GLOBAL_UNLOCK(sc);
1216 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1217 if (mask & IFCAP_HWCSUM) {
1218 ifp->if_capenable &= ~IFCAP_HWCSUM;
1219 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1220 if (ifp->if_capenable & IFCAP_TXCSUM)
1221 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1223 ifp->if_hwassist = 0;
1225 #ifdef DEVICE_POLLING
1226 if (mask & IFCAP_POLLING) {
1227 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1228 error = ether_poll_register(mge_poll, ifp);
/* Polling on: mask device interrupts, the poller drives I/O. */
1232 MGE_GLOBAL_LOCK(sc);
1233 mge_intrs_ctrl(sc, 0);
1234 ifp->if_capenable |= IFCAP_POLLING;
1235 MGE_GLOBAL_UNLOCK(sc);
1237 error = ether_poll_deregister(ifp);
1238 MGE_GLOBAL_LOCK(sc);
1239 mge_intrs_ctrl(sc, 1);
1240 ifp->if_capenable &= ~IFCAP_POLLING;
1241 MGE_GLOBAL_UNLOCK(sc);
1246 case SIOCGIFMEDIA: /* fall through */
1248 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1249 && !(ifr->ifr_media & IFM_FDX)) {
1250 device_printf(sc->dev,
1251 "1000baseTX half-duplex unsupported\n");
1254 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1257 error = ether_ioctl(ifp, command, data);
/*
 * miibus read: all PHY access goes through unit 0's SMI registers
 * (sc_mge0). Rejects PHY addresses that don't match this device's
 * expected MII_ADDR_BASE + unit mapping, issues a read command, and
 * polls for READVALID with bounded retries before returning the data.
 */
1263 mge_miibus_readreg(device_t dev, int phy, int reg)
1268 * We assume static PHY address <=> device unit mapping:
1269 * PHY Address = MII_ADDR_BASE + device unit.
1270 * This is true for most Marvell boards.
1272 * Code below grants proper PHY detection on each device
1277 if ((MII_ADDR_BASE + device_get_unit(dev)) != phy)
1280 MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1281 (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1283 retries = MGE_SMI_READ_RETRIES;
1284 while (--retries && !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
1285 DELAY(MGE_SMI_READ_DELAY);
1288 device_printf(dev, "Timeout while reading from PHY\n");
/* Low 16 bits of the SMI register hold the read-back value. */
1290 return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
/*
 * miibus write: mirror of mge_miibus_readreg() — verify the PHY address
 * mapping, issue the SMI write command through unit 0's registers, and
 * poll the BUSY bit with bounded retries.
 */
1294 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1298 if ((MII_ADDR_BASE + device_get_unit(dev)) != phy)
1301 MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1302 (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1304 retries = MGE_SMI_WRITE_RETRIES;
1305 while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
1306 DELAY(MGE_SMI_WRITE_DELAY);
1309 device_printf(dev, "Timeout while writing to PHY\n");
/* device_probe: unconditionally claim the device with a description. */
1314 mge_probe(device_t dev)
1317 device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1318 return (BUS_PROBE_DEFAULT);
/* device_resume: stub — only logs that it was called. */
1322 mge_resume(device_t dev)
1325 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * device_shutdown: under the global lock, deregister the poll handler
 * if polling is active and stop the controller.
 */
1330 mge_shutdown(device_t dev)
1332 struct mge_softc *sc = device_get_softc(dev);
1334 MGE_GLOBAL_LOCK(sc);
1336 #ifdef DEVICE_POLLING
1337 if (sc->ifp->if_capenable & IFCAP_POLLING)
1338 ether_poll_deregister(sc->ifp);
1343 MGE_GLOBAL_UNLOCK(sc);
/*
 * Map mbuf m0 for transmission into the next free TX descriptor: the
 * chain must fit a single DMA segment (callers defragment first), the
 * descriptor is armed as FIRST|LAST with CRC/padding/interrupt bits and
 * checksum offload, then the ring index and used-count are advanced.
 * Returns nonzero if no descriptor is free or the mapping fails.
 */
1349 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1351 struct mge_desc_wrapper *dw = NULL;
1353 bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1361 /* Check for free descriptors */
1362 if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1363 /* No free descriptors */
1367 /* Fetch unused map */
1368 desc_no = sc->tx_desc_curr;
1369 dw = &sc->mge_tx_desc[desc_no];
1370 mapp = dw->buffer_dmap;
/* NOTE(review): 'mapp' is a buffer map but is synced here (and at the
 * descriptor sync below) against the descriptor tag mge_desc_dtag —
 * looks like a tag/map mismatch (dw->desc_dmap expected); confirm. */
1372 bus_dmamap_sync(sc->mge_desc_dtag, mapp,
1373 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1375 /* Create mapping in DMA memory */
1376 error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1378 if (error != 0 || nsegs != 1 ) {
1379 bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1380 return ((error != 0) ? error : -1);
1383 bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1385 /* Everything is ok, now we can send buffers */
1386 for (seg = 0; seg < nsegs; seg++) {
1387 dw->mge_desc->byte_count = segs[seg].ds_len;
1388 dw->mge_desc->buffer = segs[seg].ds_addr;
1390 dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
1391 MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1395 mge_offload_setup_descriptor(sc, dw);
1398 bus_dmamap_sync(sc->mge_desc_dtag, mapp,
1399 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* NOTE(review): '(++x) % N' assigned back to x is an unsequenced
 * double modification of tx_desc_curr — UB in C; confirm/fix. */
1401 sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
1402 sc->tx_desc_used_count++;
/*
 * Periodic (1 Hz) callout: checks TX watchdog state, refreshes media
 * status if it changed, and re-arms itself.  The function header line
 * ("mge_tick(void *msc)" or similar) is missing from this extract — the
 * self-reference in callout_reset() below confirms the name.
 */
1409 struct mge_softc *sc = msc;
1411 /* Check for TX timeout */
1416 /* Check for media type change */
1417 if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1418 mge_ifmedia_upd(sc->ifp);
1420 /* Schedule another timeout one second from now */
1421 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * TX watchdog: called from mge_tick().  Decrements the timer under the
 * global lock; when it expires (reaches zero here), reports the timeout
 * and re-initializes the interface.
 */
1425 mge_watchdog(struct mge_softc *sc)
1431 MGE_GLOBAL_LOCK(sc);
/* Timer disarmed (0) or still counting down: nothing to do yet. */
1433 if (sc->wd_timer == 0 || --sc->wd_timer) {
1434 MGE_GLOBAL_UNLOCK(sc);
1439 if_printf(ifp, "watchdog timeout\n");
/* Full re-init recovers the stuck transmitter. */
1442 mge_init_locked(sc);
1444 MGE_GLOBAL_UNLOCK(sc);
/*
 * if_start entry point: acquire the transmit lock and hand off to the
 * locked worker.
 */
1448 mge_start(struct ifnet *ifp)
1450 struct mge_softc *sc = ifp->if_softc;
1452 MGE_TRANSMIT_LOCK(sc);
1454 mge_start_locked(ifp);
1456 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Locked transmit worker: drains the if_snd queue, defragmenting each
 * mbuf chain into one segment (mge_encap() accepts single-segment loads
 * only), encapsulating it, and finally kicking the TX queue.  On encap
 * failure the packet is pushed back and OACTIVE is set so the stack
 * stops handing us packets until descriptors free up.
 * Caller must hold the transmit lock (asserted below).
 */
1460 mge_start_locked(struct ifnet *ifp)
1462 struct mge_softc *sc;
1463 struct mbuf *m0, *mtmp;
1464 uint32_t reg_val, queued = 0;
1468 MGE_TRANSMIT_LOCK_ASSERT(sc);
/* Bail unless the interface is RUNNING and not already OACTIVE. */
1470 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1475 /* Get packet from the queue */
1476 IF_DEQUEUE(&ifp->if_snd, m0);
/* Collapse the chain to one contiguous mbuf for single-segment DMA. */
1480 mtmp = m_defrag(m0, M_DONTWAIT);
1484 if (mge_encap(sc, m0)) {
/* No descriptor available: requeue and throttle the stack. */
1485 IF_PREPEND(&ifp->if_snd, m0);
1486 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1494 /* Enable transmitter and watchdog timer */
1495 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1496 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
/*
 * Stop the interface: cancel the tick callout, mask interrupts, disable
 * the RX/TX queues, reclaim every pending TX descriptor (freeing its
 * mbuf), wait for the transmitter to drain, then disable the port.
 * No-op if the interface is not RUNNING.
 */
1502 mge_stop(struct mge_softc *sc)
1505 volatile uint32_t reg_val, status;
1506 struct mge_desc_wrapper *dw;
1507 struct mge_desc *desc;
1512 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1515 /* Stop tick engine */
1516 callout_stop(&sc->wd_callout);
1518 /* Disable interface */
1519 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1522 /* Disable interrupts */
1523 mge_intrs_ctrl(sc, 0);
1525 /* Disable Rx and Tx */
1526 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1527 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1528 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1530 /* Remove pending data from TX queue */
1531 while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1532 sc->tx_desc_used_count) {
1533 /* Get the descriptor */
1534 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1535 desc = dw->mge_desc;
1536 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1537 BUS_DMASYNC_POSTREAD);
1539 /* Get descriptor status */
1540 status = desc->cmd_status;
/* Descriptor still owned by the DMA engine: stop reclaiming here. */
1542 if (status & MGE_DMA_OWNED)
/*
 * NOTE(review): same unsequenced "(++x) % N" UB pattern as in
 * mge_encap() — should be "(sc->tx_desc_used_idx + 1) % ...".
 */
1545 sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) %
1547 sc->tx_desc_used_count--;
1549 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1550 BUS_DMASYNC_POSTWRITE);
1551 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1553 m_freem(dw->buffer);
1554 dw->buffer = (struct mbuf*)NULL;
1557 /* Wait for end of transmission */
/* Poll until TX is idle and its FIFO has drained (loop bound elided). */
1560 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1561 if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1562 (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1568 if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
/* Finally drop PORT_SERIAL_ENABLE to shut the port down. */
1571 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1572 reg_val &= ~(PORT_SERIAL_ENABLE);
1573 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
/*
 * Suspend method: stub — only logs the function name (mirrors
 * mge_resume() above).
 */
1577 mge_suspend(device_t dev)
1580 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * RX checksum offload: translate the hardware's per-frame status bits
 * into mbuf csum_flags.  IP header checksum is marked checked/valid when
 * the NIC says so; L4 (TCP/UDP) pseudo-header checksum is accepted only
 * for non-fragmented frames, with csum_data forced to 0xFFFF as the
 * stack expects for CSUM_PSEUDO_HDR.
 */
1585 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1586 uint32_t status, uint16_t bufsize)
1590 if (ifp->if_capenable & IFCAP_RXCSUM) {
1591 if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1592 csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
/* Fragment flag lives in the bufsize word, not in status. */
1594 if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1595 (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1596 (status & MGE_RX_L4_CSUM_OK)) {
1597 csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1598 frame->m_pkthdr.csum_data = 0xFFFF;
1601 frame->m_pkthdr.csum_flags = csum_flags;
/*
 * TX checksum offload: inspect the frame's Ethernet/VLAN header and the
 * mbuf's csum_flags, and OR the matching hardware offload bits into the
 * descriptor's cmd_status.  Only IPv4 is supported; other ethertypes
 * get a warning (printf line partially elided in this extract).
 */
1606 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1608 struct mbuf *m0 = dw->buffer;
1609 struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1610 int csum_flags = m0->m_pkthdr.csum_flags;
/* VLAN-tagged frames: real ethertype is behind the 4-byte encap. */
1616 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1617 etype = ntohs(eh->evl_proto);
1618 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1619 csum_flags |= MGE_TX_VLAN_TAGGED;
1621 etype = ntohs(eh->evl_encap_proto);
1622 ehlen = ETHER_HDR_LEN;
1625 if (etype != ETHERTYPE_IP) {
1627 "TCP/IP Offload enabled for unsupported "
/* Hardware needs the IP header length (in 32-bit words) from the frame. */
1632 ip = (struct ip *)(m0->m_data + ehlen);
1633 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1635 if ((m0->m_flags & M_FRAG) == 0)
1636 cmd_status |= MGE_TX_NOT_FRAGMENT;
1639 if (csum_flags & CSUM_IP)
1640 cmd_status |= MGE_TX_GEN_IP_CSUM;
1642 if (csum_flags & CSUM_TCP)
1643 cmd_status |= MGE_TX_GEN_L4_CSUM;
/* UDP shares the L4-csum bit but also needs the UDP-type flag. */
1645 if (csum_flags & CSUM_UDP)
1646 cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1648 dw->mge_desc->cmd_status |= cmd_status;
/*
 * Enable or disable the controller's interrupt sources.
 * enable != 0: unmask RX queue 0, RX error, and (via the extended mask)
 * TX error/underrun/buffer and RX overrun interrupts.
 * enable == 0: clear all cause and mask registers, silencing the port.
 */
1652 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1656 MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1657 MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1658 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1659 MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1660 MGE_PORT_INT_EXT_TXBUF0);
/* Disable path: ack any latched causes, then mask everything. */
1662 MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1663 MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1665 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1666 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1668 MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1669 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
/*
 * Table-driven CRC-8 over 'size' bytes of 'data'; used by
 * mge_setup_multicast() to index the "other" multicast hash table.
 * The table appears to be the standard CRC-8 table for polynomial 0x07
 * (x^8 + x^2 + x + 1) — ct[1] == 0x07; confirm against datasheet.
 */
1674 mge_crc8(uint8_t *data, int size)
1677 static const uint8_t ct[256] = {
1678 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1679 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1680 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1681 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1682 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1683 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1684 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1685 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1686 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1687 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1688 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1689 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1690 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1691 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1692 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1693 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1694 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1695 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1696 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
1697 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
1698 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
1699 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
1700 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
1701 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
1702 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
1703 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
1704 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
1705 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
1706 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
1707 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
1708 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
1709 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
/* Per-byte table update; loop header elided in this extract. */
1713 crc = ct[crc ^ *(data++)];
/*
 * Program the hardware multicast filter tables.
 * Two 64-entry tables exist: a "special" table for addresses with the
 * 01:00:5E:00:00:xx prefix (indexed by the last octet) and an "other"
 * table indexed by mge_crc8() of the full MAC.  Each matching slot is
 * loaded with v = (default RX queue << 1) | pass-bit.  IFF_ALLMULTI
 * fills every slot of both tables instead.
 */
1719 mge_setup_multicast(struct mge_softc *sc)
1721 uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
1722 uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
1723 uint32_t smt[MGE_MCAST_REG_NUMBER];
1724 uint32_t omt[MGE_MCAST_REG_NUMBER];
1725 struct ifnet *ifp = sc->ifp;
1726 struct ifmultiaddr *ifma;
1730 if (ifp->if_flags & IFF_ALLMULTI) {
/* Accept-all: replicate v into every byte of every table register. */
1731 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
1732 smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
1734 memset(smt, 0, sizeof(smt));
1735 memset(omt, 0, sizeof(omt));
1737 if_maddr_rlock(ifp);
1738 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1739 if (ifma->ifma_addr->sa_family != AF_LINK)
1742 mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1743 if (memcmp(mac, special, sizeof(special)) == 0) {
/* Four byte-slots per 32-bit register; i selects slot and shift. */
1745 smt[i >> 2] |= v << ((i & 0x03) << 3);
1747 i = mge_crc8(mac, ETHER_ADDR_LEN);
1748 omt[i >> 2] |= v << ((i & 0x03) << 3);
1751 if_maddr_runlock(ifp);
/* Flush both shadow tables to the hardware filter registers. */
1754 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
1755 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
1756 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
/*
 * Apply the RX interrupt-coalescing time: clamp rx_ic_time to the
 * hardware maximum, then rewrite the IPG field of the SDMA config
 * register (clear the full-width field, OR in the new value).
 */
1761 mge_set_rxic(struct mge_softc *sc)
1765 if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1766 sc->rx_ic_time = sc->mge_rx_ipg_max;
1768 reg = MGE_READ(sc, MGE_SDMA_CONFIG);
/* mge_rx_ipg(max, ver) yields the field's full bitmask for this chip. */
1769 reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1770 reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1771 MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
/*
 * Apply the TX interrupt-coalescing time: clamp tx_ic_time to the
 * hardware maximum, then rewrite the IPG field of the TX FIFO urgent
 * threshold register (parallel to mge_set_rxic() above).
 */
1775 mge_set_txic(struct mge_softc *sc)
1779 if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1780 sc->tx_ic_time = sc->mge_tfut_ipg_max;
1782 reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
/* mge_tfut_ipg(max, ver) yields the field's full bitmask for this chip. */
1783 reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1784 reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1785 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
/*
 * Sysctl handler shared by the rx_time/tx_time coalescing knobs.
 * arg2 selects which timer (MGE_IC_RX or MGE_IC_TX).  Reports the
 * current value; on a write, stores the new value under the global
 * lock (the mge_set_rxic/mge_set_txic calls are elided in this
 * extract — presumably invoked before unlock; confirm in full source).
 */
1789 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1791 struct mge_softc *sc = (struct mge_softc *)arg1;
1795 time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1796 error = sysctl_handle_int(oidp, &time, 0, req);
1800 MGE_GLOBAL_LOCK(sc);
1801 if (arg2 == MGE_IC_RX) {
1802 sc->rx_ic_time = time;
1805 sc->tx_ic_time = time;
1808 MGE_GLOBAL_UNLOCK(sc);
1814 mge_add_sysctls(struct mge_softc *sc)
1816 struct sysctl_ctx_list *ctx;
1817 struct sysctl_oid_list *children;
1818 struct sysctl_oid *tree;
1820 ctx = device_get_sysctl_ctx(sc->dev);
1821 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1822 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1823 CTLFLAG_RD, 0, "MGE Interrupts coalescing");
1824 children = SYSCTL_CHILDREN(tree);
1826 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1827 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1828 "I", "IC RX time threshold");
1829 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1830 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1831 "I", "IC TX time threshold");