2 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5 * Developed by Semihalf.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of MARVELL nor the names of contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
50 #include <net/ethernet.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
59 #include <netinet/in_systm.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
63 #include <sys/sockio.h>
65 #include <machine/bus.h>
67 #include <machine/resource.h>
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
73 #define MII_ADDR_BASE 8
76 #include <dev/mge/if_mgevar.h>
77 #include <arm/mv/mvreg.h>
78 #include <arm/mv/mvvar.h>
80 #include "miibus_if.h"
82 /* PHY registers are in the address space of the first mge unit */
/* NOTE(review): cached softc of unit 0; the miibus read/write routines below
 * issue all SMI transactions through this unit's registers. */
83 static struct mge_softc *sc_mge0 = NULL;
85 static int mge_probe(device_t dev);
86 static int mge_attach(device_t dev);
87 static int mge_detach(device_t dev);
88 static int mge_shutdown(device_t dev);
89 static int mge_suspend(device_t dev);
90 static int mge_resume(device_t dev);
92 static int mge_miibus_readreg(device_t dev, int phy, int reg);
93 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
95 static int mge_ifmedia_upd(struct ifnet *ifp);
96 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
98 static void mge_init(void *arg);
99 static void mge_init_locked(void *arg);
100 static void mge_start(struct ifnet *ifp);
101 static void mge_start_locked(struct ifnet *ifp);
102 static void mge_watchdog(struct mge_softc *sc);
103 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
105 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
106 static uint32_t mge_rx_ipg(uint32_t val, int ver);
107 static void mge_ver_params(struct mge_softc *sc);
109 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
110 static void mge_intr_rx(void *arg);
111 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
112 static void mge_intr_tx(void *arg);
113 static void mge_intr_tx_locked(struct mge_softc *sc);
114 static void mge_intr_misc(void *arg);
115 static void mge_intr_sum(void *arg);
116 static void mge_intr_err(void *arg);
117 static void mge_stop(struct mge_softc *sc);
118 static void mge_tick(void *msc);
119 static uint32_t mge_set_port_serial_control(uint32_t media);
120 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
121 static void mge_set_mac_address(struct mge_softc *sc);
122 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
124 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
125 static int mge_allocate_dma(struct mge_softc *sc);
126 static int mge_alloc_desc_dma(struct mge_softc *sc,
127 struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
128 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
129 struct mbuf **mbufp, bus_addr_t *paddr);
130 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
131 static void mge_free_dma(struct mge_softc *sc);
132 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
133 bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
134 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
135 uint32_t status, uint16_t bufsize);
136 static void mge_offload_setup_descriptor(struct mge_softc *sc,
137 struct mge_desc_wrapper *dw);
138 static uint8_t mge_crc8(uint8_t *data, int size);
139 static void mge_setup_multicast(struct mge_softc *sc);
140 static void mge_set_rxic(struct mge_softc *sc);
141 static void mge_set_txic(struct mge_softc *sc);
142 static void mge_add_sysctls(struct mge_softc *sc);
143 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
/*
 * newbus device and MII bus method dispatch table for the mge driver.
 * NOTE(review): the table terminator is not visible in this chunk -- confirm
 * it is present in the full file.
 */
145 static device_method_t mge_methods[] = {
146 /* Device interface */
147 DEVMETHOD(device_probe, mge_probe),
148 DEVMETHOD(device_attach, mge_attach),
149 DEVMETHOD(device_detach, mge_detach),
150 DEVMETHOD(device_shutdown, mge_shutdown),
151 DEVMETHOD(device_suspend, mge_suspend),
152 DEVMETHOD(device_resume, mge_resume),
/* MII interface: PHY register access is routed through this driver. */
154 DEVMETHOD(miibus_readreg, mge_miibus_readreg),
155 DEVMETHOD(miibus_writereg, mge_miibus_writereg),
/* Driver registration: attaches to the Marvell "mbus" bus and hosts miibus. */
159 static driver_t mge_driver = {
162 sizeof(struct mge_softc),
165 static devclass_t mge_devclass;
167 DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0);
168 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
169 MODULE_DEPEND(mge, ether, 1, 1, 1);
170 MODULE_DEPEND(mge, miibus, 1, 1, 1);
/*
 * Bus resources: one register window plus five IRQ lines (rx, tx, misc,
 * summary, error -- matching the mge_intrs[] table below).
 * NOTE(review): the { -1, 0 } terminator is not visible in this chunk.
 */
172 static struct resource_spec res_spec[] = {
173 { SYS_RES_MEMORY, 0, RF_ACTIVE },
174 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
175 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
176 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
177 { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
178 { SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
/*
 * Per-IRQ handler/description table, indexed by the setup/teardown loops in
 * attach/detach. NOTE(review): the struct header line is not visible here.
 */
183 driver_intr_t *handler;
185 } mge_intrs[MGE_INTR_COUNT] = {
186 { mge_intr_rx, "GbE receive interrupt" },
187 { mge_intr_tx, "GbE transmit interrupt" },
188 { mge_intr_misc,"GbE misc interrupt" },
189 { mge_intr_sum, "GbE summary interrupt" },
190 { mge_intr_err, "GbE error interrupt" },
/*
 * Read the MAC address currently programmed in the controller's
 * MAC_ADDR_H/L registers and unpack it into addr[0..5]; addr[0] is the most
 * significant byte (top byte of MAC_ADDR_H).
 */
194 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
196 uint32_t mac_l, mac_h;
198 /* XXX use currently programmed MAC address; eventually this info will
199 * be provided by the loader */
201 mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
202 mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
/* Big-endian unpack: four high bytes from MAC_ADDR_H, two low from MAC_ADDR_L. */
204 addr[0] = (mac_h & 0xff000000) >> 24;
205 addr[1] = (mac_h & 0x00ff0000) >> 16;
206 addr[2] = (mac_h & 0x0000ff00) >> 8;
207 addr[3] = (mac_h & 0x000000ff);
208 addr[4] = (mac_l & 0x0000ff00) >> 8;
209 addr[5] = (mac_l & 0x000000ff);
/*
 * Encode a TX FIFO urgent threshold value into its register layout for the
 * given controller version: the value is masked to the version's maximum
 * (0x3fff for v1, 0xffff for v2 -- see mge_ver_params()) and shifted into
 * place. Restores the version switch that the mangled source lost.
 */
static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 4);
	case 2:
	default:
		return ((val & 0xffff) << 4);
	}
}
/*
 * Encode an RX interrupt coalescing (IPG) value into its register layout for
 * the given controller version. On v1 the 14-bit value occupies bits 21:8;
 * on v2 the 16-bit value is split: bit 15 lands at bit 25, bits 14:0 at
 * bits 21:7. Restores the version switch that the mangled source lost.
 */
static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 8);
	case 2:
	default:
		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
	}
}
/*
 * Select chip-version-dependent limits and TX queue tuning values.
 * 88F6281 / MV78100 SoCs get the "version 2" register layouts and wider
 * maxima; everything else falls back to version 1.
 * NOTE(review): 'd' is presumably the SoC device id (soc_id/cpu_dev) -- the
 * line obtaining it is not visible in this chunk; confirm in the full file.
 */
239 mge_ver_params(struct mge_softc *sc)
244 if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
245 d == MV_DEV_MV78100_Z0) {
/* v2: 16-bit IPG fields, weighted TX arbiter configuration. */
248 sc->mge_tfut_ipg_max = 0xFFFF;
249 sc->mge_rx_ipg_max = 0xFFFF;
250 sc->mge_tx_arb_cfg = 0xFC0000FF;
251 sc->mge_tx_tok_cfg = 0xFFFF7FFF;
252 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/* v1 (default): 14-bit IPG fields. */
256 sc->mge_tfut_ipg_max = 0x3FFF;
257 sc->mge_rx_ipg_max = 0x3FFF;
258 sc->mge_tx_arb_cfg = 0x000000FF;
259 sc->mge_tx_tok_cfg = 0x3FFFFFFF;
260 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/*
 * Program the interface's link-level address (IF_LLADDR) into the
 * controller's MAC_ADDR_H/L registers and install the matching unicast
 * filter entry for the default RX queue. Caller must hold both driver locks.
 */
265 mge_set_mac_address(struct mge_softc *sc)
268 uint32_t mac_l, mac_h;
270 MGE_GLOBAL_LOCK_ASSERT(sc);
272 if_mac = (char *)IF_LLADDR(sc->ifp);
/* Pack bytes 4-5 into the low register, bytes 0-3 into the high one. */
274 mac_l = (if_mac[4] << 8) | (if_mac[5]);
275 mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
276 (if_mac[2] << 8) | (if_mac[3] << 0);
278 MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
279 MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
/* Unicast hash filter is keyed on the last address byte. */
281 mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
/*
 * Install a unicast DA filter entry: the table is indexed by the last byte
 * of the MAC address, 4 entries per 32-bit register, 8 bits per entry
 * (bit 0 = pass, bits 3:1 = target RX queue).
 * NOTE(review): the if/else selecting reg_idx vs. 0 inside the loop is not
 * visible in this chunk (lines between the two writes are missing).
 */
285 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
287 uint32_t reg_idx, reg_off, reg_val, i;
290 reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
291 reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
292 reg_val = (1 | (queue << 1)) << reg_off;
/* Write the match entry to its register; clear all the others. */
294 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
296 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
298 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
/*
 * Enable or disable promiscuous reception based on IFF_PROMISC.
 * Enabling sets the Unicast Promiscuous Mode bit and opens every entry of
 * the special/other multicast and unicast DA filter tables; disabling
 * clears UPM, wipes the multicast tables and reinstalls the station
 * address filter via mge_set_mac_address().
 */
303 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
305 uint32_t port_config;
308 /* Enable or disable promiscuous mode as needed */
309 if (sc->ifp->if_flags & IFF_PROMISC) {
310 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
311 port_config |= PORT_CONFIG_UPM;
312 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
/* Replicate one "pass to queue" byte into all four entry slots. */
314 reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
315 (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
317 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
318 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
319 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
321 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
323 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
/* else: leave promiscuous mode. */
326 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
327 port_config &= ~PORT_CONFIG_UPM;
328 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
330 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
331 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
332 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
/* Restores the unicast filter for the station address. */
335 mge_set_mac_address(sc);
/*
 * bus_dmamap_load() callback: stores the single segment's bus address into
 * the bus_addr_t pointed to by arg. Exactly one segment is expected.
 */
340 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
344 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
347 *paddr = segs->ds_addr;
/*
 * Allocate a fresh mbuf cluster for an RX descriptor, unload the map's
 * previous buffer (if any), DMA-map the new cluster and return its bus
 * address via *paddr and the mbuf via *mbufp. Panics if the cluster maps
 * to more than one segment.
 */
351 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
354 struct mbuf *new_mbuf;
355 bus_dma_segment_t seg[1];
359 KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
361 new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
362 if (new_mbuf == NULL)
/* Expose the whole cluster to the hardware. */
364 new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
/* Release the previous buffer's mapping before reusing the map. */
367 bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
368 bus_dmamap_unload(tag, map);
371 error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
373 KASSERT(nsegs == 1, ("Too many segments returned!"));
374 if (nsegs != 1 || error)
375 panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
377 bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
380 (*paddr) = seg->ds_addr;
/*
 * Allocate and DMA-map 'size' descriptors into the wrapper table 'tab',
 * link them into a circular ring (each descriptor's next_desc points to its
 * successor, the last wraps to the first), create the mbuf busdma tag in
 * *buffer_tag and a per-descriptor buffer map.
 * NOTE(review): several error-path lines (map parameter of bus_dmamem_alloc,
 * early returns) are not visible in this chunk.
 */
385 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
386 uint32_t size, bus_dma_tag_t *buffer_tag)
388 struct mge_desc_wrapper *dw;
389 bus_addr_t desc_paddr;
/* Walk backwards so each descriptor can point at the one allocated before. */
393 for (i = size - 1; i >= 0; i--) {
395 error = bus_dmamem_alloc(sc->mge_desc_dtag,
396 (void**)&(dw->mge_desc),
397 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
401 if_printf(sc->ifp, "failed to allocate DMA memory\n");
406 error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
407 dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
408 &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
411 if_printf(sc->ifp, "can't load descriptor\n");
412 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
418 /* Chain descriptors */
419 dw->mge_desc->next_desc = desc_paddr;
420 desc_paddr = dw->mge_desc_paddr;
/* Close the ring: last descriptor points back at the first. */
422 tab[size - 1].mge_desc->next_desc = desc_paddr;
424 /* Allocate a busdma tag for mbufs. */
425 error = bus_dma_tag_create(NULL, /* parent */
426 8, 0, /* alignment, boundary */
427 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
428 BUS_SPACE_MAXADDR, /* highaddr */
429 NULL, NULL, /* filtfunc, filtfuncarg */
430 MCLBYTES, 1, /* maxsize, nsegments */
431 MCLBYTES, 0, /* maxsegsz, flags */
432 NULL, NULL, /* lockfunc, lockfuncarg */
433 buffer_tag); /* dmat */
435 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
439 /* Create TX busdma maps */
440 for (i = 0; i < size; i++) {
442 error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
444 if_printf(sc->ifp, "failed to create map for mbuf\n");
/* Start with no buffer attached. */
448 dw->buffer = (struct mbuf*)NULL;
449 dw->mge_desc->buffer = (bus_addr_t)NULL;
/*
 * Top-level DMA setup: create the descriptor busdma tag, build the TX and
 * RX descriptor rings, attach an mbuf cluster to every RX descriptor and
 * record the bus addresses of the first TX/RX descriptors for the hardware
 * queue pointers.
 */
456 mge_allocate_dma(struct mge_softc *sc)
459 struct mge_desc_wrapper *dw;
462 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
463 error = bus_dma_tag_create(NULL, /* parent */
464 16, 0, /* alignment, boundary */
465 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
466 BUS_SPACE_MAXADDR, /* highaddr */
467 NULL, NULL, /* filtfunc, filtfuncarg */
468 sizeof(struct mge_desc), 1, /* maxsize, nsegments */
469 sizeof(struct mge_desc), 0, /* maxsegsz, flags */
470 NULL, NULL, /* lockfunc, lockfuncarg */
471 &sc->mge_desc_dtag); /* dmat */
474 mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
476 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
/* Pre-fill every RX descriptor with a fresh cluster. */
479 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
480 dw = &(sc->mge_rx_desc[i]);
481 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
482 &dw->mge_desc->buffer);
485 sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
486 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
/*
 * Tear down a descriptor ring: for each wrapper unload/destroy the buffer
 * map (freeing the attached mbuf when free_mbufs is set -- the lines doing
 * that are between the visible ones), then unload and free the descriptor
 * DMA memory itself.
 */
492 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
493 uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
495 struct mge_desc_wrapper *dw;
498 for (i = 0; i < size; i++) {
502 if (dw->buffer_dmap) {
504 bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
505 BUS_DMASYNC_POSTREAD);
506 bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
508 bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
512 /* Free RX descriptors */
514 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
515 BUS_DMASYNC_POSTREAD);
516 bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
517 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
/*
 * Release all DMA resources: both descriptor rings (mbufs are freed only
 * on the RX side), the per-direction mbuf tags and the descriptor tag.
 */
524 mge_free_dma(struct mge_softc *sc)
526 /* Free descriptors and mbufs */
527 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
528 mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
530 /* Destroy mbuf dma tag */
531 bus_dma_tag_destroy(sc->mge_tx_dtag);
532 bus_dma_tag_destroy(sc->mge_rx_dtag);
533 /* Destroy descriptors tag */
534 bus_dma_tag_destroy(sc->mge_desc_dtag);
/*
 * Rebuild the RX ring from scratch after a resource error: free the old
 * ring, allocate a new one with fresh clusters, reset the current index,
 * point the hardware at the first descriptor and re-enable the RX queue.
 * Caller must hold the receive lock.
 */
538 mge_reinit_rx(struct mge_softc *sc)
540 struct mge_desc_wrapper *dw;
543 MGE_RECEIVE_LOCK_ASSERT(sc);
545 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
547 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
550 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
551 dw = &(sc->mge_rx_desc[i]);
552 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
553 &dw->mge_desc->buffer);
556 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
557 sc->rx_desc_curr = 0;
559 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
562 /* Enable RX queue */
563 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
566 #ifdef DEVICE_POLLING
567 static poll_handler_t mge_poll;
/*
 * DEVICE_POLLING handler: with interrupts disabled, service TX completions
 * and up to 'count' RX packets. On POLL_AND_CHECK_STATUS also inspect and
 * acknowledge the interrupt cause registers, rebuilding the RX ring on a
 * resource error (call not visible in this chunk). Returns rx_npkts.
 */
570 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
572 struct mge_softc *sc = ifp->if_softc;
573 uint32_t int_cause, int_cause_ext;
578 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
579 MGE_GLOBAL_UNLOCK(sc);
583 if (cmd == POLL_AND_CHECK_STATUS) {
584 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
585 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
587 /* Check for resource error */
588 if (int_cause & MGE_PORT_INT_RXERRQ0)
/* Ack whatever causes were latched (write-back of the inverse clears). */
591 if (int_cause || int_cause_ext) {
592 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
593 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
597 mge_intr_tx_locked(sc);
598 rx_npkts = mge_intr_rx_locked(sc, count);
600 MGE_GLOBAL_UNLOCK(sc);
603 #endif /* DEVICE_POLLING */
/*
 * Device attach: cache unit 0's softc for SMI access, set version params,
 * create locks, allocate bus resources and DMA rings, configure and attach
 * the ifnet, probe the PHY and wire up interrupt handlers.
 */
606 mge_attach(device_t dev)
608 struct mge_softc *sc;
610 uint8_t hwaddr[ETHER_ADDR_LEN];
613 sc = device_get_softc(dev);
/* Unit 0 owns the SMI registers used by all units (see sc_mge0). */
616 if (device_get_unit(dev) == 0)
619 /* Set chip version-dependent parameters */
622 /* Initialize mutexes */
623 mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
624 mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
626 /* Allocate IO and IRQ resources */
627 error = bus_alloc_resources(dev, res_spec, sc->res);
629 device_printf(dev, "could not allocate resources\n");
634 /* Allocate DMA, buffers, buffer descriptors */
635 error = mge_allocate_dma(sc);
641 sc->tx_desc_curr = 0;
642 sc->rx_desc_curr = 0;
643 sc->tx_desc_used_idx = 0;
644 sc->tx_desc_used_count = 0;
646 /* Configure defaults for interrupts coalescing */
647 sc->rx_ic_time = 768;
648 sc->tx_ic_time = 768;
651 /* Allocate network interface */
652 ifp = sc->ifp = if_alloc(IFT_ETHER);
654 device_printf(dev, "if_alloc() failed\n");
659 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
661 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
662 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
663 ifp->if_capenable = ifp->if_capabilities;
664 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
666 #ifdef DEVICE_POLLING
667 /* Advertise that polling is supported */
668 ifp->if_capabilities |= IFCAP_POLLING;
671 ifp->if_init = mge_init;
672 ifp->if_start = mge_start;
673 ifp->if_ioctl = mge_ioctl;
675 ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
676 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
677 IFQ_SET_READY(&ifp->if_snd);
679 mge_get_mac_address(sc, hwaddr);
680 ether_ifattach(ifp, hwaddr);
681 callout_init(&sc->wd_callout, 0);
684 error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd, mge_ifmedia_sts);
686 device_printf(dev, "MII failed to find PHY\n");
692 sc->mii = device_get_softc(sc->miibus);
694 /* Attach interrupt handlers */
/* NOTE(review): only the first two IRQs (rx, tx) of mge_intrs[] are wired
 * here although five are allocated in res_spec -- confirm intentional. */
695 for (i = 0; i < 2; ++i) {
696 error = bus_setup_intr(dev, sc->res[1 + i],
697 INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
698 sc, &sc->ih_cookie[i]);
700 device_printf(dev, "could not setup %s\n",
701 mge_intrs[i].description);
702 ether_ifdetach(sc->ifp);
/*
 * Device detach: stop the controller, drain the watchdog callout, tear down
 * the interrupt handlers set up in attach, detach the ifnet and release
 * DMA, bus resources and locks (in reverse order of acquisition).
 */
711 mge_detach(device_t dev)
713 struct mge_softc *sc;
716 sc = device_get_softc(dev);
718 /* Stop controller and free TX queue */
722 /* Wait for stopping ticks */
723 callout_drain(&sc->wd_callout);
725 /* Stop and release all interrupts */
726 for (i = 0; i < 2; ++i) {
727 error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
729 device_printf(dev, "could not release %s\n",
730 mge_intrs[i].description);
733 /* Detach network interface */
735 ether_ifdetach(sc->ifp);
739 /* Free DMA resources */
742 /* Free IO memory handler */
743 bus_release_resources(dev, res_spec, sc->res);
745 /* Destroy mutexes */
746 mtx_destroy(&sc->receive_lock);
747 mtx_destroy(&sc->transmit_lock);
/*
 * ifmedia status callback: poll the MII and report the active media and
 * link status under the transmit lock.
 */
753 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
755 struct mge_softc *sc = ifp->if_softc;
756 struct mii_data *mii;
758 MGE_TRANSMIT_LOCK(sc);
763 ifmr->ifm_active = mii->mii_media_active;
764 ifmr->ifm_status = mii->mii_media_status;
766 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Translate an ifmedia word into the PORT_SERIAL_CTRL register value:
 * base MRU/reserved bits, then per-subtype speed and autonegotiation bits
 * (the case labels between the visible lines are 1000_T / 100_TX / default
 * autoselect), plus full duplex when IFM_FDX is set.
 */
770 mge_set_port_serial_control(uint32_t media)
772 uint32_t port_config;
774 port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
775 PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
777 if (IFM_TYPE(media) == IFM_ETHER) {
778 switch(IFM_SUBTYPE(media)) {
/* Gigabit. */
782 port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
783 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
784 PORT_SERIAL_SPEED_AUTONEG);
/* 100 Mbit. */
787 port_config |= (PORT_SERIAL_MII_SPEED_100 |
788 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
789 PORT_SERIAL_SPEED_AUTONEG);
/* Anything else: let autonegotiation pick the speed. */
792 port_config |= (PORT_SERIAL_AUTONEG |
793 PORT_SERIAL_AUTONEG_FC |
794 PORT_SERIAL_SPEED_AUTONEG);
798 port_config |= PORT_SERIAL_FULL_DUPLEX;
800 return (port_config);
/*
 * ifmedia change callback: if the interface is up, remember the requested
 * media word and push the change to the PHY via mii_mediachg() under the
 * global lock (acquisition line not visible in this chunk).
 */
804 mge_ifmedia_upd(struct ifnet *ifp)
806 struct mge_softc *sc = ifp->if_softc;
808 if (ifp->if_flags & IFF_UP) {
811 sc->mge_media_status = sc->mii->mii_media.ifm_media;
812 mii_mediachg(sc->mii);
815 MGE_GLOBAL_UNLOCK(sc);
/* if_init entry point: take the global lock and run mge_init_locked().
 * NOTE(review): the signature and lock-acquire lines are not visible here. */
824 struct mge_softc *sc = arg;
828 mge_init_locked(arg);
830 MGE_GLOBAL_UNLOCK(sc);
/*
 * Core (re)initialization, called with both locks held: program MAC and
 * filters, TX queue weights, MTU, port/SDMA configuration, arm the RX ring,
 * enable the port, wait for link, set interrupt coalescing, enable
 * interrupts (unless polling), mark the interface running and start the
 * watchdog tick.
 */
834 mge_init_locked(void *arg)
836 struct mge_softc *sc = arg;
837 struct mge_desc_wrapper *dw;
838 volatile uint32_t reg_val;
842 MGE_GLOBAL_LOCK_ASSERT(sc);
847 /* Disable interrupts */
848 mge_intrs_ctrl(sc, 0);
850 /* Set MAC address */
851 mge_set_mac_address(sc);
853 /* Setup multicast filters */
854 mge_setup_multicast(sc);
/* v2 silicon needs RGMII enable and fixed-priority TX arbitration. */
856 if (sc->mge_ver == 2) {
857 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
858 MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
861 /* Initialize TX queue configuration registers */
862 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
863 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
864 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
866 /* Clear TX queue configuration registers for unused queues */
867 for (i = 1; i < 7; i++) {
868 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
869 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
870 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
873 /* Set default MTU */
874 MGE_WRITE(sc, sc->mge_mtu, 0);
876 /* Port configuration */
877 MGE_WRITE(sc, MGE_PORT_CONFIG,
878 PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
879 PORT_CONFIG_ARO_RXQ(0));
880 MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);
882 /* Setup port configuration */
883 reg_val = mge_set_port_serial_control(sc->mge_media_status);
884 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
886 /* Setup SDMA configuration */
887 MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
888 MGE_SDMA_TX_BYTE_SWAP |
889 MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
890 MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
892 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
/* Point the hardware at the start of both rings. */
894 MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
895 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
898 /* Reset descriptor indexes */
899 sc->tx_desc_curr = 0;
900 sc->rx_desc_curr = 0;
901 sc->tx_desc_used_idx = 0;
902 sc->tx_desc_used_count = 0;
904 /* Enable RX descriptors */
905 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
906 dw = &sc->mge_rx_desc[i];
907 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
908 dw->mge_desc->buff_size = MCLBYTES;
909 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
910 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
913 /* Enable RX queue */
914 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
/* Bring the port up. */
917 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
918 reg_val |= PORT_SERIAL_ENABLE;
919 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
/* Poll for link-up (retry loop lines not visible in this chunk). */
922 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
923 if (reg_val & MGE_STATUS_LINKUP)
927 if_printf(sc->ifp, "Timeout on link-up\n");
932 /* Setup interrupts coalescing */
936 /* Enable interrupts */
937 #ifdef DEVICE_POLLING
939 * ...only if polling is not turned on. Disable interrupts explicitly
940 * if polling is enabled.
942 if (sc->ifp->if_capenable & IFCAP_POLLING)
943 mge_intrs_ctrl(sc, 0);
945 #endif /* DEVICE_POLLING */
946 mge_intrs_ctrl(sc, 1);
948 /* Activate network interface */
949 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
950 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
953 /* Schedule watchdog timeout */
954 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/* Error interrupt handler -- currently only logs that it fired. */
958 mge_intr_err(void *arg)
960 struct mge_softc *sc = arg;
964 if_printf(ifp, "%s\n", __FUNCTION__);
/* Miscellaneous interrupt handler -- currently only logs that it fired. */
968 mge_intr_misc(void *arg)
970 struct mge_softc *sc = arg;
974 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * RX interrupt handler: bail out if DEVICE_POLLING owns the device,
 * otherwise read the cause registers, rebuild the ring on a resource error
 * (body between the visible lines), acknowledge the RX causes and drain
 * the ring without a packet budget (count = -1).
 */
978 mge_intr_rx(void *arg) {
979 struct mge_softc *sc = arg;
980 uint32_t int_cause, int_cause_ext;
982 MGE_RECEIVE_LOCK(sc);
984 #ifdef DEVICE_POLLING
985 if (sc->ifp->if_capenable & IFCAP_POLLING) {
986 MGE_RECEIVE_UNLOCK(sc);
991 /* Get interrupt cause */
992 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
993 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
995 /* Check for resource error */
996 if (int_cause & MGE_PORT_INT_RXERRQ0) {
998 MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
999 int_cause & ~MGE_PORT_INT_RXERRQ0);
/* Keep only the RX-related cause bits. */
1002 int_cause &= MGE_PORT_INT_RXQ0;
1003 int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1005 if (int_cause || int_cause_ext) {
1006 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1007 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1008 mge_intr_rx_locked(sc, -1);
1011 MGE_RECEIVE_UNLOCK(sc);
1016 mge_intr_rx_locked(struct mge_softc *sc, int count)
1018 struct ifnet *ifp = sc->ifp;
1021 struct mge_desc_wrapper* dw;
1025 MGE_RECEIVE_LOCK_ASSERT(sc);
1027 while (count != 0) {
1028 dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1029 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1030 BUS_DMASYNC_POSTREAD);
1033 status = dw->mge_desc->cmd_status;
1034 bufsize = dw->mge_desc->buff_size;
1035 if ((status & MGE_DMA_OWNED) != 0)
1038 if (dw->mge_desc->byte_count &&
1039 ~(status & MGE_ERR_SUMMARY)) {
1041 bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1042 BUS_DMASYNC_POSTREAD);
1044 mb = m_devget(dw->buffer->m_data,
1045 dw->mge_desc->byte_count - ETHER_CRC_LEN,
1049 /* Give up if no mbufs */
1053 mb->m_pkthdr.len -= 2;
1056 mge_offload_process_frame(ifp, mb, status,
1059 MGE_RECEIVE_UNLOCK(sc);
1060 (*ifp->if_input)(ifp, mb);
1061 MGE_RECEIVE_LOCK(sc);
1065 dw->mge_desc->byte_count = 0;
1066 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1067 sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
1068 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1069 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Summary interrupt handler -- currently only logs that it fired. */
1079 mge_intr_sum(void *arg)
1081 struct mge_softc *sc = arg;
1085 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * TX interrupt handler: skip if DEVICE_POLLING owns the device, otherwise
 * acknowledge the TX buffer cause bit and reap completed descriptors via
 * mge_intr_tx_locked().
 */
1089 mge_intr_tx(void *arg)
1091 struct mge_softc *sc = arg;
1092 uint32_t int_cause_ext;
1094 MGE_TRANSMIT_LOCK(sc);
1096 #ifdef DEVICE_POLLING
1097 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1098 MGE_TRANSMIT_UNLOCK(sc);
1103 /* Ack the interrupt */
1104 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1105 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
1106 int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);
1108 mge_intr_tx_locked(sc);
1110 MGE_TRANSMIT_UNLOCK(sc);
1115 mge_intr_tx_locked(struct mge_softc *sc)
1117 struct ifnet *ifp = sc->ifp;
1118 struct mge_desc_wrapper *dw;
1119 struct mge_desc *desc;
1123 MGE_TRANSMIT_LOCK_ASSERT(sc);
1125 /* Disable watchdog */
1128 while (sc->tx_desc_used_count) {
1129 /* Get the descriptor */
1130 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1131 desc = dw->mge_desc;
1132 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1133 BUS_DMASYNC_POSTREAD);
1135 /* Get descriptor status */
1136 status = desc->cmd_status;
1138 if (status & MGE_DMA_OWNED)
1141 sc->tx_desc_used_idx =
1142 (++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;
1143 sc->tx_desc_used_count--;
1145 /* Update collision statistics */
1146 if (status & MGE_ERR_SUMMARY) {
1147 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1148 ifp->if_collisions++;
1149 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1150 ifp->if_collisions += 16;
1153 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1154 BUS_DMASYNC_POSTWRITE);
1155 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1156 m_freem(dw->buffer);
1157 dw->buffer = (struct mbuf*)NULL;
1164 /* Now send anything that was pending */
1165 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1166 mge_start_locked(ifp);
/*
 * ifnet ioctl handler. Visible cases: SIOCSIFFLAGS (apply promisc/allmulti
 * deltas or (re)init/stop), multicast list changes, SIOCSIFCAP (HWCSUM and
 * DEVICE_POLLING toggles) and media ioctls; everything else falls through
 * to ether_ioctl(). Case labels themselves are among the missing lines.
 */
1171 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1173 struct mge_softc *sc = ifp->if_softc;
1174 struct ifreq *ifr = (struct ifreq *)data;
/* SIOCSIFFLAGS */
1182 MGE_GLOBAL_LOCK(sc);
1184 if (ifp->if_flags & IFF_UP) {
1185 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Only react to the flags that actually changed. */
1186 flags = ifp->if_flags ^ sc->mge_if_flags;
1187 if (flags & IFF_PROMISC)
1188 mge_set_prom_mode(sc,
1189 MGE_RX_DEFAULT_QUEUE);
1191 if (flags & IFF_ALLMULTI)
1192 mge_setup_multicast(sc);
1194 mge_init_locked(sc);
1196 else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1199 sc->mge_if_flags = ifp->if_flags;
1200 MGE_GLOBAL_UNLOCK(sc);
/* SIOCADDMULTI / SIOCDELMULTI */
1204 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1205 MGE_GLOBAL_LOCK(sc);
1206 mge_setup_multicast(sc);
1207 MGE_GLOBAL_UNLOCK(sc);
/* SIOCSIFCAP */
1211 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1212 if (mask & IFCAP_HWCSUM) {
1213 ifp->if_capenable &= ~IFCAP_HWCSUM;
1214 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1215 if (ifp->if_capenable & IFCAP_TXCSUM)
1216 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1218 ifp->if_hwassist = 0;
1220 #ifdef DEVICE_POLLING
1221 if (mask & IFCAP_POLLING) {
1222 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1223 error = ether_poll_register(mge_poll, ifp);
/* Polling on: mask device interrupts. */
1227 MGE_GLOBAL_LOCK(sc);
1228 mge_intrs_ctrl(sc, 0);
1229 ifp->if_capenable |= IFCAP_POLLING;
1230 MGE_GLOBAL_UNLOCK(sc);
/* Polling off: re-enable device interrupts. */
1232 error = ether_poll_deregister(ifp);
1233 MGE_GLOBAL_LOCK(sc);
1234 mge_intrs_ctrl(sc, 1);
1235 ifp->if_capenable &= ~IFCAP_POLLING;
1236 MGE_GLOBAL_UNLOCK(sc);
1241 case SIOCGIFMEDIA: /* fall through */
/* 1000baseT is only supported full-duplex by this hardware. */
1243 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1244 && !(ifr->ifr_media & IFM_FDX)) {
1245 device_printf(sc->dev,
1246 "1000baseTX half-duplex unsupported\n");
1249 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1252 error = ether_ioctl(ifp, command, data);
/*
 * miibus read: issue an SMI read through unit 0's registers and spin until
 * READVALID (bounded by MGE_SMI_READ_RETRIES). Returns the 16-bit register
 * value; requests for PHY addresses not matching this unit are rejected
 * (return line not visible in this chunk).
 */
1258 mge_miibus_readreg(device_t dev, int phy, int reg)
1263 * We assume static PHY address <=> device unit mapping:
1264 * PHY Address = MII_ADDR_BASE + device unit.
1265 * This is true for most Marvell boards.
1267 * Code below grants proper PHY detection on each device
1272 if ((MII_ADDR_BASE + device_get_unit(dev)) != phy)
1275 MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1276 (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1278 retries = MGE_SMI_READ_RETRIES;
1279 while (--retries && !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
1280 DELAY(MGE_SMI_READ_DELAY);
1283 device_printf(dev, "Timeout while reading from PHY\n");
1285 return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
/*
 * miibus write: issue an SMI write through unit 0's registers and spin
 * until the BUSY bit clears (bounded by MGE_SMI_WRITE_RETRIES). PHY
 * addresses not matching this unit are rejected.
 */
1289 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1293 if ((MII_ADDR_BASE + device_get_unit(dev)) != phy)
1296 MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1297 (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1299 retries = MGE_SMI_WRITE_RETRIES;
1300 while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
1301 DELAY(MGE_SMI_WRITE_DELAY);
1304 device_printf(dev, "Timeout while writing to PHY\n");
/* Device probe: unconditionally claims the device with a description. */
1309 mge_probe(device_t dev)
1312 device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1313 return (BUS_PROBE_DEFAULT);
/* Resume method: stub, only logs the call. */
1317 mge_resume(device_t dev)
1320 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * Shutdown method: deregister polling if active and stop the controller
 * (mge_stop call is among the missing lines) under the global lock.
 */
1325 mge_shutdown(device_t dev)
1327 struct mge_softc *sc = device_get_softc(dev);
1329 MGE_GLOBAL_LOCK(sc);
1331 #ifdef DEVICE_POLLING
1332 if (sc->ifp->if_capenable & IFCAP_POLLING)
1333 ether_poll_deregister(sc->ifp);
1338 MGE_GLOBAL_UNLOCK(sc);
1344 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1346 struct mge_desc_wrapper *dw = NULL;
1348 bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1356 /* Check for free descriptors */
1357 if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1358 /* No free descriptors */
1362 /* Fetch unused map */
1363 desc_no = sc->tx_desc_curr;
1364 dw = &sc->mge_tx_desc[desc_no];
1365 mapp = dw->buffer_dmap;
1367 /* Create mapping in DMA memory */
1368 error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1370 if (error != 0 || nsegs != 1 ) {
1371 bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1372 return ((error != 0) ? error : -1);
1375 bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1377 /* Everything is ok, now we can send buffers */
1378 for (seg = 0; seg < nsegs; seg++) {
1379 dw->mge_desc->byte_count = segs[seg].ds_len;
1380 dw->mge_desc->buffer = segs[seg].ds_addr;
1382 dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
1383 MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1387 mge_offload_setup_descriptor(sc, dw);
1390 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1391 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1393 sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
1394 sc->tx_desc_used_count++;
/* Periodic (1 Hz) tick: run the TX watchdog, refresh media state if it
 * changed, and re-arm itself. NOTE(review): the function signature line is
 * not visible in this chunk. */
1401 struct mge_softc *sc = msc;
1403 /* Check for TX timeout */
1408 /* Check for media type change */
1409 if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1410 mge_ifmedia_upd(sc->ifp);
1412 /* Schedule another timeout one second from now */
1413 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * TX watchdog: called from mge_tick. If the timer is disarmed (0) or has
 * not yet expired after decrementing, return quietly; otherwise report
 * the timeout and reinitialize the controller.
 */
1417 mge_watchdog(struct mge_softc *sc)
1423 MGE_GLOBAL_LOCK(sc);
/* wd_timer == 0 means not armed; --wd_timer != 0 means not yet expired. */
1425 if (sc->wd_timer == 0 || --sc->wd_timer) {
1426 MGE_GLOBAL_UNLOCK(sc);
1431 if_printf(ifp, "watchdog timeout\n");
/* Recover by full reinit; locks are held across it (elided lines between). */
1434 mge_init_locked(sc);
1436 MGE_GLOBAL_UNLOCK(sc);
/*
 * ifnet if_start entry point: thin locked wrapper around
 * mge_start_locked().
 */
1440 mge_start(struct ifnet *ifp)
1442 struct mge_softc *sc = ifp->if_softc;
1444 MGE_TRANSMIT_LOCK(sc);
1446 mge_start_locked(ifp);
1448 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Drain the interface send queue (TX lock held): dequeue each packet,
 * defragment it into a single mbuf (mge_encap accepts exactly one DMA
 * segment), hand it to mge_encap(), and finally kick the hardware TX
 * queue. On encap failure the packet is requeued and OACTIVE is set.
 * NOTE(review): loop braces and defrag error handling are elided in this
 * listing.
 */
1452 mge_start_locked(struct ifnet *ifp)
1454 struct mge_softc *sc;
1455 struct mbuf *m0, *mtmp;
1456 uint32_t reg_val, queued = 0;
1460 MGE_TRANSMIT_LOCK_ASSERT(sc);
/* Nothing to do unless RUNNING is set and OACTIVE is clear. */
1462 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1467 /* Get packet from the queue */
1468 IF_DEQUEUE(&ifp->if_snd, m0);
/* Collapse mbuf chain to one segment for the single-segment encap path. */
1472 mtmp = m_defrag(m0, M_DONTWAIT);
1476 if (mge_encap(sc, m0)) {
/* Ring full (or encap error): push packet back and throttle the queue. */
1477 IF_PREPEND(&ifp->if_snd, m0);
1478 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1486 /* Enable transmitter and watchdog timer */
1487 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1488 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
/*
 * Stop the controller: cancel the tick callout, mark the interface down,
 * mask interrupts, disable the RX/TX queues, reclaim every completed TX
 * descriptor (unloading DMA maps and freeing mbufs), wait for the TX
 * engine to drain, then clear the port-enable bit.
 * NOTE(review): several lines (early return, loop/brace structure, the
 * drain-timeout loop header) are elided in this listing.
 */
1494 mge_stop(struct mge_softc *sc)
1497 volatile uint32_t reg_val, status;
1498 struct mge_desc_wrapper *dw;
1499 struct mge_desc *desc;
/* Already stopped — nothing to do. */
1504 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1507 /* Stop tick engine */
1508 callout_stop(&sc->wd_callout);
1510 /* Disable interface */
1511 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1514 /* Disable interrupts */
1515 mge_intrs_ctrl(sc, 0);
1517 /* Disable Rx and Tx */
1518 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1519 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1520 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1522 /* Remove pending data from TX queue */
1523 while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1524 sc->tx_desc_used_count) {
1525 /* Get the descriptor */
1526 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1527 desc = dw->mge_desc;
1528 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1529 BUS_DMASYNC_POSTREAD);
1531 /* Get descriptor status */
1532 status = desc->cmd_status;
/* Descriptor still owned by DMA engine: stop reclaiming here. */
1534 if (status & MGE_DMA_OWNED)
/* Advance reclaim tail and release the buffer's DMA resources. */
1537 sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) %
1539 sc->tx_desc_used_count--;
1541 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1542 BUS_DMASYNC_POSTWRITE);
1543 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1545 m_freem(dw->buffer);
1546 dw->buffer = (struct mbuf*)NULL;
1549 /* Wait for end of transmission */
/* Poll port status until TX is idle and the TX FIFO is empty
 * (bounded by an elided retry counter — see line 1560's timeout). */
1552 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1553 if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1554 (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1560 if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
/* Finally disable the port serial interface. */
1563 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1564 reg_val &= ~(PORT_SERIAL_ENABLE);
1565 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
/*
 * Device suspend method: currently only logs the call; presumably a stub
 * (no state save visible here) — confirm against the full source.
 */
1569 mge_suspend(device_t dev)
1572 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * Translate the hardware RX descriptor status into mbuf checksum flags.
 * If RXCSUM is enabled: mark the IP header as checked/valid when the
 * controller verified it, and mark the L4 (TCP/UDP) payload checksum as
 * valid for non-fragmented frames whose L4 checksum passed. The pseudo
 * checksum 0xFFFF satisfies the stack's CSUM_PSEUDO_HDR contract.
 */
1577 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1578 uint32_t status, uint16_t bufsize)
1582 if (ifp->if_capenable & IFCAP_RXCSUM) {
1583 if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1584 csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
/* L4 offload only applies to complete (non-fragment) TCP/UDP frames. */
1586 if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1587 (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1588 (status & MGE_RX_L4_CSUM_OK)) {
1589 csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1590 frame->m_pkthdr.csum_data = 0xFFFF;
1593 frame->m_pkthdr.csum_flags = csum_flags;
/*
 * Build the TX-offload bits of a descriptor's cmd_status from the mbuf's
 * checksum flags: account for an optional VLAN header, encode the IP
 * header length, and request IP/TCP/UDP checksum generation as asked.
 * Frames whose ethertype is not IPv4 are skipped with a diagnostic
 * (elided lines around 1618-1621 presumably print and return).
 */
1598 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1600 struct mbuf *m0 = dw->buffer;
1601 struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1602 int csum_flags = m0->m_pkthdr.csum_flags;
/* VLAN-tagged frames: real ethertype follows the 4-byte encapsulation. */
1608 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1609 etype = ntohs(eh->evl_proto);
1610 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1611 csum_flags |= MGE_TX_VLAN_TAGGED;
1613 etype = ntohs(eh->evl_encap_proto);
1614 ehlen = ETHER_HDR_LEN;
/* Offload is only supported for IPv4 payloads. */
1617 if (etype != ETHERTYPE_IP) {
1619 "TCP/IP Offload enabled for unsupported "
/* Hardware needs the IP header length (in 32-bit words) in the descriptor. */
1624 ip = (struct ip *)(m0->m_data + ehlen);
1625 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1627 if ((m0->m_flags & M_FRAG) == 0)
1628 cmd_status |= MGE_TX_NOT_FRAGMENT;
1631 if (csum_flags & CSUM_IP)
1632 cmd_status |= MGE_TX_GEN_IP_CSUM;
1634 if (csum_flags & CSUM_TCP)
1635 cmd_status |= MGE_TX_GEN_L4_CSUM;
1637 if (csum_flags & CSUM_UDP)
1638 cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
/* Merge offload bits into the descriptor prepared by mge_encap(). */
1640 dw->mge_desc->cmd_status |= cmd_status;
/*
 * Enable or disable the controller's interrupt sources.
 * enable != 0: unmask RX queue 0, RX error, and the extended causes
 * (TX error/underrun/buffer). enable == 0: clear all cause registers and
 * zero every mask, silencing the port completely.
 * NOTE(review): the if/else framing lines are elided in this listing.
 */
1644 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1648 MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1649 MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1650 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1651 MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1652 MGE_PORT_INT_EXT_TXBUF0);
/* Disable path: acknowledge pending causes, then mask everything. */
1654 MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1655 MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1657 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1658 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1660 MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1661 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
/*
 * Table-driven CRC-8 over `size` bytes of `data`; used below to index
 * the "other" multicast hash table. The table values are consistent with
 * the polynomial 0x07 (ct[1] == 0x07) — presumably standard CRC-8;
 * confirm if the table is ever regenerated.
 */
1666 mge_crc8(uint8_t *data, int size)
1669 static const uint8_t ct[256] = {
1670 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1671 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1672 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1673 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1674 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1675 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1676 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1677 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1678 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1679 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1680 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1681 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1682 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1683 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1684 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1685 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1686 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1687 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1688 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
1689 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
1690 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
1691 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
1692 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
1693 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
1694 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
1695 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
1696 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
1697 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
1698 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
1699 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
1700 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
1701 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
/* Fold each byte through the lookup table (loop header elided). */
1705 crc = ct[crc ^ *(data++)];
/*
 * Program the controller's two multicast filter tables.
 * `v` encodes "pass to RX default queue, entry valid". With IFF_ALLMULTI
 * every table byte is set to `v` (accept everything); otherwise each
 * link-layer multicast address is hashed into either the "special" table
 * (addresses with the 01:00:5E:00:00:xx prefix, indexed by the last
 * octet — elided line 1736 presumably extracts it) or the "other" table
 * (indexed by mge_crc8 of the full address). Tables are then written to
 * the DA filter registers, one byte per entry, four entries per word.
 */
1711 mge_setup_multicast(struct mge_softc *sc)
1713 uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
1714 uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
1715 uint32_t smt[MGE_MCAST_REG_NUMBER];
1716 uint32_t omt[MGE_MCAST_REG_NUMBER];
1717 struct ifnet *ifp = sc->ifp;
1718 struct ifmultiaddr *ifma;
1722 if (ifp->if_flags & IFF_ALLMULTI) {
/* Accept-all: replicate the valid-entry byte across every table word. */
1723 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
1724 smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
1726 memset(smt, 0, sizeof(smt));
1727 memset(omt, 0, sizeof(omt));
1729 if_maddr_rlock(ifp);
1730 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1731 if (ifma->ifma_addr->sa_family != AF_LINK)
1734 mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1735 if (memcmp(mac, special, sizeof(special)) == 0) {
/* i >> 2 selects the 32-bit word, (i & 3) << 3 the byte lane within it. */
1737 smt[i >> 2] |= v << ((i & 0x03) << 3);
1739 i = mge_crc8(mac, ETHER_ADDR_LEN);
1740 omt[i >> 2] |= v << ((i & 0x03) << 3);
1743 if_maddr_runlock(ifp);
/* Commit both tables to hardware. */
1746 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
1747 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
1748 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
/*
 * Program the RX interrupt-coalescing time: clamp rx_ic_time to the
 * chip's maximum, then read-modify-write the IPG field of the SDMA
 * config register (mask built from the max value so all field bits are
 * cleared first; encoding is version-dependent via mge_rx_ipg()).
 */
1753 mge_set_rxic(struct mge_softc *sc)
1757 if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1758 sc->rx_ic_time = sc->mge_rx_ipg_max;
1760 reg = MGE_READ(sc, MGE_SDMA_CONFIG);
1761 reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1762 reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1763 MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
/*
 * Program the TX interrupt-coalescing time: mirror of mge_set_rxic(),
 * operating on the TX FIFO urgent-threshold register with the
 * version-dependent mge_tfut_ipg() encoding.
 */
1767 mge_set_txic(struct mge_softc *sc)
1771 if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1772 sc->tx_ic_time = sc->mge_tfut_ipg_max;
1774 reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
1775 reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1776 reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1777 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
/*
 * Sysctl handler shared by the rx_time/tx_time coalescing knobs.
 * arg2 (MGE_IC_RX or MGE_IC_TX) selects which time is exposed; on a
 * successful write the new value is stored and presumably pushed to
 * hardware via mge_set_rxic()/mge_set_txic() in the elided lines
 * (1795-1798) — confirm against the full source.
 */
1781 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1783 struct mge_softc *sc = (struct mge_softc *)arg1;
1787 time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1788 error = sysctl_handle_int(oidp, &time, 0, req);
1792 MGE_GLOBAL_LOCK(sc);
1793 if (arg2 == MGE_IC_RX) {
1794 sc->rx_ic_time = time;
1797 sc->tx_ic_time = time;
1800 MGE_GLOBAL_UNLOCK(sc);
1806 mge_add_sysctls(struct mge_softc *sc)
1808 struct sysctl_ctx_list *ctx;
1809 struct sysctl_oid_list *children;
1810 struct sysctl_oid *tree;
1812 ctx = device_get_sysctl_ctx(sc->dev);
1813 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1814 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1815 CTLFLAG_RD, 0, "MGE Interrupts coalescing");
1816 children = SYSCTL_CHILDREN(tree);
1818 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1819 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1820 "I", "IC RX time threshold");
1821 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1822 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1823 "I", "IC TX time threshold");