2 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5 * Developed by Semihalf.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of MARVELL nor the names of contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
50 #include <net/ethernet.h>
53 #include <net/if_var.h>
54 #include <net/if_arp.h>
55 #include <net/if_dl.h>
56 #include <net/if_media.h>
57 #include <net/if_types.h>
58 #include <net/if_vlan_var.h>
60 #include <netinet/in_systm.h>
61 #include <netinet/in.h>
62 #include <netinet/ip.h>
64 #include <sys/sockio.h>
66 #include <machine/bus.h>
68 #include <machine/resource.h>
70 #include <dev/mii/mii.h>
71 #include <dev/mii/miivar.h>
73 #include <dev/fdt/fdt_common.h>
74 #include <dev/ofw/ofw_bus.h>
75 #include <dev/ofw/ofw_bus_subr.h>
77 #include <dev/mge/if_mgevar.h>
78 #include <arm/mv/mvreg.h>
79 #include <arm/mv/mvvar.h>
81 #include "miibus_if.h"
/*
 * Forward declarations for the mge(4) driver.
 * NOTE(review): extraction-garbled chunk — every line carries a fused
 * upstream line number; restore from canonical FreeBSD
 * sys/dev/mge/if_mge.c before building.
 */
/* newbus device interface */
83 static int mge_probe(device_t dev);
84 static int mge_attach(device_t dev);
85 static int mge_detach(device_t dev);
86 static int mge_shutdown(device_t dev);
87 static int mge_suspend(device_t dev);
88 static int mge_resume(device_t dev);
/* MII bus accessors (SMI register interface) */
90 static int mge_miibus_readreg(device_t dev, int phy, int reg);
91 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
/* ifmedia callbacks */
93 static int mge_ifmedia_upd(struct ifnet *ifp);
94 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
/* ifnet entry points and watchdog */
96 static void mge_init(void *arg);
97 static void mge_init_locked(void *arg);
98 static void mge_start(struct ifnet *ifp);
99 static void mge_start_locked(struct ifnet *ifp);
100 static void mge_watchdog(struct mge_softc *sc);
101 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
/* chip-version dependent helpers */
103 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
104 static uint32_t mge_rx_ipg(uint32_t val, int ver);
105 static void mge_ver_params(struct mge_softc *sc);
/* interrupt handlers */
107 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
108 static void mge_intr_rxtx(void *arg);
109 static void mge_intr_rx(void *arg);
110 static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
111 uint32_t int_cause_ext);
112 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
113 static void mge_intr_tx(void *arg);
114 static void mge_intr_tx_locked(struct mge_softc *sc);
115 static void mge_intr_misc(void *arg);
116 static void mge_intr_sum(void *arg);
117 static void mge_intr_err(void *arg);
118 static void mge_stop(struct mge_softc *sc);
119 static void mge_tick(void *msc);
/* MAC address / filtering helpers */
120 static uint32_t mge_set_port_serial_control(uint32_t media);
121 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
122 static void mge_set_mac_address(struct mge_softc *sc);
123 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
125 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
/* DMA descriptor ring management */
126 static int mge_allocate_dma(struct mge_softc *sc);
127 static int mge_alloc_desc_dma(struct mge_softc *sc,
128 struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
129 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
130 struct mbuf **mbufp, bus_addr_t *paddr);
131 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
132 static void mge_free_dma(struct mge_softc *sc);
133 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
134 bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
/* checksum offload, multicast, interrupt coalescing, sysctl */
135 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
136 uint32_t status, uint16_t bufsize);
137 static void mge_offload_setup_descriptor(struct mge_softc *sc,
138 struct mge_desc_wrapper *dw);
139 static uint8_t mge_crc8(uint8_t *data, int size);
140 static void mge_setup_multicast(struct mge_softc *sc);
141 static void mge_set_rxic(struct mge_softc *sc);
142 static void mge_set_txic(struct mge_softc *sc);
143 static void mge_add_sysctls(struct mge_softc *sc);
144 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
/*
 * newbus method table for the mge driver.
 * NOTE(review): garbled/truncated fragment — the DEVMETHOD_END sentinel and
 * closing brace are missing here; verify against upstream if_mge.c.
 */
146 static device_method_t mge_methods[] = {
147 /* Device interface */
148 DEVMETHOD(device_probe, mge_probe),
149 DEVMETHOD(device_attach, mge_attach),
150 DEVMETHOD(device_detach, mge_detach),
151 DEVMETHOD(device_shutdown, mge_shutdown),
152 DEVMETHOD(device_suspend, mge_suspend),
153 DEVMETHOD(device_resume, mge_resume),
/* MII interface */
155 DEVMETHOD(miibus_readreg, mge_miibus_readreg),
156 DEVMETHOD(miibus_writereg, mge_miibus_writereg),
/*
 * Driver/devclass glue: attaches mge to simplebus and hangs miibus off it.
 * NOTE(review): garbled fragment — the driver_t name field and closing brace
 * are missing between these lines; verify against upstream if_mge.c.
 */
160 static driver_t mge_driver = {
163 sizeof(struct mge_softc),
166 static devclass_t mge_devclass;
168 DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
169 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
/* module ordering: ether and miibus must be present before mge loads */
170 MODULE_DEPEND(mge, ether, 1, 1, 1);
171 MODULE_DEPEND(mge, miibus, 1, 1, 1);
/*
 * Bus resources: one memory window plus up to three shareable IRQ lines
 * (per-version interrupt count is chosen in mge_ver_params()).
 * NOTE(review): garbled fragment — the RESOURCE_SPEC_END terminator and
 * closing brace are missing; verify against upstream if_mge.c.
 */
173 static struct resource_spec res_spec[] = {
174 { SYS_RES_MEMORY, 0, RF_ACTIVE },
175 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
176 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
177 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
/*
 * Interrupt handler/description table, indexed by interrupt line.
 * Entry 0 (aggregated rx+tx) is used when the chip exposes a single IRQ;
 * entries 1..MGE_INTR_COUNT are used on multi-IRQ variants (see the
 * selection logic in mge_attach()).
 * NOTE(review): garbled fragment — the enclosing struct declaration header
 * is missing above "driver_intr_t *handler;"; verify against upstream.
 */
182 driver_intr_t *handler;
184 } mge_intrs[MGE_INTR_COUNT + 1] = {
185 { mge_intr_rxtx,"GbE aggregated interrupt" },
186 { mge_intr_rx, "GbE receive interrupt" },
187 { mge_intr_tx, "GbE transmit interrupt" },
188 { mge_intr_misc,"GbE misc interrupt" },
189 { mge_intr_sum, "GbE summary interrupt" },
190 { mge_intr_err, "GbE error interrupt" },
/*
 * Fetch the station MAC address: first try the "local-mac-address" FDT
 * property; if that fails, fall back to whatever is currently programmed
 * in the MAC_ADDR_L/H hardware registers.
 * NOTE(review): extraction-garbled fragment — fused line numbers and
 * missing interior lines (return type, braces, validity check of the OF
 * property); restore from upstream if_mge.c.
 */
194 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
196 uint32_t mac_l, mac_h;
201 * Retrieve hw address from the device tree.
203 i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
206 for (i = 0; i < 6; i++)
213 bcopy(lmac, addr, 6);
219 * Fall back -- use the currently programmed address.
221 mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
222 mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
/* register layout: MAC_ADDR_H holds bytes 0-3 (byte 0 in the MSB),
 * MAC_ADDR_L holds bytes 4-5 */
224 addr[0] = (mac_h & 0xff000000) >> 24;
225 addr[1] = (mac_h & 0x00ff0000) >> 16;
226 addr[2] = (mac_h & 0x0000ff00) >> 8;
227 addr[3] = (mac_h & 0x000000ff);
228 addr[4] = (mac_l & 0x0000ff00) >> 8;
229 addr[5] = (mac_l & 0x000000ff);
/*
 * Encode the TX FIFO urgent threshold (in units the TX_FIFO_URGENT_TRSH
 * register expects) for the given chip version.
 *
 * val: threshold value from the sysctl/softc; ver: sc->mge_ver (1 or 2).
 * Returns the value shifted into register position; the field is 14 bits
 * wide on v1 parts and 16 bits wide on v2 parts (matching the
 * mge_tfut_ipg_max limits set in mge_ver_params()).
 *
 * NOTE(review): the source fragment was extraction-garbled (switch/case
 * skeleton lost); reconstructed here from the two visible return
 * statements — verify against upstream sys/dev/mge/if_mge.c.
 */
static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		/* v1: 14-bit field */
		return ((val & 0x3fff) << 4);
	case 2:
	default:
		/* v2 and later: 16-bit field */
		return ((val & 0xffff) << 4);
	}
}
/*
 * Encode the RX interrupt coalescing time (IPG) into register format for
 * the given chip version.
 *
 * val: coalescing value; ver: sc->mge_ver (1 or 2).
 * v1 keeps a contiguous 14-bit field at bit 8; v2 splits a 16-bit value:
 * the top bit lands at bit 25 and the low 15 bits at bit 7.
 *
 * NOTE(review): the source fragment was extraction-garbled (switch/case
 * skeleton lost); reconstructed here from the two visible return
 * statements — verify against upstream sys/dev/mge/if_mge.c.
 */
static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		/* v1: contiguous 14-bit field */
		return ((val & 0x3fff) << 8);
	case 2:
	default:
		/* v2: split field — bit 15 -> bit 25, bits 14:0 -> bits 21:7 */
		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
	}
}
/*
 * Select chip-version dependent limits (coalescing maxima, TX arbiter and
 * token-bucket configuration) and the number of IRQ lines based on the
 * Marvell SoC device ID.
 * NOTE(review): extraction-garbled fragment — fused line numbers, missing
 * braces, the soc_id/device lookup and the mge_ver assignments are not
 * visible here; restore from upstream if_mge.c.
 */
259 mge_ver_params(struct mge_softc *sc)
/* newer SoCs (Kirkwood 6281/6282, Discovery MV78xxx families): wider
 * fields and version-2 programming model */
264 if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
265 d == MV_DEV_88F6282 ||
266 d == MV_DEV_MV78100 ||
267 d == MV_DEV_MV78100_Z0 ||
268 (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
271 sc->mge_tfut_ipg_max = 0xFFFF;
272 sc->mge_rx_ipg_max = 0xFFFF;
273 sc->mge_tx_arb_cfg = 0xFC0000FF;
274 sc->mge_tx_tok_cfg = 0xFFFF7FFF;
275 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/* older parts: 14-bit coalescing fields, version-1 defaults */
279 sc->mge_tfut_ipg_max = 0x3FFF;
280 sc->mge_rx_ipg_max = 0x3FFF;
281 sc->mge_tx_arb_cfg = 0x000000FF;
282 sc->mge_tx_tok_cfg = 0x3FFFFFFF;
283 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/* 88RC8180 exposes a single (aggregated) GbE interrupt line */
285 if (d == MV_DEV_88RC8180)
286 sc->mge_intr_cnt = 1;
288 sc->mge_intr_cnt = 2;
290 if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
/*
 * Program the interface's link-level address into the MAC_ADDR_L/H
 * registers and install a matching unicast filter entry on the default
 * RX queue. Caller must hold both driver locks (asserted below).
 * NOTE(review): extraction-garbled fragment — fused line numbers and
 * missing braces; restore from upstream if_mge.c.
 */
297 mge_set_mac_address(struct mge_softc *sc)
300 uint32_t mac_l, mac_h;
302 MGE_GLOBAL_LOCK_ASSERT(sc);
304 if_mac = (char *)IF_LLADDR(sc->ifp);
/* pack bytes 4-5 into MAC_ADDR_L and bytes 0-3 into MAC_ADDR_H
 * (mirror of the unpacking in mge_get_mac_address()) */
306 mac_l = (if_mac[4] << 8) | (if_mac[5]);
307 mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
308 (if_mac[2] << 8) | (if_mac[3] << 0);
310 MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
311 MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
/* unicast filter is keyed on the last address byte */
313 mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
/*
 * Install a unicast DA filter entry for the given last MAC byte, steering
 * matches to `queue`; all other unicast filter registers are cleared.
 * Each filter register packs MGE_UCAST_REG_NUMBER one-byte entries
 * (8 bits per entry: pass bit | 3-bit queue).
 * NOTE(review): extraction-garbled fragment — missing braces and the
 * if/else inside the loop; restore from upstream if_mge.c.
 */
317 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
319 uint32_t reg_idx, reg_off, reg_val, i;
322 reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
323 reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
324 reg_val = (1 | (queue << 1)) << reg_off;
326 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
328 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
330 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
/*
 * Enable or disable promiscuous mode according to IFF_PROMISC:
 * when enabling, set PORT_CONFIG_UPM and open all special/other multicast
 * and unicast filter entries toward `queue`; when disabling, clear UPM,
 * wipe the multicast filters, and reinstall the station address filter.
 * NOTE(review): extraction-garbled fragment — fused line numbers and
 * missing braces/else; restore from upstream if_mge.c.
 */
335 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
337 uint32_t port_config;
340 /* Enable or disable promiscuous mode as needed */
341 if (sc->ifp->if_flags & IFF_PROMISC) {
342 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
343 port_config |= PORT_CONFIG_UPM;
344 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
/* replicate the (pass | queue) byte into all four entry slots */
346 reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
347 (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
349 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
350 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
351 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
354 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
355 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
/* leaving promiscuous mode: clear UPM and restore filtering */
358 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
359 port_config &= ~PORT_CONFIG_UPM;
360 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
362 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
363 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
364 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
367 mge_set_mac_address(sc);
/*
 * bus_dmamap_load() callback: stores the single segment's bus address
 * into the caller-provided pointer passed via `arg`.
 * NOTE(review): garbled fragment — the paddr declaration/assignment from
 * `arg` is missing between these lines; restore from upstream if_mge.c.
 */
372 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
376 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
379 *paddr = segs->ds_addr;
/*
 * Allocate a fresh cluster mbuf for an RX descriptor, unload any previous
 * mapping, DMA-map the new buffer and return its bus address via *paddr.
 * Panics if the load yields more than one segment (ring design assumes
 * exactly one segment per buffer).
 * NOTE(review): extraction-garbled fragment — fused line numbers, missing
 * braces, error return and *mbufp assignment; restore from upstream.
 */
383 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
386 struct mbuf *new_mbuf;
387 bus_dma_segment_t seg[1];
391 KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
393 new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
394 if (new_mbuf == NULL)
/* use the whole cluster as receive space */
396 new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
/* tear down the previous buffer's mapping before re-loading the map */
399 bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
400 bus_dmamap_unload(tag, map);
403 error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
405 KASSERT(nsegs == 1, ("Too many segments returned!"));
406 if (nsegs != 1 || error)
407 panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
409 bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
412 (*paddr) = seg->ds_addr;
/*
 * Allocate and map DMA-coherent memory for a ring of `size` descriptors,
 * chain them into a circular list via next_desc, then create a busdma tag
 * and per-descriptor maps for the data buffers (mbufs).
 * NOTE(review): extraction-garbled fragment — fused line numbers, missing
 * braces, loop-index decrements and error-path returns; restore from
 * upstream if_mge.c.
 */
417 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
418 uint32_t size, bus_dma_tag_t *buffer_tag)
420 struct mge_desc_wrapper *dw;
421 bus_addr_t desc_paddr;
/* walk backwards so each descriptor can point at the one allocated
 * just before it (the successor in ring order) */
425 for (i = size - 1; i >= 0; i--) {
427 error = bus_dmamem_alloc(sc->mge_desc_dtag,
428 (void**)&(dw->mge_desc),
429 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
433 if_printf(sc->ifp, "failed to allocate DMA memory\n");
438 error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
439 dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
440 &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
443 if_printf(sc->ifp, "can't load descriptor\n");
444 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
450 /* Chain descriptors */
451 dw->mge_desc->next_desc = desc_paddr;
452 desc_paddr = dw->mge_desc_paddr;
/* close the ring: last descriptor points back to the first */
454 tab[size - 1].mge_desc->next_desc = desc_paddr;
456 /* Allocate a busdma tag for mbufs. */
457 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
458 1, 0, /* alignment, boundary */
459 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
460 BUS_SPACE_MAXADDR, /* highaddr */
461 NULL, NULL, /* filtfunc, filtfuncarg */
462 MCLBYTES, 1, /* maxsize, nsegments */
463 MCLBYTES, 0, /* maxsegsz, flags */
464 NULL, NULL, /* lockfunc, lockfuncarg */
465 buffer_tag); /* dmat */
467 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
471 /* Create TX busdma maps */
472 for (i = 0; i < size; i++) {
474 error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
476 if_printf(sc->ifp, "failed to create map for mbuf\n");
/* start with no buffer attached to this descriptor */
480 dw->buffer = (struct mbuf*)NULL;
481 dw->mge_desc->buffer = (bus_addr_t)NULL;
/*
 * Top-level DMA setup: create the descriptor tag, build the TX and RX
 * descriptor rings, attach an mbuf cluster to every RX descriptor, and
 * record the ring start bus addresses for the hardware pointers.
 * NOTE(review): extraction-garbled fragment — fused line numbers, missing
 * braces and error handling; restore from upstream if_mge.c.
 */
488 mge_allocate_dma(struct mge_softc *sc)
491 struct mge_desc_wrapper *dw;
494 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
495 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
496 16, 0, /* alignment, boundary */
497 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
498 BUS_SPACE_MAXADDR, /* highaddr */
499 NULL, NULL, /* filtfunc, filtfuncarg */
500 sizeof(struct mge_desc), 1, /* maxsize, nsegments */
501 sizeof(struct mge_desc), 0, /* maxsegsz, flags */
502 NULL, NULL, /* lockfunc, lockfuncarg */
503 &sc->mge_desc_dtag); /* dmat */
506 mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
508 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
/* pre-populate every RX descriptor with a fresh cluster mbuf */
511 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
512 dw = &(sc->mge_rx_desc[i]);
513 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
514 &dw->mge_desc->buffer);
517 sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
518 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
/*
 * Tear down a descriptor ring: optionally free the attached mbufs and
 * their buffer maps, then unload and free the descriptor DMA memory.
 * `free_mbufs` is set for the RX ring (driver owns RX buffers) and clear
 * for TX (buffers are freed on completion in mge_intr_tx_locked()).
 * NOTE(review): extraction-garbled fragment — fused line numbers, missing
 * braces and m_freem/NULL-reset lines; restore from upstream if_mge.c.
 */
524 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
525 uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
527 struct mge_desc_wrapper *dw;
530 for (i = 0; i < size; i++) {
534 if (dw->buffer_dmap) {
536 bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
537 BUS_DMASYNC_POSTREAD);
538 bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
540 bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
544 /* Free RX descriptors */
546 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
547 BUS_DMASYNC_POSTREAD);
548 bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
549 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
/*
 * Release all DMA state: both descriptor rings (RX with its mbufs), the
 * per-ring mbuf tags, and finally the shared descriptor tag.
 * NOTE(review): garbled fragment — fused line numbers and missing braces;
 * restore from upstream if_mge.c.
 */
556 mge_free_dma(struct mge_softc *sc)
558 /* Free desciptors and mbufs */
559 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
560 mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
562 /* Destroy mbuf dma tag */
563 bus_dma_tag_destroy(sc->mge_tx_dtag);
564 bus_dma_tag_destroy(sc->mge_rx_dtag);
565 /* Destroy descriptors tag */
566 bus_dma_tag_destroy(sc->mge_desc_dtag);
/*
 * Rebuild the RX ring from scratch (used on RX resource errors): free the
 * old ring, allocate a new one, reattach mbufs, reset the hardware current
 * descriptor pointer and re-enable the RX queue. Caller holds the RX lock.
 * NOTE(review): extraction-garbled fragment — fused line numbers, missing
 * braces and the descriptor-enable loop; restore from upstream if_mge.c.
 */
570 mge_reinit_rx(struct mge_softc *sc)
572 struct mge_desc_wrapper *dw;
575 MGE_RECEIVE_LOCK_ASSERT(sc);
577 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
579 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
582 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
583 dw = &(sc->mge_rx_desc[i]);
584 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
585 &dw->mge_desc->buffer);
588 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
589 sc->rx_desc_curr = 0;
591 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
594 /* Enable RX queue */
595 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
598 #ifdef DEVICE_POLLING
/*
 * polling(4) handler: with interrupts masked, service pending TX
 * completions and up to `count` received frames; on POLL_AND_CHECK_STATUS
 * also ack interrupt causes and handle RX resource errors.
 * Returns the number of received packets (rx_npkts).
 * NOTE(review): extraction-garbled fragment — fused line numbers, missing
 * braces, the mge_reinit_rx() call on RXERRQ0 and the return; restore
 * from upstream if_mge.c.
 */
599 static poll_handler_t mge_poll;
602 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
604 struct mge_softc *sc = ifp->if_softc;
605 uint32_t int_cause, int_cause_ext;
610 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
611 MGE_GLOBAL_UNLOCK(sc);
615 if (cmd == POLL_AND_CHECK_STATUS) {
616 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
617 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
619 /* Check for resource error */
620 if (int_cause & MGE_PORT_INT_RXERRQ0)
/* write-1-to-preserve semantics: writing ~cause clears only the
 * bits that were set */
623 if (int_cause || int_cause_ext) {
624 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
625 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
629 mge_intr_tx_locked(sc);
630 rx_npkts = mge_intr_rx_locked(sc, count);
632 MGE_GLOBAL_UNLOCK(sc);
635 #endif /* DEVICE_POLLING */
/*
 * Device attach: read FDT properties, set version parameters, create
 * locks, allocate bus resources and DMA rings, configure the ifnet,
 * attach the PHY via mii_attach(), and hook up the interrupt handlers
 * (aggregated handler on single-IRQ parts, per-cause handlers otherwise).
 * NOTE(review): extraction-garbled fragment — fused line numbers, many
 * missing declarations, braces and error-path goto/cleanup code; restore
 * from upstream if_mge.c.
 */
638 mge_attach(device_t dev)
640 struct mge_softc *sc;
641 struct mii_softc *miisc;
643 uint8_t hwaddr[ETHER_ADDR_LEN];
646 sc = device_get_softc(dev);
648 sc->node = ofw_bus_get_node(dev);
650 /* Set chip version-dependent parameters */
653 /* Get phy address and used softc from fdt */
654 if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) != 0)
657 /* Initialize mutexes */
658 mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
659 mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
661 /* Allocate IO and IRQ resources */
662 error = bus_alloc_resources(dev, res_spec, sc->res);
664 device_printf(dev, "could not allocate resources\n");
669 /* Allocate DMA, buffers, buffer descriptors */
670 error = mge_allocate_dma(sc);
676 sc->tx_desc_curr = 0;
677 sc->rx_desc_curr = 0;
678 sc->tx_desc_used_idx = 0;
679 sc->tx_desc_used_count = 0;
681 /* Configure defaults for interrupts coalescing */
682 sc->rx_ic_time = 768;
683 sc->tx_ic_time = 768;
686 /* Allocate network interface */
687 ifp = sc->ifp = if_alloc(IFT_ETHER);
689 device_printf(dev, "if_alloc() failed\n");
694 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
696 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
697 ifp->if_capabilities = IFCAP_VLAN_MTU;
698 if (sc->mge_hw_csum) {
699 ifp->if_capabilities |= IFCAP_HWCSUM;
700 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
/* everything advertised is enabled by default */
702 ifp->if_capenable = ifp->if_capabilities;
704 #ifdef DEVICE_POLLING
705 /* Advertise that polling is supported */
706 ifp->if_capabilities |= IFCAP_POLLING;
709 ifp->if_init = mge_init;
710 ifp->if_start = mge_start;
711 ifp->if_ioctl = mge_ioctl;
713 ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
714 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
715 IFQ_SET_READY(&ifp->if_snd);
717 mge_get_mac_address(sc, hwaddr);
718 ether_ifattach(ifp, hwaddr);
719 callout_init(&sc->wd_callout, 0);
722 error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
723 mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
725 device_printf(dev, "attaching PHYs failed\n");
729 sc->mii = device_get_softc(sc->miibus);
731 /* Tell the MAC where to find the PHY so autoneg works */
732 miisc = LIST_FIRST(&sc->mii->mii_phys);
733 MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
735 /* Attach interrupt handlers */
736 /* TODO: review flags, in part. mark RX as INTR_ENTROPY ? */
/* single-IRQ chips use mge_intrs[0] (aggregated); multi-IRQ chips
 * index the table by IRQ line */
737 for (i = 1; i <= sc->mge_intr_cnt; ++i) {
738 error = bus_setup_intr(dev, sc->res[i],
739 INTR_TYPE_NET | INTR_MPSAFE,
740 NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
741 sc, &sc->ih_cookie[i - 1]);
743 device_printf(dev, "could not setup %s\n",
744 mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
/*
 * Device detach: stop the controller, drain the watchdog callout, tear
 * down interrupt handlers, detach the ifnet, and release DMA, bus
 * resources and mutexes — the reverse order of mge_attach().
 * NOTE(review): extraction-garbled fragment — fused line numbers, missing
 * braces, the mge_stop() call and if_free(); restore from upstream.
 */
754 mge_detach(device_t dev)
756 struct mge_softc *sc;
759 sc = device_get_softc(dev);
761 /* Stop controller and free TX queue */
765 /* Wait for stopping ticks */
766 callout_drain(&sc->wd_callout);
768 /* Stop and release all interrupts */
769 for (i = 0; i < sc->mge_intr_cnt; ++i) {
770 if (!sc->ih_cookie[i])
773 error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
775 device_printf(dev, "could not release %s\n",
776 mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
779 /* Detach network interface */
781 ether_ifdetach(sc->ifp);
785 /* Free DMA resources */
788 /* Free IO memory handler */
789 bus_release_resources(dev, res_spec, sc->res);
791 /* Destroy mutexes */
792 mtx_destroy(&sc->receive_lock);
793 mtx_destroy(&sc->transmit_lock);
/*
 * ifmedia status callback: poll the PHY via mii and report the active
 * media and link status, under the TX lock.
 * NOTE(review): garbled fragment — the mii assignment and mii_pollstat()
 * call are missing between these lines; restore from upstream if_mge.c.
 */
799 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
801 struct mge_softc *sc = ifp->if_softc;
802 struct mii_data *mii;
804 MGE_TRANSMIT_LOCK(sc);
809 ifmr->ifm_active = mii->mii_media_active;
810 ifmr->ifm_status = mii->mii_media_status;
812 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Translate an ifmedia word into a PORT_SERIAL_CTRL register value:
 * base config (reserved bit 9, force-link-fail, 1552-byte MRU) plus
 * speed/autoneg bits per media subtype and the full-duplex bit.
 * NOTE(review): extraction-garbled fragment — the case labels
 * (presumably IFM_1000_T / IFM_100_TX / etc. — confirm against upstream)
 * and the IFM_FDX test around line 844 are missing; restore from
 * upstream if_mge.c before building.
 */
816 mge_set_port_serial_control(uint32_t media)
818 uint32_t port_config;
820 port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
821 PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
823 if (IFM_TYPE(media) == IFM_ETHER) {
824 switch(IFM_SUBTYPE(media)) {
828 port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
829 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
830 PORT_SERIAL_SPEED_AUTONEG);
833 port_config |= (PORT_SERIAL_MII_SPEED_100 |
834 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
835 PORT_SERIAL_SPEED_AUTONEG);
838 port_config |= (PORT_SERIAL_AUTONEG |
839 PORT_SERIAL_AUTONEG_FC |
840 PORT_SERIAL_SPEED_AUTONEG);
844 port_config |= PORT_SERIAL_FULL_DUPLEX;
846 return (port_config);
/*
 * ifmedia change callback: if the interface is up, record the requested
 * media and let mii apply it, under the global lock.
 * NOTE(review): garbled fragment — the MGE_GLOBAL_LOCK() call and return
 * are missing; restore from upstream if_mge.c.
 */
850 mge_ifmedia_upd(struct ifnet *ifp)
852 struct mge_softc *sc = ifp->if_softc;
854 if (ifp->if_flags & IFF_UP) {
857 sc->mge_media_status = sc->mii->mii_media.ifm_media;
858 mii_mediachg(sc->mii);
861 MGE_GLOBAL_UNLOCK(sc);
/*
 * mge_init() body fragment: take the global lock and run the locked
 * initialization.
 * NOTE(review): the function signature and MGE_GLOBAL_LOCK() line are
 * missing from this garbled fragment; restore from upstream if_mge.c.
 */
870 struct mge_softc *sc = arg;
874 mge_init_locked(arg);
876 MGE_GLOBAL_UNLOCK(sc);
/*
 * Locked (re)initialization of the controller: stop + mask interrupts,
 * program MAC/multicast filters, TX queue arbitration, port and SDMA
 * configuration, descriptor ring pointers, enable the RX queue and the
 * serial port, wait for link, set interrupt coalescing, unmask
 * interrupts (unless polling), mark the interface running, and arm the
 * watchdog. Caller holds both locks.
 * NOTE(review): extraction-garbled fragment — fused line numbers, missing
 * braces, the mge_stop() call, the link-wait loop counter and the
 * mge_set_rxic()/mge_set_txic() calls; restore from upstream if_mge.c.
 */
880 mge_init_locked(void *arg)
882 struct mge_softc *sc = arg;
883 struct mge_desc_wrapper *dw;
884 volatile uint32_t reg_val;
888 MGE_GLOBAL_LOCK_ASSERT(sc);
893 /* Disable interrupts */
894 mge_intrs_ctrl(sc, 0);
896 /* Set MAC address */
897 mge_set_mac_address(sc);
899 /* Setup multicast filters */
900 mge_setup_multicast(sc);
/* v2 parts need RGMII and fixed-priority TX arbitration enabled */
902 if (sc->mge_ver == 2) {
903 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
904 MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
907 /* Initialize TX queue configuration registers */
908 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
909 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
910 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
912 /* Clear TX queue configuration registers for unused queues */
913 for (i = 1; i < 7; i++) {
914 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
915 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
916 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
919 /* Set default MTU */
920 MGE_WRITE(sc, sc->mge_mtu, 0);
922 /* Port configuration */
923 MGE_WRITE(sc, MGE_PORT_CONFIG,
924 PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
925 PORT_CONFIG_ARO_RXQ(0));
926 MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);
928 /* Setup port configuration */
929 reg_val = mge_set_port_serial_control(sc->mge_media_status);
930 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
932 /* Setup SDMA configuration */
933 MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
934 MGE_SDMA_TX_BYTE_SWAP |
935 MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
936 MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
938 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
/* point the hardware at the start of both descriptor rings */
940 MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
941 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
944 /* Reset descriptor indexes */
945 sc->tx_desc_curr = 0;
946 sc->rx_desc_curr = 0;
947 sc->tx_desc_used_idx = 0;
948 sc->tx_desc_used_count = 0;
950 /* Enable RX descriptors */
951 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
952 dw = &sc->mge_rx_desc[i];
/* hand ownership of every RX descriptor to the DMA engine */
953 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
954 dw->mge_desc->buff_size = MCLBYTES;
955 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
956 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
959 /* Enable RX queue */
960 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
/* enable the port serial interface */
963 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
964 reg_val |= PORT_SERIAL_ENABLE;
965 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
/* wait (bounded) for link-up before proceeding */
968 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
969 if (reg_val & MGE_STATUS_LINKUP)
973 if_printf(sc->ifp, "Timeout on link-up\n");
978 /* Setup interrupts coalescing */
982 /* Enable interrupts */
983 #ifdef DEVICE_POLLING
985 * * ...only if polling is not turned on. Disable interrupts explicitly
986 * if polling is enabled.
988 if (sc->ifp->if_capenable & IFCAP_POLLING)
989 mge_intrs_ctrl(sc, 0);
991 #endif /* DEVICE_POLLING */
992 mge_intrs_ctrl(sc, 1);
994 /* Activate network interface */
995 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
996 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
999 /* Schedule watchdog timeout */
1000 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * Aggregated interrupt handler (single-IRQ chips): ack and service both
 * TX completions and RX under the global lock; bails out early when
 * polling(4) owns the device.
 * NOTE(review): extraction-garbled fragment — fused line numbers and
 * missing braces/return; restore from upstream if_mge.c.
 */
1004 mge_intr_rxtx(void *arg)
1006 struct mge_softc *sc = arg;
1007 uint32_t int_cause, int_cause_ext;
1009 MGE_GLOBAL_LOCK(sc);
1011 #ifdef DEVICE_POLLING
1012 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1013 MGE_GLOBAL_UNLOCK(sc);
1018 /* Get interrupt cause */
1019 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1020 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1022 /* Check for Transmit interrupt */
1023 if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
1024 MGE_PORT_INT_EXT_TXUR)) {
/* ack only the TX cause bits (write-~bits clears set bits) */
1025 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1026 (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1027 mge_intr_tx_locked(sc);
1030 MGE_TRANSMIT_UNLOCK(sc);
1032 /* Check for Receive interrupt */
1033 mge_intr_rx_check(sc, int_cause, int_cause_ext);
1035 MGE_RECEIVE_UNLOCK(sc);
/*
 * Error interrupt: currently only logs the event.
 * NOTE(review): garbled fragment — ifp declaration missing; restore from
 * upstream if_mge.c.
 */
1039 mge_intr_err(void *arg)
1041 struct mge_softc *sc = arg;
1045 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * Misc interrupt: currently only logs the event.
 * NOTE(review): garbled fragment — ifp declaration missing; restore from
 * upstream if_mge.c.
 */
1049 mge_intr_misc(void *arg)
1051 struct mge_softc *sc = arg;
1055 if_printf(ifp, "%s\n", __FUNCTION__);
1059 mge_intr_rx(void *arg) {
1060 struct mge_softc *sc = arg;
1061 uint32_t int_cause, int_cause_ext;
1063 MGE_RECEIVE_LOCK(sc);
1065 #ifdef DEVICE_POLLING
1066 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1067 MGE_RECEIVE_UNLOCK(sc);
1072 /* Get interrupt cause */
1073 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1074 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1076 mge_intr_rx_check(sc, int_cause, int_cause_ext);
1078 MGE_RECEIVE_UNLOCK(sc);
/*
 * Shared RX-cause handling: on a resource error (RXERRQ0) the RX ring is
 * rebuilt (mge_reinit_rx elsewhere in the fragment); for ordinary RX
 * causes, ack the bits and process received frames without a packet
 * limit (count = -1).
 * NOTE(review): garbled fragment — the mge_reinit_rx() call inside the
 * RXERRQ0 branch is missing; restore from upstream if_mge.c.
 */
1082 mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
1083 uint32_t int_cause_ext)
1085 /* Check for resource error */
1086 if (int_cause & MGE_PORT_INT_RXERRQ0) {
1088 MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1089 ~(int_cause & MGE_PORT_INT_RXERRQ0));
/* keep only the RX-relevant bits before the generic ack below */
1092 int_cause &= MGE_PORT_INT_RXQ0;
1093 int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1095 if (int_cause || int_cause_ext) {
1096 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1097 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1098 mge_intr_rx_locked(sc, -1);
/*
 * Drain up to `count` received frames from the RX ring (count < 0 means
 * unlimited): for each DMA-completed descriptor, copy the frame into a
 * fresh mbuf chain via m_devget(), run checksum-offload post-processing,
 * hand the packet to the stack (dropping the RX lock around if_input),
 * then recycle the descriptor back to the DMA engine.
 * NOTE(review): extraction-garbled fragment — fused line numbers, missing
 * braces, the rx_npkts accounting and return; restore from upstream.
 * NOTE(review): `mb->m_pkthdr.len -= 2` presumably strips the 2-byte
 * hardware alignment prefix — confirm against upstream/datasheet.
 */
1103 mge_intr_rx_locked(struct mge_softc *sc, int count)
1105 struct ifnet *ifp = sc->ifp;
1108 struct mge_desc_wrapper* dw;
1112 MGE_RECEIVE_LOCK_ASSERT(sc);
1114 while (count != 0) {
1115 dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1116 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1117 BUS_DMASYNC_POSTREAD);
1120 status = dw->mge_desc->cmd_status;
1121 bufsize = dw->mge_desc->buff_size;
/* descriptor still owned by DMA engine: ring is drained */
1122 if ((status & MGE_DMA_OWNED) != 0)
1125 if (dw->mge_desc->byte_count &&
1126 ~(status & MGE_ERR_SUMMARY)) {
1128 bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1129 BUS_DMASYNC_POSTREAD);
/* copy out, dropping the trailing CRC */
1131 mb = m_devget(dw->buffer->m_data,
1132 dw->mge_desc->byte_count - ETHER_CRC_LEN,
1136 /* Give up if no mbufs */
1140 mb->m_pkthdr.len -= 2;
1143 mge_offload_process_frame(ifp, mb, status,
/* if_input may sleep/recurse — drop the RX lock around it */
1146 MGE_RECEIVE_UNLOCK(sc);
1147 (*ifp->if_input)(ifp, mb);
1148 MGE_RECEIVE_LOCK(sc);
/* recycle descriptor: zero count, give it back to DMA */
1152 dw->mge_desc->byte_count = 0;
1153 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1154 sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
1155 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1156 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * Summary interrupt: currently only logs the event.
 * NOTE(review): garbled fragment — ifp declaration missing; restore from
 * upstream if_mge.c.
 */
1166 mge_intr_sum(void *arg)
1168 struct mge_softc *sc = arg;
1172 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * Dedicated TX interrupt handler (multi-IRQ chips): ack the TX cause bits
 * and reap completed transmit descriptors under the TX lock; defers to
 * polling(4) when enabled.
 * NOTE(review): garbled fragment — fused line numbers and a missing
 * return in the polling branch; restore from upstream if_mge.c.
 */
1176 mge_intr_tx(void *arg)
1178 struct mge_softc *sc = arg;
1179 uint32_t int_cause_ext;
1181 MGE_TRANSMIT_LOCK(sc);
1183 #ifdef DEVICE_POLLING
1184 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1185 MGE_TRANSMIT_UNLOCK(sc);
1190 /* Ack the interrupt */
1191 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1192 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1193 (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1195 mge_intr_tx_locked(sc);
1197 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Reap completed TX descriptors: walk from tx_desc_used_idx while the DMA
 * engine has released ownership, update collision statistics from the
 * error bits, unload and free each transmitted mbuf, then clear OACTIVE
 * and restart transmission. Caller holds the TX lock.
 * NOTE(review): extraction-garbled fragment — fused line numbers, missing
 * braces, the watchdog-disable line and the opackets counter; restore
 * from upstream if_mge.c.
 */
1202 mge_intr_tx_locked(struct mge_softc *sc)
1204 struct ifnet *ifp = sc->ifp;
1205 struct mge_desc_wrapper *dw;
1206 struct mge_desc *desc;
1210 MGE_TRANSMIT_LOCK_ASSERT(sc);
1212 /* Disable watchdog */
1215 while (sc->tx_desc_used_count) {
1216 /* Get the descriptor */
1217 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1218 desc = dw->mge_desc;
1219 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1220 BUS_DMASYNC_POSTREAD);
1222 /* Get descriptor status */
1223 status = desc->cmd_status;
/* still owned by the DMA engine: nothing more to reap */
1225 if (status & MGE_DMA_OWNED)
1228 sc->tx_desc_used_idx =
1229 (++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;
1230 sc->tx_desc_used_count--;
1232 /* Update collision statistics */
1233 if (status & MGE_ERR_SUMMARY) {
1234 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1235 ifp->if_collisions++;
/* retry-limit error counts as 16 collisions */
1236 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1237 ifp->if_collisions += 16;
1240 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1241 BUS_DMASYNC_POSTWRITE);
1242 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1243 m_freem(dw->buffer);
1244 dw->buffer = (struct mbuf*)NULL;
1251 /* Now send anything that was pending */
1252 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1253 mge_start_locked(ifp);
/*
 * ifnet ioctl handler: SIOCSIFFLAGS (promisc/allmulti changes or full
 * reinit/stop), SIOCADD/DELMULTI (rebuild multicast filter), SIOCSIFCAP
 * (HWCSUM and DEVICE_POLLING toggles), SIOCG/SIFMEDIA (delegated to
 * ifmedia, rejecting 1000baseT half-duplex), default to ether_ioctl().
 * NOTE(review): extraction-garbled fragment — fused line numbers, the
 * switch(command)/case labels, mge_stop() call and returns are missing;
 * restore from upstream if_mge.c.
 */
1258 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1260 struct mge_softc *sc = ifp->if_softc;
1261 struct ifreq *ifr = (struct ifreq *)data;
1269 MGE_GLOBAL_LOCK(sc);
1271 if (ifp->if_flags & IFF_UP) {
1272 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* only react to the flags that actually changed */
1273 flags = ifp->if_flags ^ sc->mge_if_flags;
1274 if (flags & IFF_PROMISC)
1275 mge_set_prom_mode(sc,
1276 MGE_RX_DEFAULT_QUEUE);
1278 if (flags & IFF_ALLMULTI)
1279 mge_setup_multicast(sc);
1281 mge_init_locked(sc);
1283 else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1286 sc->mge_if_flags = ifp->if_flags;
1287 MGE_GLOBAL_UNLOCK(sc);
/* multicast list changed: rebuild hardware filter */
1291 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1292 MGE_GLOBAL_LOCK(sc);
1293 mge_setup_multicast(sc);
1294 MGE_GLOBAL_UNLOCK(sc);
1298 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1299 if (mask & IFCAP_HWCSUM) {
1300 ifp->if_capenable &= ~IFCAP_HWCSUM;
1301 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1302 if (ifp->if_capenable & IFCAP_TXCSUM)
1303 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1305 ifp->if_hwassist = 0;
1307 #ifdef DEVICE_POLLING
1308 if (mask & IFCAP_POLLING) {
1309 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1310 error = ether_poll_register(mge_poll, ifp);
/* polling on: mask hardware interrupts */
1314 MGE_GLOBAL_LOCK(sc);
1315 mge_intrs_ctrl(sc, 0);
1316 ifp->if_capenable |= IFCAP_POLLING;
1317 MGE_GLOBAL_UNLOCK(sc);
1319 error = ether_poll_deregister(ifp);
1320 MGE_GLOBAL_LOCK(sc);
1321 mge_intrs_ctrl(sc, 1);
1322 ifp->if_capenable &= ~IFCAP_POLLING;
1323 MGE_GLOBAL_UNLOCK(sc);
1328 case SIOCGIFMEDIA: /* fall through */
1330 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1331 && !(ifr->ifr_media & IFM_FDX)) {
1332 device_printf(sc->dev,
1333 "1000baseTX half-duplex unsupported\n");
1336 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1339 error = ether_ioctl(ifp, command, data);
/*
 * Read a PHY register through the SMI interface: issue the read command,
 * poll (bounded retries) for READVALID, then return the low 16 bits of
 * the SMI register. Reads go through the sibling softc (phy_sc) that owns
 * the SMI block.
 * NOTE(review): garbled fragment — fused line numbers and the while-loop
 * header around the retry counter is split; restore from upstream.
 */
1345 mge_miibus_readreg(device_t dev, int phy, int reg)
1347 struct mge_softc *sc;
1350 sc = device_get_softc(dev);
1352 MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
1353 (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1355 retries = MGE_SMI_READ_RETRIES;
1357 !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
1358 DELAY(MGE_SMI_READ_DELAY);
1361 device_printf(dev, "Timeout while reading from PHY\n");
1363 return (MGE_READ(sc->phy_sc, MGE_REG_SMI) & 0xffff);
/*
 * Write a PHY register through the SMI interface: issue the write command
 * and poll (bounded retries) until the SMI block is no longer busy.
 * NOTE(review): garbled fragment — fused line numbers and the return are
 * missing; restore from upstream if_mge.c.
 */
1367 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1369 struct mge_softc *sc;
1372 sc = device_get_softc(dev);
1374 MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
1375 (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1377 retries = MGE_SMI_WRITE_RETRIES;
1378 while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
1379 DELAY(MGE_SMI_WRITE_DELAY);
1382 device_printf(dev, "Timeout while writing to PHY\n");
/*
 * Device probe: match FDT nodes with status "okay" that are compatible
 * with "mrvl,ge"; set the human-readable device description on match.
 */
1387 mge_probe(device_t dev)
1390 if (!ofw_bus_status_okay(dev))
1393 if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1396 device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1397 return (BUS_PROBE_DEFAULT);
/*
 * Resume method stub: only announces the call; no hardware state is
 * restored here (the visible body does nothing else).
 */
1401 mge_resume(device_t dev)
1404 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * Shutdown method: under the global (RX+TX) lock, undo DEVICE_POLLING
 * registration if it is active.  NOTE(review): the lines between the
 * polling block and the unlock are not visible here -- presumably the
 * controller is stopped there; confirm against the full source.
 */
1411 mge_shutdown(device_t dev)
1413 struct mge_softc *sc = device_get_softc(dev);
1415 MGE_GLOBAL_LOCK(sc);
1417 #ifdef DEVICE_POLLING
/* Leave polling mode so the interface is not polled after shutdown. */
1418 if (sc->ifp->if_capenable & IFCAP_POLLING)
1419 ether_poll_deregister(sc->ifp);
1424 MGE_GLOBAL_UNLOCK(sc);
/*
 * Encapsulate mbuf chain m0 into one TX descriptor: reserve a ring
 * slot, DMA-load the mbuf (single segment expected), fill the
 * descriptor, and advance the ring indices.  Returns 0 on success,
 * the bus_dma error, or -1 when the mbuf did not map to exactly one
 * segment.  NOTE(review): several lines (returns in the full-ring
 * path, the DMA-owned bit, loop close) are not visible in this view.
 */
1428 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1430 struct mge_desc_wrapper *dw = NULL;
1432 bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1440 /* Check for free descriptors */
1441 if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1442 /* No free descriptors */
1446 /* Fetch unused map */
1447 desc_no = sc->tx_desc_curr;
1448 dw = &sc->mge_tx_desc[desc_no];
1449 mapp = dw->buffer_dmap;
1451 /* Create mapping in DMA memory */
1452 error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
/* A defragged mbuf must fit one segment; anything else is rejected. */
1454 if (error != 0 || nsegs != 1 ) {
1455 bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1456 return ((error != 0) ? error : -1);
1459 bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1461 /* Everything is ok, now we can send buffers */
1462 for (seg = 0; seg < nsegs; seg++) {
1463 dw->mge_desc->byte_count = segs[seg].ds_len;
1464 dw->mge_desc->buffer = segs[seg].ds_addr;
/* Clear status, apply checksum-offload bits, then set frame flags. */
1466 dw->mge_desc->cmd_status = 0;
1468 mge_offload_setup_descriptor(sc, dw);
1469 dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1470 MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
/* Flush the descriptor itself so the DMA engine sees the update. */
1474 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1475 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Advance the producer index (ring wraps modulo MGE_TX_DESC_NUM). */
1477 sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
1478 sc->tx_desc_used_count++;
/* NOTE(review): the function signature is not visible in this view;
 * from the callout_reset self-reference this is mge_tick(void *msc),
 * the once-per-second timer: run the TX watchdog check, refresh media
 * state on change, and re-arm itself. */
1485 struct mge_softc *sc = msc;
1487 /* Check for TX timeout */
1492 /* Check for media type change */
/* Re-negotiate/refresh if the recorded media differs from the mii's. */
1493 if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1494 mge_ifmedia_upd(sc->ifp);
1496 /* Schedule another timeout one second from now */
1497 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * TX watchdog: called from the tick timer.  If the timer is disarmed
 * (0) or has not yet expired (decrement is non-zero), do nothing;
 * otherwise report the timeout and reinitialize the controller.
 * NOTE(review): lines between the early unlock and the reinit are not
 * visible in this view.
 */
1501 mge_watchdog(struct mge_softc *sc)
1507 MGE_GLOBAL_LOCK(sc);
/* wd_timer == 0 means disarmed; non-zero after decrement means still counting. */
1509 if (sc->wd_timer == 0 || --sc->wd_timer) {
1510 MGE_GLOBAL_UNLOCK(sc);
1515 if_printf(ifp, "watchdog timeout\n");
/* Recover the hardware by running the locked (re)initialization path. */
1518 mge_init_locked(sc);
1520 MGE_GLOBAL_UNLOCK(sc);
/*
 * if_start entry point: thin wrapper that takes the transmit lock
 * around mge_start_locked().
 */
1524 mge_start(struct ifnet *ifp)
1526 struct mge_softc *sc = ifp->if_softc;
1528 MGE_TRANSMIT_LOCK(sc);
1530 mge_start_locked(ifp);
1532 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Transmit-locked start: drain the interface send queue, defragment
 * each packet into a single mbuf, hand it to mge_encap(), then kick
 * the TX queue.  On encap failure the packet is re-queued and
 * IFF_DRV_OACTIVE is set.  NOTE(review): several lines (queue-empty
 * break, defrag-failure handling, BPF tap, queued counter use) are
 * not visible in this view.
 */
1536 mge_start_locked(struct ifnet *ifp)
1538 struct mge_softc *sc;
1539 struct mbuf *m0, *mtmp;
1540 uint32_t reg_val, queued = 0;
1544 MGE_TRANSMIT_LOCK_ASSERT(sc);
/* Nothing to do unless RUNNING is set and OACTIVE is clear. */
1546 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1551 /* Get packet from the queue */
1552 IF_DEQUEUE(&ifp->if_snd, m0);
/* Collapse the chain: mge_encap() requires exactly one DMA segment. */
1556 mtmp = m_defrag(m0, M_NOWAIT);
1560 if (mge_encap(sc, m0)) {
/* No descriptor available: push the packet back and mark busy. */
1561 IF_PREPEND(&ifp->if_snd, m0);
1562 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1570 /* Enable transmitter and watchdog timer */
1571 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1572 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
/*
 * Stop the controller: halt the tick callout, mark the interface
 * down, mask interrupts, disable the RX/TX queues, reclaim and free
 * all pending TX mbufs, wait for the transmitter to drain, then
 * disable the port.  NOTE(review): some lines (ifp assignment, lock
 * assert, the drain-loop header/timeout counter) are not visible in
 * this view.
 */
1578 mge_stop(struct mge_softc *sc)
1581 volatile uint32_t reg_val, status;
1582 struct mge_desc_wrapper *dw;
1583 struct mge_desc *desc;
/* Already stopped -- nothing to do. */
1588 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1591 /* Stop tick engine */
1592 callout_stop(&sc->wd_callout);
1594 /* Disable interface */
1595 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1598 /* Disable interrupts */
1599 mge_intrs_ctrl(sc, 0);
1601 /* Disable Rx and Tx */
1602 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1603 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1604 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1606 /* Remove pending data from TX queue */
/* Walk the used region of the TX ring and release every mbuf. */
1607 while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1608 sc->tx_desc_used_count) {
1609 /* Get the descriptor */
1610 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1611 desc = dw->mge_desc;
1612 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1613 BUS_DMASYNC_POSTREAD);
1615 /* Get descriptor status */
1616 status = desc->cmd_status;
/* Descriptor still owned by the DMA engine -- stop reclaiming. */
1618 if (status & MGE_DMA_OWNED)
/* Advance the consumer index (ring wraps) and drop the use count. */
1621 sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) %
1623 sc->tx_desc_used_count--;
1625 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1626 BUS_DMASYNC_POSTWRITE);
1627 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1629 m_freem(dw->buffer);
1630 dw->buffer = (struct mbuf*)NULL;
1633 /* Wait for end of transmission */
/* Poll port status until TX is idle and the FIFO is empty. */
1636 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1637 if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1638 (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1644 if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
/* Finally clear the port-enable bit in the serial control register. */
1647 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1648 reg_val &= ~(PORT_SERIAL_ENABLE);
1649 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
/*
 * Suspend method stub: only announces the call; no state is saved
 * here (the visible body does nothing else).
 */
1653 mge_suspend(device_t dev)
1656 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * RX checksum offload: translate hardware status bits of a received
 * frame into mbuf csum_flags.  Only acts when RXCSUM is enabled on
 * the interface.  NOTE(review): the csum_flags declaration/init line
 * is not visible in this view.
 */
1661 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1662 uint32_t status, uint16_t bufsize)
1666 if (ifp->if_capenable & IFCAP_RXCSUM) {
/* Hardware validated the IPv4 header checksum. */
1667 if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1668 csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
/* L4 checksum is valid only for unfragmented TCP/UDP frames. */
1670 if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1671 (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1672 (status & MGE_RX_L4_CSUM_OK)) {
1673 csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
/* 0xFFFF tells the stack the pseudo-header checksum is already good. */
1674 frame->m_pkthdr.csum_data = 0xFFFF;
1677 frame->m_pkthdr.csum_flags = csum_flags;
/*
 * TX checksum offload: inspect the outgoing frame's Ethernet/VLAN and
 * IP headers and set the corresponding checksum-generation bits in
 * the TX descriptor's cmd_status.  Non-IP frames are reported and (per
 * the visible early-out structure) left without offload bits.
 * NOTE(review): declarations of etype/ehlen/cmd_status and some braces
 * are not visible in this view.
 */
1682 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1684 struct mbuf *m0 = dw->buffer;
1685 struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1686 int csum_flags = m0->m_pkthdr.csum_flags;
/* VLAN-tagged frames carry the real ethertype in evl_proto and a
 * 4-byte longer header. */
1692 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1693 etype = ntohs(eh->evl_proto);
1694 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1695 csum_flags |= MGE_TX_VLAN_TAGGED;
1697 etype = ntohs(eh->evl_encap_proto);
1698 ehlen = ETHER_HDR_LEN;
/* Hardware offload only supports IPv4 payloads. */
1701 if (etype != ETHERTYPE_IP) {
1703 "TCP/IP Offload enabled for unsupported "
/* Tell the controller the IP header offset/length for checksumming. */
1708 ip = (struct ip *)(m0->m_data + ehlen);
1709 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1710 cmd_status |= MGE_TX_NOT_FRAGMENT;
1713 if (csum_flags & CSUM_IP)
1714 cmd_status |= MGE_TX_GEN_IP_CSUM;
1716 if (csum_flags & CSUM_TCP)
1717 cmd_status |= MGE_TX_GEN_L4_CSUM;
/* UDP needs the L4-checksum bit plus the UDP protocol flag. */
1719 if (csum_flags & CSUM_UDP)
1720 cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1722 dw->mge_desc->cmd_status |= cmd_status;
/*
 * Enable (enable != 0) or disable (enable == 0) the controller's
 * interrupt sources.  Enabling unmasks the RX queue and extended
 * (TX error/buffer, RX overrun, TX underrun) sources; disabling
 * clears all cause and mask registers.  NOTE(review): the enclosing
 * if/else structure is partly not visible in this view.
 */
1726 mge_intrs_ctrl(struct mge_softc *sc, int enable)
/* Unmask the sources this driver services. */
1730 MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1731 MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1732 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1733 MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1734 MGE_PORT_INT_EXT_TXBUF0);
/* Disable path: acknowledge all pending causes and mask everything. */
1736 MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1737 MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1739 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1740 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1742 MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1743 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
/*
 * Table-driven CRC-8 over `size` bytes of `data`, used to index the
 * hardware's "other multicast" hash filter table.  The 256-entry
 * lookup table matches the CRC-8 polynomial 0x07 (x^8+x^2+x+1) --
 * NOTE(review): polynomial inferred from the table contents; confirm.
 * NOTE(review): the crc accumulator declaration, the byte loop header
 * and the return are not visible in this view.
 */
1748 mge_crc8(uint8_t *data, int size)
1751 static const uint8_t ct[256] = {
1752 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1753 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1754 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1755 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1756 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1757 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1758 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1759 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1760 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1761 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1762 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1763 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1764 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1765 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1766 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1767 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1768 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1769 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1770 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
1771 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
1772 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
1773 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
1774 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
1775 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
1776 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
1777 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
1778 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
1779 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
1780 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
1781 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
1782 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
1783 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
/* Fold each input byte into the running CRC via the lookup table. */
1787 crc = ct[crc ^ *(data++)];
/*
 * Program the hardware multicast filters.  With IFF_ALLMULTI every
 * entry of both the "special" (01:00:5E:00:00:xx) and "other"
 * multicast tables accepts to the default RX queue; otherwise the
 * tables are built from the interface's multicast address list --
 * special-range addresses are indexed by their last octet, all others
 * by the CRC-8 of the full MAC.  NOTE(review): some lines (continue,
 * index computation for the special case, braces) are not visible in
 * this view.
 */
1793 mge_setup_multicast(struct mge_softc *sc)
1795 uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
/* Table-entry byte: pass bit plus the target RX queue number. */
1796 uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
1797 uint32_t smt[MGE_MCAST_REG_NUMBER];
1798 uint32_t omt[MGE_MCAST_REG_NUMBER];
1799 struct ifnet *ifp = sc->ifp;
1800 struct ifmultiaddr *ifma;
1804 if (ifp->if_flags & IFF_ALLMULTI) {
/* Accept everything: replicate the entry byte into every word. */
1805 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
1806 smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
1808 memset(smt, 0, sizeof(smt));
1809 memset(omt, 0, sizeof(omt));
1811 if_maddr_rlock(ifp);
1812 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1813 if (ifma->ifma_addr->sa_family != AF_LINK)
1816 mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
/* Addresses in 01:00:5E:00:00:xx go to the special table. */
1817 if (memcmp(mac, special, sizeof(special)) == 0) {
1819 smt[i >> 2] |= v << ((i & 0x03) << 3);
/* Everything else hashes into the other-multicast table. */
1821 i = mge_crc8(mac, ETHER_ADDR_LEN);
1822 omt[i >> 2] |= v << ((i & 0x03) << 3);
1825 if_maddr_runlock(ifp);
/* Write both computed tables to the hardware. */
1828 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
1829 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
1830 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
/*
 * Program the RX interrupt coalescing time: clamp rx_ic_time to the
 * chip's maximum, then rewrite the IPG field of the SDMA config
 * register (mask with the max-value pattern, OR in the new value).
 */
1835 mge_set_rxic(struct mge_softc *sc)
1839 if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1840 sc->rx_ic_time = sc->mge_rx_ipg_max;
1842 reg = MGE_READ(sc, MGE_SDMA_CONFIG);
/* Clearing with the max value zeroes the whole version-dependent field. */
1843 reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1844 reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1845 MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
/*
 * Program the TX interrupt coalescing time: clamp tx_ic_time to the
 * chip's maximum, then rewrite the IPG field of the TX FIFO urgent
 * threshold register (mirror of mge_set_rxic for the TX side).
 */
1849 mge_set_txic(struct mge_softc *sc)
1853 if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1854 sc->tx_ic_time = sc->mge_tfut_ipg_max;
1856 reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
/* Clearing with the max value zeroes the whole version-dependent field. */
1857 reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1858 reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1859 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
/*
 * Sysctl handler for interrupt-coalescing times.  arg2 selects RX
 * (MGE_IC_RX) or TX; reads report the current value, writes store the
 * new time under the global lock and (per the visible structure)
 * reprogram the hardware.  NOTE(review): the error/early-return after
 * sysctl_handle_int and the mge_set_rxic/txic calls are not visible
 * in this view.
 */
1863 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1865 struct mge_softc *sc = (struct mge_softc *)arg1;
/* Expose the currently configured value to the caller. */
1869 time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1870 error = sysctl_handle_int(oidp, &time, 0, req);
1874 MGE_GLOBAL_LOCK(sc);
1875 if (arg2 == MGE_IC_RX) {
1876 sc->rx_ic_time = time;
1879 sc->tx_ic_time = time;
1882 MGE_GLOBAL_UNLOCK(sc);
1888 mge_add_sysctls(struct mge_softc *sc)
1890 struct sysctl_ctx_list *ctx;
1891 struct sysctl_oid_list *children;
1892 struct sysctl_oid *tree;
1894 ctx = device_get_sysctl_ctx(sc->dev);
1895 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1896 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1897 CTLFLAG_RD, 0, "MGE Interrupts coalescing");
1898 children = SYSCTL_CHILDREN(tree);
1900 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1901 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1902 "I", "IC RX time threshold");
1903 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1904 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1905 "I", "IC TX time threshold");