2 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5 * Developed by Semihalf.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of MARVELL nor the names of contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
50 #include <net/ethernet.h>
53 #include <net/if_var.h>
54 #include <net/if_arp.h>
55 #include <net/if_dl.h>
56 #include <net/if_media.h>
57 #include <net/if_types.h>
58 #include <net/if_vlan_var.h>
60 #include <netinet/in_systm.h>
61 #include <netinet/in.h>
62 #include <netinet/ip.h>
64 #include <sys/sockio.h>
66 #include <machine/bus.h>
68 #include <machine/resource.h>
70 #include <dev/mii/mii.h>
71 #include <dev/mii/miivar.h>
73 #include <dev/fdt/fdt_common.h>
74 #include <dev/ofw/ofw_bus.h>
75 #include <dev/ofw/ofw_bus_subr.h>
77 #include <dev/mge/if_mgevar.h>
78 #include <arm/mv/mvreg.h>
79 #include <arm/mv/mvvar.h>
81 #include "miibus_if.h"
/*
 * Forward declarations for all file-local routines.
 * (NOTE(review): extraction dropped some lines in this region — e.g. the
 * continuation of mge_set_ucast_address()'s prototype — verify against
 * the upstream file.)
 */

/* newbus device interface */
83 static int mge_probe(device_t dev);
84 static int mge_attach(device_t dev);
85 static int mge_detach(device_t dev);
86 static int mge_shutdown(device_t dev);
87 static int mge_suspend(device_t dev);
88 static int mge_resume(device_t dev);
/* MII bus access (PHY register read/write via the SMI register) */
90 static int mge_miibus_readreg(device_t dev, int phy, int reg);
91 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
/* ifmedia callbacks */
93 static int mge_ifmedia_upd(struct ifnet *ifp);
94 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
/* ifnet entry points and watchdog */
96 static void mge_init(void *arg);
97 static void mge_init_locked(void *arg);
98 static void mge_start(struct ifnet *ifp);
99 static void mge_start_locked(struct ifnet *ifp);
100 static void mge_watchdog(struct mge_softc *sc);
101 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
/* Chip-version dependent parameter helpers */
103 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
104 static uint32_t mge_rx_ipg(uint32_t val, int ver);
105 static void mge_ver_params(struct mge_softc *sc);
/* Interrupt handlers */
107 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
108 static void mge_intr_rxtx(void *arg);
109 static void mge_intr_rx(void *arg);
110 static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
111 uint32_t int_cause_ext);
112 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
113 static void mge_intr_tx(void *arg);
114 static void mge_intr_tx_locked(struct mge_softc *sc);
115 static void mge_intr_misc(void *arg);
116 static void mge_intr_sum(void *arg);
117 static void mge_intr_err(void *arg);
118 static void mge_stop(struct mge_softc *sc);
119 static void mge_tick(void *msc);
/* MAC address / filtering helpers */
120 static uint32_t mge_set_port_serial_control(uint32_t media);
121 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
122 static void mge_set_mac_address(struct mge_softc *sc);
123 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
125 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
/* DMA descriptor/buffer management */
126 static int mge_allocate_dma(struct mge_softc *sc);
127 static int mge_alloc_desc_dma(struct mge_softc *sc,
128 struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
129 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
130 struct mbuf **mbufp, bus_addr_t *paddr);
131 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
132 static void mge_free_dma(struct mge_softc *sc);
133 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
134 bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
/* Checksum offload, multicast and interrupt-coalescing setup */
135 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
136 uint32_t status, uint16_t bufsize);
137 static void mge_offload_setup_descriptor(struct mge_softc *sc,
138 struct mge_desc_wrapper *dw);
139 static uint8_t mge_crc8(uint8_t *data, int size);
140 static void mge_setup_multicast(struct mge_softc *sc);
141 static void mge_set_rxic(struct mge_softc *sc);
142 static void mge_set_txic(struct mge_softc *sc);
143 static void mge_add_sysctls(struct mge_softc *sc);
144 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
/*
 * newbus method table: device and miibus interfaces exported by this driver.
 * NOTE(review): the terminating entry (DEVMETHOD_END) and closing brace were
 * dropped by extraction — confirm against upstream.
 */
146 static device_method_t mge_methods[] = {
147 /* Device interface */
148 DEVMETHOD(device_probe, mge_probe),
149 DEVMETHOD(device_attach, mge_attach),
150 DEVMETHOD(device_detach, mge_detach),
151 DEVMETHOD(device_shutdown, mge_shutdown),
152 DEVMETHOD(device_suspend, mge_suspend),
153 DEVMETHOD(device_resume, mge_resume),
/* MII interface — lets miibus children access PHY registers through us */
155 DEVMETHOD(miibus_readreg, mge_miibus_readreg),
156 DEVMETHOD(miibus_writereg, mge_miibus_writereg),
/* Driver description: name, method table, and softc size for newbus. */
160 static driver_t mge_driver = {
163 sizeof(struct mge_softc),
/* Register mge on simplebus and miibus under mge; declare module deps. */
166 static devclass_t mge_devclass;
168 DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
169 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
170 MODULE_DEPEND(mge, ether, 1, 1, 1);
171 MODULE_DEPEND(mge, miibus, 1, 1, 1);
/*
 * Bus resources: one memory window plus up to three shareable IRQs
 * (how many are actually used depends on sc->mge_intr_cnt).
 */
173 static struct resource_spec res_spec[] = {
174 { SYS_RES_MEMORY, 0, RF_ACTIVE },
175 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
176 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
177 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
/*
 * Interrupt handler table indexed per IRQ line.  Entry 0 (the aggregated
 * rx/tx handler) is used when the chip exposes a single combined interrupt
 * (mge_intr_cnt == 1); otherwise entries 1..MGE_INTR_COUNT map per-line.
 */
182 driver_intr_t *handler;
184 } mge_intrs[MGE_INTR_COUNT + 1] = {
185 { mge_intr_rxtx,"GbE aggregated interrupt" },
186 { mge_intr_rx, "GbE receive interrupt" },
187 { mge_intr_tx, "GbE transmit interrupt" },
188 { mge_intr_misc,"GbE misc interrupt" },
189 { mge_intr_sum, "GbE summary interrupt" },
190 { mge_intr_err, "GbE error interrupt" },
/*
 * Obtain the station MAC address: prefer the "local-mac-address" property
 * from the device tree; if that is absent/invalid, fall back to the address
 * currently programmed into the MGE_MAC_ADDR_{H,L} registers.
 * Writes 6 bytes into 'addr'.
 */
194 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
196 uint32_t mac_l, mac_h;
201 * Retrieve hw address from the device tree.
203 i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
/* NOTE(review): validity check of the property (dropped by extraction)
 * presumably gates the bcopy below — verify against upstream. */
206 for (i = 0; i < 6; i++)
213 bcopy(lmac, addr, 6);
219 * Fall back -- use the currently programmed address.
221 mac_l = MGE_READ(sc, MGE_MAC_ADDR_L)\u003b
222 mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
/* Registers hold the address big-endian: H = bytes 0-3, L = bytes 4-5 */
224 addr[0] = (mac_h & 0xff000000) >> 24;
225 addr[1] = (mac_h & 0x00ff0000) >> 16;
226 addr[2] = (mac_h & 0x0000ff00) >> 8;
227 addr[3] = (mac_h & 0x000000ff);
228 addr[4] = (mac_l & 0x0000ff00) >> 8;
229 addr[5] = (mac_l & 0x000000ff);
/*
 * Encode the TX FIFO urgent threshold IPG field for the given chip version.
 * Version 1 masks to 14 bits, version 2 to 16 bits; both shift left by 4.
 * NOTE(review): the switch/if structure selecting between the two returns
 * was dropped by extraction.
 */
233 mge_tfut_ipg(uint32_t val, int ver)
238 return ((val & 0x3fff) << 4);
241 return ((val & 0xffff) << 4);
/*
 * Encode the RX IPG value for the given chip version.  Version 2 splits the
 * value across two register fields (top bit shifted to bit 25, low 15 bits
 * to bit 7).  NOTE(review): version-selection logic dropped by extraction.
 */
246 mge_rx_ipg(uint32_t val, int ver)
251 return ((val & 0x3fff) << 8);
254 return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
/*
 * Set chip-version dependent limits and defaults in the softc based on the
 * Marvell SoC device ID: IPG maxima, TX arbiter/token configuration, and
 * the number of interrupt lines the controller exposes.
 */
259 mge_ver_params(struct mge_softc *sc)
/* Newer SoCs (Kirkwood 88F62xx, Discovery MV78xxx families): v2 register
 * layout with wider fields */
264 if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
265 d == MV_DEV_88F6282 ||
266 d == MV_DEV_MV78100 ||
267 d == MV_DEV_MV78100_Z0 ||
268 (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
271 sc->mge_tfut_ipg_max = 0xFFFF;
272 sc->mge_rx_ipg_max = 0xFFFF;
273 sc->mge_tx_arb_cfg = 0xFC0000FF;
274 sc->mge_tx_tok_cfg = 0xFFFF7FFF;
275 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/* Older SoCs: v1 layout with 14-bit IPG fields */
279 sc->mge_tfut_ipg_max = 0x3FFF;
280 sc->mge_rx_ipg_max = 0x3FFF;
281 sc->mge_tx_arb_cfg = 0x000000FF;
282 sc->mge_tx_tok_cfg = 0x3FFFFFFF;
283 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/* 88RC8180 has a single (aggregated) interrupt line */
285 if (d == MV_DEV_88RC8180)
286 sc->mge_intr_cnt = 1;
288 sc->mge_intr_cnt = 2;
/* Armada XP variants — NOTE(review): the assignment made for these devices
 * was dropped by extraction */
290 if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
/*
 * Program the interface's link-level address into the MAC address registers
 * and install the matching unicast filter entry.  Caller must hold both
 * driver locks (asserted below).
 */
297 mge_set_mac_address(struct mge_softc *sc)
300 uint32_t mac_l, mac_h;
302 MGE_GLOBAL_LOCK_ASSERT(sc);
304 if_mac = (char *)IF_LLADDR(sc->ifp);
/* Pack the 6-byte address big-endian into the two registers */
306 mac_l = (if_mac[4] << 8) | (if_mac[5]);
307 mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
308 (if_mac[2] << 8) | (if_mac[3] << 0);
310 MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
311 MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
/* Unicast filter table is indexed by the address' last byte */
313 mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
/*
 * Point the hardware unicast filter at our address: set the "pass" bit and
 * target RX queue in the table slot selected by the address' last byte, and
 * clear every other slot.
 */
317 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
319 uint32_t reg_idx, reg_off, reg_val, i;
/* Each filter register packs MGE_UCAST_REG_NUMBER 8-bit entries */
322 reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
323 reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
324 reg_val = (1 | (queue << 1)) << reg_off;
326 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
/* NOTE(review): the i == reg_idx test selecting between these two writes
 * was dropped by extraction */
328 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
330 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
/*
 * Enable or disable promiscuous mode according to IFF_PROMISC.
 * Enabling sets the UPM bit and opens all unicast/multicast filter entries
 * (directed at 'queue'); disabling clears UPM, flushes the multicast
 * filters, and reinstalls the station address filter.
 */
335 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
337 uint32_t port_config;
340 /* Enable or disable promiscuous mode as needed */
341 if (sc->ifp->if_flags & IFF_PROMISC) {
342 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
343 port_config |= PORT_CONFIG_UPM;
344 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
/* Replicate the pass+queue nibble into all four byte lanes */
346 reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
347 (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
349 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
350 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
351 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
354 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
355 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
/* else branch: leave promiscuous mode */
358 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
359 port_config &= ~PORT_CONFIG_UPM;
360 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
362 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
363 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
364 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
/* Restore the normal unicast filter for our own MAC */
367 mge_set_mac_address(sc);
/*
 * bus_dmamap_load() callback: stores the single segment's bus address into
 * the bus_addr_t pointed to by 'arg'.  Exactly one segment is expected.
 */
372 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
376 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
379 *paddr = segs->ds_addr;
/*
 * Allocate a fresh mbuf cluster for an RX slot, unload any previous mapping
 * on 'map', DMA-load the new mbuf, and return its mbuf and bus address via
 * 'mbufp' / 'paddr'.  Panics if the mapping yields more than one segment.
 */
383 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
386 struct mbuf *new_mbuf;
387 bus_dma_segment_t seg[1];
391 KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
393 new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
394 if (new_mbuf == NULL)
/* Use the full cluster as receive buffer */
396 new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
/* Tear down the mapping of the mbuf being replaced (if any) */
399 bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
400 bus_dmamap_unload(tag, map);
403 error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
405 KASSERT(nsegs == 1, ("Too many segments returned!"));
406 if (nsegs != 1 || error)
407 panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
409 bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
412 (*paddr) = seg->ds_addr;
/*
 * Allocate DMA-coherent memory for 'size' descriptors in 'tab', chain them
 * into a circular ring (each descriptor's next_desc points at the next,
 * the last wraps to the first), create a busdma tag for mbuf buffers in
 * '*buffer_tag', and create one buffer map per descriptor.
 */
417 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
418 uint32_t size, bus_dma_tag_t *buffer_tag)
420 struct mge_desc_wrapper *dw;
421 bus_addr_t desc_paddr;
/* Walk backwards so each descriptor can link to the one allocated after */
425 for (i = size - 1; i >= 0; i--) {
427 error = bus_dmamem_alloc(sc->mge_desc_dtag,
428 (void**)&(dw->mge_desc),
429 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
433 if_printf(sc->ifp, "failed to allocate DMA memory\n");
438 error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
439 dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
440 &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
443 if_printf(sc->ifp, "can't load descriptor\n");
444 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
450 /* Chain descriptors */
451 dw->mge_desc->next_desc = desc_paddr;
452 desc_paddr = dw->mge_desc_paddr;
/* Close the ring: last descriptor points back at the first */
454 tab[size - 1].mge_desc->next_desc = desc_paddr;
456 /* Allocate a busdma tag for mbufs. */
457 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
458 1, 0, /* alignment, boundary */
459 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
460 BUS_SPACE_MAXADDR, /* highaddr */
461 NULL, NULL, /* filtfunc, filtfuncarg */
462 MCLBYTES, 1, /* maxsize, nsegments */
463 MCLBYTES, 0, /* maxsegsz, flags */
464 NULL, NULL, /* lockfunc, lockfuncarg */
465 buffer_tag); /* dmat */
467 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
471 /* Create TX busdma maps */
472 for (i = 0; i < size; i++) {
474 error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
476 if_printf(sc->ifp, "failed to create map for mbuf\n");
/* Slots start empty; buffers are attached later */
480 dw->buffer = (struct mbuf*)NULL;
481 dw->mge_desc->buffer = (bus_addr_t)NULL;
/*
 * Top-level DMA setup: create the descriptor tag, allocate the TX and RX
 * descriptor rings, populate every RX slot with an mbuf cluster, and record
 * the ring base addresses for the hardware.
 */
488 mge_allocate_dma(struct mge_softc *sc)
491 struct mge_desc_wrapper *dw;
494 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
495 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
496 16, 0, /* alignment, boundary */
497 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
498 BUS_SPACE_MAXADDR, /* highaddr */
499 NULL, NULL, /* filtfunc, filtfuncarg */
500 sizeof(struct mge_desc), 1, /* maxsize, nsegments */
501 sizeof(struct mge_desc), 0, /* maxsegsz, flags */
502 NULL, NULL, /* lockfunc, lockfuncarg */
503 &sc->mge_desc_dtag); /* dmat */
506 mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
508 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
/* Attach a receive buffer to every RX descriptor */
511 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
512 dw = &(sc->mge_rx_desc[i]);
513 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
514 &dw->mge_desc->buffer);
/* Ring base addresses handed to the controller at init time */
517 sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
518 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
/*
 * Release a descriptor ring: for each slot, optionally unload/destroy the
 * buffer mapping (and free the mbuf when 'free_mbufs' is set — used for RX,
 * where the driver owns the buffers), then unload and free the descriptor
 * memory itself.
 */
524 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
525 uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
527 struct mge_desc_wrapper *dw;
530 for (i = 0; i < size; i++) {
534 if (dw->buffer_dmap) {
536 bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
537 BUS_DMASYNC_POSTREAD);
538 bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
540 bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
544 /* Free RX descriptors */
546 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
547 BUS_DMASYNC_POSTREAD);
548 bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
549 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
/*
 * Tear down all DMA state: both descriptor rings (freeing RX mbufs but not
 * TX, which are freed on completion), the per-ring mbuf tags, and finally
 * the shared descriptor tag.
 */
556 mge_free_dma(struct mge_softc *sc)
558 /* Free desciptors and mbufs */
559 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
560 mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
562 /* Destroy mbuf dma tag */
563 bus_dma_tag_destroy(sc->mge_tx_dtag);
564 bus_dma_tag_destroy(sc->mge_rx_dtag);
565 /* Destroy descriptors tag */
566 bus_dma_tag_destroy(sc->mge_desc_dtag);
/*
 * Rebuild the RX ring from scratch (used after an RX resource error):
 * free and reallocate all RX descriptors and buffers, reset the current
 * index, reprogram the hardware's RX descriptor pointer, and re-enable
 * the RX queue.  Caller holds the receive lock.
 */
570 mge_reinit_rx(struct mge_softc *sc)
572 struct mge_desc_wrapper *dw;
575 MGE_RECEIVE_LOCK_ASSERT(sc);
577 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
579 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
582 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
583 dw = &(sc->mge_rx_desc[i]);
584 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
585 &dw->mge_desc->buffer);
588 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
589 sc->rx_desc_curr = 0;
591 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
594 /* Enable RX queue */
595 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
598 #ifdef DEVICE_POLLING
599 static poll_handler_t mge_poll;
/*
 * DEVICE_POLLING handler: with interrupts masked, service up to 'count' RX
 * packets and any completed TX descriptors.  POLL_AND_CHECK_STATUS
 * additionally reads/acks the cause registers and (per upstream) reinits
 * RX on a resource error.  Returns the number of RX packets handled.
 */
602 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
604 struct mge_softc *sc = ifp->if_softc;
605 uint32_t int_cause, int_cause_ext;
610 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
611 MGE_GLOBAL_UNLOCK(sc);
615 if (cmd == POLL_AND_CHECK_STATUS) {
616 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
617 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
619 /* Check for resource error */
620 if (int_cause & MGE_PORT_INT_RXERRQ0)
/* Acknowledge whatever causes were seen (write-0-to-clear semantics) */
623 if (int_cause || int_cause_ext) {
624 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
625 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
629 mge_intr_tx_locked(sc);
630 rx_npkts = mge_intr_rx_locked(sc, count);
632 MGE_GLOBAL_UNLOCK(sc);
635 #endif /* DEVICE_POLLING */
/*
 * Device attach: read FDT data (node, PHY address), initialize locks,
 * allocate bus resources and DMA rings, create and configure the ifnet,
 * attach the MII bus/PHY, and hook up the interrupt handlers.
 * Returns 0 on success or an errno (error paths partly elided here).
 */
638 mge_attach(device_t dev)
640 struct mge_softc *sc;
641 struct mii_softc *miisc;
643 uint8_t hwaddr[ETHER_ADDR_LEN];
646 sc = device_get_softc(dev);
648 sc->node = ofw_bus_get_node(dev);
650 /* Set chip version-dependent parameters */
653 /* Get phy address and used softc from fdt */
654 if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) != 0)
657 /* Initialize mutexes */
658 mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
659 mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
661 /* Allocate IO and IRQ resources */
662 error = bus_alloc_resources(dev, res_spec, sc->res);
664 device_printf(dev, "could not allocate resources\n");
669 /* Allocate DMA, buffers, buffer descriptors */
670 error = mge_allocate_dma(sc);
/* Reset software ring indexes */
676 sc->tx_desc_curr = 0;
677 sc->rx_desc_curr = 0;
678 sc->tx_desc_used_idx = 0;
679 sc->tx_desc_used_count = 0;
681 /* Configure defaults for interrupts coalescing */
682 sc->rx_ic_time = 768;
683 sc->tx_ic_time = 768;
686 /* Allocate network interface */
687 ifp = sc->ifp = if_alloc(IFT_ETHER);
689 device_printf(dev, "if_alloc() failed\n");
694 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
696 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
697 ifp->if_capabilities = IFCAP_VLAN_MTU;
698 if (sc->mge_hw_csum) {
699 ifp->if_capabilities |= IFCAP_HWCSUM;
700 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
702 ifp->if_capenable = ifp->if_capabilities;
704 #ifdef DEVICE_POLLING
705 /* Advertise that polling is supported */
706 ifp->if_capabilities |= IFCAP_POLLING;
709 ifp->if_init = mge_init;
710 ifp->if_start = mge_start;
711 ifp->if_ioctl = mge_ioctl;
/* Size the send queue to the TX ring */
713 ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
714 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
715 IFQ_SET_READY(&ifp->if_snd);
717 mge_get_mac_address(sc, hwaddr);
718 ether_ifattach(ifp, hwaddr);
719 callout_init(&sc->wd_callout, 0);
/* Attach the PHY(s) through miibus */
722 error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
723 mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
725 device_printf(dev, "attaching PHYs failed\n");
729 sc->mii = device_get_softc(sc->miibus);
731 /* Tell the MAC where to find the PHY so autoneg works */
732 miisc = LIST_FIRST(&sc->mii->mii_phys);
733 MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
735 /* Attach interrupt handlers */
736 /* TODO: review flags, in part. mark RX as INTR_ENTROPY ? */
737 for (i = 1; i <= sc->mge_intr_cnt; ++i) {
/* With a single interrupt line use the aggregated handler (index 0) */
738 error = bus_setup_intr(dev, sc->res[i],
739 INTR_TYPE_NET | INTR_MPSAFE,
740 NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
741 sc, &sc->ih_cookie[i - 1]);
743 device_printf(dev, "could not setup %s\n",
744 mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
/*
 * Device detach: stop the controller, drain the watchdog callout, tear
 * down interrupt handlers, detach the ifnet, free DMA state and bus
 * resources, and destroy the locks.
 */
754 mge_detach(device_t dev)
756 struct mge_softc *sc;
759 sc = device_get_softc(dev);
761 /* Stop controller and free TX queue */
765 /* Wait for stopping ticks */
766 callout_drain(&sc->wd_callout);
768 /* Stop and release all interrupts */
769 for (i = 0; i < sc->mge_intr_cnt; ++i) {
770 if (!sc->ih_cookie[i])
773 error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
775 device_printf(dev, "could not release %s\n",
776 mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
779 /* Detach network interface */
781 ether_ifdetach(sc->ifp);
785 /* Free DMA resources */
788 /* Free IO memory handler */
789 bus_release_resources(dev, res_spec, sc->res);
791 /* Destroy mutexes */
792 mtx_destroy(&sc->receive_lock);
793 mtx_destroy(&sc->transmit_lock);
/*
 * ifmedia status callback: poll the MII and report the active media and
 * link status under the transmit lock.
 */
799 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
801 struct mge_softc *sc = ifp->if_softc;
802 struct mii_data *mii;
804 MGE_TRANSMIT_LOCK(sc);
809 ifmr->ifm_active = mii->mii_media_active;
810 ifmr->ifm_status = mii->mii_media_status;
812 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Build the PORT_SERIAL_CTRL register value for the requested ifmedia word:
 * base bits (reserved bit 9, force-link-fail, 1552-byte MRU) plus
 * speed/autoneg bits per media subtype, and full-duplex when selected.
 * NOTE(review): the case labels of this switch were dropped by extraction.
 */
816 mge_set_port_serial_control(uint32_t media)
818 uint32_t port_config;
820 port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
821 PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
823 if (IFM_TYPE(media) == IFM_ETHER) {
824 switch(IFM_SUBTYPE(media)) {
/* gigabit */
828 port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
829 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
830 PORT_SERIAL_SPEED_AUTONEG);
/* 100 Mbit */
833 port_config |= (PORT_SERIAL_MII_SPEED_100 |
834 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
835 PORT_SERIAL_SPEED_AUTONEG);
/* 10 Mbit / default */
838 port_config |= (PORT_SERIAL_AUTONEG |
839 PORT_SERIAL_AUTONEG_FC |
840 PORT_SERIAL_SPEED_AUTONEG);
/* full-duplex bit is set when the media word requests FDX */
844 port_config |= PORT_SERIAL_FULL_DUPLEX;
846 return (port_config);
/*
 * ifmedia change callback: when the interface is up, record the new media
 * word and kick the MII layer to apply it.
 */
850 mge_ifmedia_upd(struct ifnet *ifp)
852 struct mge_softc *sc = ifp->if_softc;
854 if (ifp->if_flags & IFF_UP) {
857 sc->mge_media_status = sc->mii->mii_media.ifm_media;
858 mii_mediachg(sc->mii);
861 MGE_GLOBAL_UNLOCK(sc);
/*
 * Body of mge_init() (if_init entry point — its signature line was dropped
 * by extraction): take the global lock and run mge_init_locked().
 */
870 struct mge_softc *sc = arg;
874 mge_init_locked(arg);
876 MGE_GLOBAL_UNLOCK(sc);
/*
 * Bring the controller up (locks already held): mask interrupts, program
 * MAC/multicast filters, configure TX queues, SDMA and the serial port,
 * arm the RX ring, enable the port, wait for link, set up interrupt
 * coalescing, unmask interrupts (unless polling), mark the interface
 * running, and start the watchdog tick.
 */
880 mge_init_locked(void *arg)
882 struct mge_softc *sc = arg;
883 struct mge_desc_wrapper *dw;
884 volatile uint32_t reg_val;
888 MGE_GLOBAL_LOCK_ASSERT(sc);
893 /* Disable interrupts */
894 mge_intrs_ctrl(sc, 0);
896 /* Set MAC address */
897 mge_set_mac_address(sc);
899 /* Setup multicast filters */
900 mge_setup_multicast(sc);
/* v2 chips need RGMII and fixed-priority arbitration enabled */
902 if (sc->mge_ver == 2) {
903 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
904 MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
907 /* Initialize TX queue configuration registers */
908 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
909 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
910 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
912 /* Clear TX queue configuration registers for unused queues */
913 for (i = 1; i < 7; i++) {
914 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
915 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
916 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
919 /* Set default MTU */
920 MGE_WRITE(sc, sc->mge_mtu, 0);
922 /* Port configuration */
923 MGE_WRITE(sc, MGE_PORT_CONFIG,
924 PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
925 PORT_CONFIG_ARO_RXQ(0));
926 MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);
928 /* Setup port configuration */
929 reg_val = mge_set_port_serial_control(sc->mge_media_status);
930 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
932 /* Setup SDMA configuration */
933 MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
934 MGE_SDMA_TX_BYTE_SWAP |
935 MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
936 MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
938 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
/* Hand the descriptor ring base addresses to the hardware */
940 MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
941 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
944 /* Reset descriptor indexes */
945 sc->tx_desc_curr = 0;
946 sc->rx_desc_curr = 0;
947 sc->tx_desc_used_idx = 0;
948 sc->tx_desc_used_count = 0;
950 /* Enable RX descriptors */
951 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
952 dw = &sc->mge_rx_desc[i];
/* Hand the slot to the DMA engine and request an interrupt on fill */
953 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
954 dw->mge_desc->buff_size = MCLBYTES;
955 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
956 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
959 /* Enable RX queue */
960 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
/* Enable the port itself */
963 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
964 reg_val |= PORT_SERIAL_ENABLE;
965 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
/* Wait (bounded — loop elided by extraction) for link-up */
968 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
969 if (reg_val & MGE_STATUS_LINKUP)
973 if_printf(sc->ifp, "Timeout on link-up\n");
978 /* Setup interrupts coalescing */
982 /* Enable interrupts */
983 #ifdef DEVICE_POLLING
985 * * ...only if polling is not turned on. Disable interrupts explicitly
986 * if polling is enabled.
988 if (sc->ifp->if_capenable & IFCAP_POLLING)
989 mge_intrs_ctrl(sc, 0);
991 #endif /* DEVICE_POLLING */
992 mge_intrs_ctrl(sc, 1);
994 /* Activate network interface */
995 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
996 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
999 /* Schedule watchdog timeout */
1000 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * Aggregated interrupt handler (single-IRQ chips): under the global lock,
 * read both cause registers, ack and service TX completions, then check
 * and service RX.  Bails out immediately when polling owns the device.
 */
1004 mge_intr_rxtx(void *arg)
1006 struct mge_softc *sc = arg;
1007 uint32_t int_cause, int_cause_ext;
1009 MGE_GLOBAL_LOCK(sc);
1011 #ifdef DEVICE_POLLING
1012 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1013 MGE_GLOBAL_UNLOCK(sc);
1018 /* Get interrupt cause */
1019 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1020 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1022 /* Check for Transmit interrupt */
1023 if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
1024 MGE_PORT_INT_EXT_TXUR)) {
/* Ack only the TX-related cause bits (write-0-to-clear) */
1025 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1026 (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1027 mge_intr_tx_locked(sc);
1030 MGE_TRANSMIT_UNLOCK(sc);
1032 /* Check for Receive interrupt */
1033 mge_intr_rx_check(sc, int_cause, int_cause_ext);
1035 MGE_RECEIVE_UNLOCK(sc);
/* Error interrupt: currently only logs that it fired (stub handler). */
1039 mge_intr_err(void *arg)
1041 struct mge_softc *sc = arg;
1045 if_printf(ifp, "%s\n", __FUNCTION__);
/* Miscellaneous interrupt: currently only logs that it fired (stub). */
1049 mge_intr_misc(void *arg)
1051 struct mge_softc *sc = arg;
1055 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * Dedicated RX interrupt handler: under the receive lock, read the cause
 * registers and delegate to mge_intr_rx_check().  No-op while polling
 * owns the device.
 */
1059 mge_intr_rx(void *arg) {
1060 struct mge_softc *sc = arg;
1061 uint32_t int_cause, int_cause_ext;
1063 MGE_RECEIVE_LOCK(sc);
1065 #ifdef DEVICE_POLLING
1066 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1067 MGE_RECEIVE_UNLOCK(sc);
1072 /* Get interrupt cause */
1073 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1074 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1076 mge_intr_rx_check(sc, int_cause, int_cause_ext);
1078 MGE_RECEIVE_UNLOCK(sc);
/*
 * Common RX-cause processing: on a resource error, ack it (and, per
 * upstream, reinit the RX ring); then, if any RX cause bits remain, ack
 * them and drain the ring without a packet budget (count == -1).
 */
1082 mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
1083 uint32_t int_cause_ext)
1085 /* Check for resource error */
1086 if (int_cause & MGE_PORT_INT_RXERRQ0) {
1088 MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1089 ~(int_cause & MGE_PORT_INT_RXERRQ0));
/* Keep only the RX-related bits from each cause register */
1092 int_cause &= MGE_PORT_INT_RXQ0;
1093 int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1095 if (int_cause || int_cause_ext) {
1096 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1097 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1098 mge_intr_rx_locked(sc, -1);
/*
 * Drain completed RX descriptors (receive lock held).  'count' limits how
 * many packets are processed (-1 = unlimited).  For each CPU-owned
 * descriptor: copy the frame into a fresh mbuf chain, strip the CRC,
 * apply checksum-offload results, and pass it up via if_input (dropping
 * the lock around the call).  The descriptor is then recycled back to the
 * DMA engine.  Returns the number of packets delivered.
 */
1103 mge_intr_rx_locked(struct mge_softc *sc, int count)
1105 struct ifnet *ifp = sc->ifp;
1108 struct mge_desc_wrapper* dw;
1112 MGE_RECEIVE_LOCK_ASSERT(sc);
1114 while (count != 0) {
1115 dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1116 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1117 BUS_DMASYNC_POSTREAD);
1120 status = dw->mge_desc->cmd_status;
1121 bufsize = dw->mge_desc->buff_size;
/* Still owned by the DMA engine — nothing more to reap */
1122 if ((status & MGE_DMA_OWNED) != 0)
1125 if (dw->mge_desc->byte_count &&
1126 ~(status & MGE_ERR_SUMMARY)) {
1128 bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1129 BUS_DMASYNC_POSTREAD);
/* Copy out the frame minus trailing CRC */
1131 mb = m_devget(dw->buffer->m_data,
1132 dw->mge_desc->byte_count - ETHER_CRC_LEN,
1136 /* Give up if no mbufs */
/* Chip prepends 2 bytes of padding; trim them off */
1140 mb->m_pkthdr.len -= 2;
1143 mb->m_pkthdr.rcvif = ifp;
1145 mge_offload_process_frame(ifp, mb, status,
/* Drop the lock while handing the packet to the stack */
1148 MGE_RECEIVE_UNLOCK(sc);
1149 (*ifp->if_input)(ifp, mb);
1150 MGE_RECEIVE_LOCK(sc);
/* Recycle the descriptor back to the DMA engine */
1154 dw->mge_desc->byte_count = 0;
1155 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1156 sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
1157 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1158 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1164 if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);
/* Summary interrupt: currently only logs that it fired (stub handler). */
1170 mge_intr_sum(void *arg)
1172 struct mge_softc *sc = arg;
1176 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * Dedicated TX interrupt handler: under the transmit lock, ack the TX
 * cause bits and reap completed descriptors.  No-op while polling owns
 * the device.
 */
1180 mge_intr_tx(void *arg)
1182 struct mge_softc *sc = arg;
1183 uint32_t int_cause_ext;
1185 MGE_TRANSMIT_LOCK(sc);
1187 #ifdef DEVICE_POLLING
1188 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1189 MGE_TRANSMIT_UNLOCK(sc);
1194 /* Ack the interrupt */
1195 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1196 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1197 (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1199 mge_intr_tx_locked(sc);
1201 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Reap completed TX descriptors (transmit lock held): for each descriptor
 * no longer DMA-owned, update collision statistics on error, unload and
 * free its mbuf, and bump the output-packet counter.  Finally clear
 * OACTIVE and restart transmission of any queued packets.
 */
1206 mge_intr_tx_locked(struct mge_softc *sc)
1208 struct ifnet *ifp = sc->ifp;
1209 struct mge_desc_wrapper *dw;
1210 struct mge_desc *desc;
1214 MGE_TRANSMIT_LOCK_ASSERT(sc);
1216 /* Disable watchdog */
1219 while (sc->tx_desc_used_count) {
1220 /* Get the descriptor */
1221 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1222 desc = dw->mge_desc;
1223 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1224 BUS_DMASYNC_POSTREAD);
1226 /* Get descriptor status */
1227 status = desc->cmd_status;
/* Still in flight — stop reaping */
1229 if (status & MGE_DMA_OWNED)
1232 sc->tx_desc_used_idx =
1233 (++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;
1234 sc->tx_desc_used_count--;
1236 /* Update collision statistics */
1237 if (status & MGE_ERR_SUMMARY) {
1238 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1239 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
/* Retry-limit error counts as 16 collisions */
1240 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1241 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
1244 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1245 BUS_DMASYNC_POSTWRITE);
1246 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1247 m_freem(dw->buffer);
1248 dw->buffer = (struct mbuf*)NULL;
1251 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1255 /* Now send anything that was pending */
1256 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1257 mge_start_locked(ifp);
/*
 * ifnet ioctl handler.  Handles interface flag changes (promisc/allmulti,
 * up/down), multicast list updates, capability toggles (HWCSUM and, when
 * built in, DEVICE_POLLING registration), and media ioctls; everything
 * else falls through to ether_ioctl().  (Switch case labels were dropped
 * by extraction.)
 */
1262 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1264 struct mge_softc *sc = ifp->if_softc;
1265 struct ifreq *ifr = (struct ifreq *)data;
/* SIOCSIFFLAGS: react to flag changes */
1273 MGE_GLOBAL_LOCK(sc);
1275 if (ifp->if_flags & IFF_UP) {
1276 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Only reconfigure what actually changed */
1277 flags = ifp->if_flags ^ sc->mge_if_flags;
1278 if (flags & IFF_PROMISC)
1279 mge_set_prom_mode(sc,
1280 MGE_RX_DEFAULT_QUEUE);
1282 if (flags & IFF_ALLMULTI)
1283 mge_setup_multicast(sc);
1285 mge_init_locked(sc);
1287 else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1290 sc->mge_if_flags = ifp->if_flags;
1291 MGE_GLOBAL_UNLOCK(sc);
/* SIOCADDMULTI / SIOCDELMULTI: refresh the hardware multicast filter */
1295 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1296 MGE_GLOBAL_LOCK(sc);
1297 mge_setup_multicast(sc);
1298 MGE_GLOBAL_UNLOCK(sc);
/* SIOCSIFCAP: toggle capabilities */
1302 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1303 if (mask & IFCAP_HWCSUM) {
1304 ifp->if_capenable &= ~IFCAP_HWCSUM;
1305 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1306 if (ifp->if_capenable & IFCAP_TXCSUM)
1307 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1309 ifp->if_hwassist = 0;
1311 #ifdef DEVICE_POLLING
1312 if (mask & IFCAP_POLLING) {
1313 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1314 error = ether_poll_register(mge_poll, ifp);
/* Polling on: mask device interrupts */
1318 MGE_GLOBAL_LOCK(sc);
1319 mge_intrs_ctrl(sc, 0);
1320 ifp->if_capenable |= IFCAP_POLLING;
1321 MGE_GLOBAL_UNLOCK(sc);
/* Polling off: re-enable device interrupts */
1323 error = ether_poll_deregister(ifp);
1324 MGE_GLOBAL_LOCK(sc);
1325 mge_intrs_ctrl(sc, 1);
1326 ifp->if_capenable &= ~IFCAP_POLLING;
1327 MGE_GLOBAL_UNLOCK(sc);
1332 case SIOCGIFMEDIA: /* fall through */
/* SIOCSIFMEDIA: hardware cannot do 1000baseT half-duplex */
1334 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1335 && !(ifr->ifr_media & IFM_FDX)) {
1336 device_printf(sc->dev,
1337 "1000baseTX half-duplex unsupported\n");
1340 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1343 error = ether_ioctl(ifp, command, data);
/*
 * miibus read: issue an SMI read command through the PHY-owning softc
 * (phy_sc — shared when two ports use one SMI block), spin until
 * READVALID with bounded retries, then return the 16-bit data.
 */
1349 mge_miibus_readreg(device_t dev, int phy, int reg)
1351 struct mge_softc *sc;
1354 sc = device_get_softc(dev);
1356 MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
1357 (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1359 retries = MGE_SMI_READ_RETRIES;
1361 !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
1362 DELAY(MGE_SMI_READ_DELAY);
1365 device_printf(dev, "Timeout while reading from PHY\n");
1367 return (MGE_READ(sc->phy_sc, MGE_REG_SMI) & 0xffff);
/*
 * miibus write: issue an SMI write command and spin (bounded retries)
 * until the SMI block is no longer busy.
 */
1371 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1373 struct mge_softc *sc;
1376 sc = device_get_softc(dev);
1378 MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
1379 (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1381 retries = MGE_SMI_WRITE_RETRIES;
1382 while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
1383 DELAY(MGE_SMI_WRITE_DELAY);
1386 device_printf(dev, "Timeout while writing to PHY\n");
/*
 * Newbus probe method: match the FDT node against the "mrvl,ge"
 * compatible string and set the device description on success.
 */
1391 mge_probe(device_t dev)
/* Skip nodes marked disabled in the device tree. */
1394 	if (!ofw_bus_status_okay(dev))
1397 	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1400 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1401 	return (BUS_PROBE_DEFAULT);
/*
 * Newbus resume method: stub that only logs the call; no hardware
 * state is restored here.
 */
1405 mge_resume(device_t dev)
1408 	device_printf(dev, "%s\n", __FUNCTION__);
/*
 * Newbus shutdown method: quiesce the interface under the global lock.
 * NOTE(review): the call that actually stops the MAC (presumably
 * mge_stop()) is elided in this excerpt -- confirm against full source.
 */
1413 mge_shutdown(device_t dev)
1415 	struct mge_softc *sc = device_get_softc(dev);
1417 	MGE_GLOBAL_LOCK(sc);
1419 #ifdef DEVICE_POLLING
/* Leave polling mode so the interface can be restarted with interrupts. */
1420         if (sc->ifp->if_capenable & IFCAP_POLLING)
1421 		ether_poll_deregister(sc->ifp);
1426 	MGE_GLOBAL_UNLOCK(sc);
/*
 * Map mbuf m0 for DMA and fill in the next free TX descriptor.
 * Only a single DMA segment is supported; callers must defragment
 * first (see mge_start_locked).  Advances tx_desc_curr and bumps
 * tx_desc_used_count on success.  NOTE(review): several locals, error
 * paths and the DMA-ownership handoff are elided in this excerpt.
 */
1432 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1434 	struct mge_desc_wrapper *dw = NULL;
1436 	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1444 	/* Fetch unused map */
1445 	desc_no = sc->tx_desc_curr;
1446 	dw = &sc->mge_tx_desc[desc_no];
1447 	mapp = dw->buffer_dmap;
1449 	/* Create mapping in DMA memory */
1450 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1457 	/* Only one segment is supported. */
1459 		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1464 	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1466 	/* Everything is ok, now we can send buffers */
1467 	for (seg = 0; seg < nsegs; seg++) {
1468 		dw->mge_desc->byte_count = segs[seg].ds_len;
1469 		dw->mge_desc->buffer = segs[seg].ds_addr;
/* Build cmd_status from scratch; checksum/VLAN bits added by helper. */
1471 		dw->mge_desc->cmd_status = 0;
1473 			mge_offload_setup_descriptor(sc, dw);
1474 		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1475 		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
/* Flush the descriptor to memory before the NIC may look at it. */
1479 	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1480 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Advance the ring cursor (wraps at MGE_TX_DESC_NUM). */
1482 	sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
1483 	sc->tx_desc_used_count++;
/* Body of mge_tick(): periodic (1 Hz) callout that services the TX
 * watchdog and tracks media changes.  NOTE(review): the function
 * signature and the watchdog call are elided in this excerpt. */
1490 	struct mge_softc *sc = msc;
1492 	/* Check for TX timeout */
1497 	/* Check for media type change */
1498 	if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1499 		mge_ifmedia_upd(sc->ifp);
1501 	/* Schedule another timeout one second from now */
1502 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * TX watchdog: if the timer armed by the transmit path expires,
 * count an output error and reinitialize the controller.  Runs from
 * mge_tick() under no lock; takes the global lock itself.
 */
1506 mge_watchdog(struct mge_softc *sc)
1512 	MGE_GLOBAL_LOCK(sc);
/* Timer disarmed, or still counting down: nothing to do. */
1514 	if (sc->wd_timer == 0 || --sc->wd_timer) {
1515 		MGE_GLOBAL_UNLOCK(sc);
1519 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1520 	if_printf(ifp, "watchdog timeout\n");
/* Full reinit to recover the stuck transmitter. */
1523 	mge_init_locked(sc);
1525 	MGE_GLOBAL_UNLOCK(sc);
/*
 * ifnet if_start entry point: take the transmit lock and hand off to
 * mge_start_locked().
 */
1529 mge_start(struct ifnet *ifp)
1531 	struct mge_softc *sc = ifp->if_softc;
1533 	MGE_TRANSMIT_LOCK(sc);
1535 	mge_start_locked(ifp);
1537 	MGE_TRANSMIT_UNLOCK(sc);
/*
 * Drain the interface send queue: dequeue packets, make them suitable
 * for the single-segment DMA engine (writable copy for csum/VLAN,
 * defragmented to one mbuf), encapsulate them, then kick the TX queue.
 * Called with the transmit lock held.  NOTE(review): the loop header,
 * queue-empty break and error paths are elided in this excerpt.
 */
1541 mge_start_locked(struct ifnet *ifp)
1543 	struct mge_softc *sc;
1544 	struct mbuf *m0, *mtmp;
1545 	uint32_t reg_val, queued = 0;
1549 	MGE_TRANSMIT_LOCK_ASSERT(sc);
/* Bail unless the interface is RUNNING and not OACTIVE. */
1551 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1556 		/* Get packet from the queue */
1557 		IF_DEQUEUE(&ifp->if_snd, m0);
/* Offload paths write into the mbuf, so it must be writable. */
1561 		if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
1562 		    m0->m_flags & M_VLANTAG) {
1563 			if (M_WRITABLE(m0) == 0) {
1564 				mtmp = m_dup(m0, M_NOWAIT);
1571 		/* The driver support only one DMA fragment. */
1572 		if (m0->m_next != NULL) {
1573 			mtmp = m_defrag(m0, M_NOWAIT);
1578 		/* Check for free descriptors */
1579 		if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
/* Ring full: requeue the packet and mark the interface busy. */
1580 			IF_PREPEND(&ifp->if_snd, m0);
1581 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1585 		if (mge_encap(sc, m0) != 0)
1593 		/* Enable transmitter and watchdog timer */
1594 		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1595 		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
/*
 * Stop the controller: cancel the tick callout, mask interrupts,
 * disable the RX/TX queues, reclaim any mbufs still owned by the TX
 * ring, wait for the transmitter to drain, and finally disable the
 * port.  Called with the global lock held.  NOTE(review): several
 * locals, the early-return, and the drain-loop header/timeout counter
 * are elided in this excerpt.
 */
1601 mge_stop(struct mge_softc *sc)
1604 	volatile uint32_t reg_val, status;
1605 	struct mge_desc_wrapper *dw;
1606 	struct mge_desc *desc;
/* Already stopped: nothing to do. */
1611 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1614 	/* Stop tick engine */
1615 	callout_stop(&sc->wd_callout);
1617 	/* Disable interface */
1618 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1621 	/* Disable interrupts */
1622 	mge_intrs_ctrl(sc, 0);
1624 	/* Disable Rx and Tx */
1625 	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1626 	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1627 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1629 	/* Remove pending data from TX queue */
1630 	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1631 	    sc->tx_desc_used_count) {
1632 		/* Get the descriptor */
1633 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1634 		desc = dw->mge_desc;
1635 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1636 		    BUS_DMASYNC_POSTREAD);
1638 		/* Get descriptor status */
1639 		status = desc->cmd_status;
/* Descriptor still owned by the DMA engine: stop reclaiming here. */
1641 		if (status & MGE_DMA_OWNED)
1644 		sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) %
1646 		sc->tx_desc_used_count--;
/* Tear down the DMA mapping and free the transmitted mbuf. */
1648 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1649 		    BUS_DMASYNC_POSTWRITE);
1650 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1652 		m_freem(dw->buffer);
1653 		dw->buffer = (struct mbuf*)NULL;
1656 	/* Wait for end of transmission */
/* Poll until TX is idle and the FIFO is empty (bounded by an elided
 * retry counter). */
1659 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1660 		if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1661 		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1667 		if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
/* Finally drop the port-enable bit to fully stop the MAC. */
1670 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1671 	reg_val &= ~(PORT_SERIAL_ENABLE);
1672 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
/*
 * Newbus suspend method: stub that only logs the call; no hardware
 * state is saved here.
 */
1676 mge_suspend(device_t dev)
1679 	device_printf(dev, "%s\n", __FUNCTION__);
/*
 * Translate hardware RX checksum results for a received frame into
 * mbuf csum_flags, honoring the interface's RXCSUM capability.
 * 'status' carries the descriptor status bits; 'bufsize' carries the
 * IP-fragment indicator bit.
 */
1684 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1685     uint32_t status, uint16_t bufsize)
1689 	if (ifp->if_capenable & IFCAP_RXCSUM) {
/* Hardware validated the IP header checksum. */
1690 		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1691 			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
/* L4 checksum is only trustworthy for unfragmented TCP/UDP. */
1693 		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1694 		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1695 		    (status & MGE_RX_L4_CSUM_OK)) {
1696 			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1697 			frame->m_pkthdr.csum_data = 0xFFFF;
/* Publish the accumulated flags on the mbuf for the stack. */
1700 	frame->m_pkthdr.csum_flags = csum_flags;
/*
 * Set TX checksum/VLAN offload bits in the descriptor wrapped by 'dw'
 * based on the mbuf's csum_flags and Ethernet header.  Only IPv4 is
 * supported; other ethertypes get a diagnostic (elided here) and no
 * offload.  NOTE(review): some locals and the non-IP early-out are
 * elided in this excerpt.
 */
1705 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1707 	struct mbuf *m0 = dw->buffer;
1708 	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1709 	int csum_flags = m0->m_pkthdr.csum_flags;
/* Account for an 802.1Q tag when locating the IP header. */
1715 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1716 		etype = ntohs(eh->evl_proto);
1717 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1718 		csum_flags |= MGE_TX_VLAN_TAGGED;
1720 		etype = ntohs(eh->evl_encap_proto);
1721 		ehlen = ETHER_HDR_LEN;
/* Hardware offload is IPv4-only; warn and skip otherwise. */
1724 	if (etype != ETHERTYPE_IP) {
1726 		    "TCP/IP Offload enabled for unsupported "
/* Tell the hardware the IP header size (in 32-bit words). */
1731 	ip = (struct ip *)(m0->m_data + ehlen);
1732 	cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1733 	cmd_status |= MGE_TX_NOT_FRAGMENT;
1736 	if (csum_flags & CSUM_IP)
1737 		cmd_status |= MGE_TX_GEN_IP_CSUM;
1739 	if (csum_flags & CSUM_TCP)
1740 		cmd_status |= MGE_TX_GEN_L4_CSUM;
1742 	if (csum_flags & CSUM_UDP)
1743 		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
/* Merge the offload bits into the descriptor built by mge_encap(). */
1745 	dw->mge_desc->cmd_status |= cmd_status;
/*
 * Enable (enable != 0) or disable (enable == 0) controller interrupts
 * by programming the port interrupt mask registers; the disable path
 * also clears pending cause bits.  NOTE(review): the if/else braces
 * around the two paths are elided in this excerpt.
 */
1749 mge_intrs_ctrl(struct mge_softc *sc, int enable)
/* Enable path: unmask RX queue 0, extend and RX-error interrupts... */
1753 		MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1754 		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
/* ...plus TX error/underrun/buffer and RX overrun in the extended mask. */
1755 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1756 		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1757 		    MGE_PORT_INT_EXT_TXBUF0);
/* Disable path: clear global cause/mask... */
1759 		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1760 		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
/* ...clear pending port causes... */
1762 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1763 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
/* ...and mask everything at the port level. */
1765 		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1766 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
/*
 * Compute a table-driven CRC-8 over 'size' bytes of 'data'.  Used to
 * hash multicast MAC addresses into the "other multicast" filter table
 * (see mge_setup_multicast()).  NOTE(review): the 'crc' initialization,
 * loop header and return statement are elided in this excerpt.
 */
1771 mge_crc8(uint8_t *data, int size)
/* Precomputed 256-entry CRC-8 lookup table. */
1774 	static const uint8_t ct[256] = {
1775 		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1776 		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1777 		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1778 		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1779 		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1780 		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1781 		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1782 		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1783 		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1784 		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1785 		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1786 		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1787 		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1788 		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1789 		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1790 		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1791 		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1792 		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1793 		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
1794 		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
1795 		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
1796 		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
1797 		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
1798 		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
1799 		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
1800 		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
1801 		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
1802 		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
1803 		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
1804 		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
1805 		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
1806 		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
/* One table lookup per input byte folds it into the running CRC. */
1810 		crc = ct[crc ^ *(data++)];
/*
 * Program the hardware multicast filter tables.  With IFF_ALLMULTI
 * every table entry is enabled; otherwise addresses matching the
 * special 01:00:5E:00:00:xx prefix go into the "special" table
 * (indexed by the last octet) and all other multicast addresses are
 * hashed via mge_crc8() into the "other" table.  Each enabled entry
 * stores the RX queue number with the valid bit set.  NOTE(review):
 * some locals and a 'continue'/index line are elided in this excerpt.
 */
1816 mge_setup_multicast(struct mge_softc *sc)
1818 	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
/* Table entry value: default RX queue in bits 3:1, valid bit 0. */
1819 	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
1820 	uint32_t smt[MGE_MCAST_REG_NUMBER];
1821 	uint32_t omt[MGE_MCAST_REG_NUMBER];
1822 	struct ifnet *ifp = sc->ifp;
1823 	struct ifmultiaddr *ifma;
/* ALLMULTI: accept everything by enabling all four bytes of each reg. */
1827 	if (ifp->if_flags & IFF_ALLMULTI) {
1828 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
1829 			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
1831 		memset(smt, 0, sizeof(smt));
1832 		memset(omt, 0, sizeof(omt));
/* Walk the interface's multicast list under the maddr lock. */
1834 		if_maddr_rlock(ifp);
1835 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1836 			if (ifma->ifma_addr->sa_family != AF_LINK)
1839 			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1840 			if (memcmp(mac, special, sizeof(special)) == 0) {
/* Special table: byte index i>>2, byte lane (i&3)*8 within the reg. */
1842 				smt[i >> 2] |= v << ((i & 0x03) << 3);
/* Other multicast: CRC-8 of the full MAC selects the table slot. */
1844 				i = mge_crc8(mac, ETHER_ADDR_LEN);
1845 				omt[i >> 2] |= v << ((i & 0x03) << 3);
1848 		if_maddr_runlock(ifp);
/* Commit both tables to the hardware filter registers. */
1851 	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
1852 		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
1853 		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
/*
 * Program the RX interrupt coalescing time (clamped to the hardware
 * maximum) into the IPG field of the SDMA config register.
 */
1858 mge_set_rxic(struct mge_softc *sc)
1862 	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1863 		sc->rx_ic_time = sc->mge_rx_ipg_max;
/* Read-modify-write: clear the whole IPG field, then set the new value. */
1865 	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
1866 	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1867 	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1868 	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
/*
 * Program the TX interrupt coalescing time (clamped to the hardware
 * maximum) into the TX FIFO urgent threshold register.  Mirrors
 * mge_set_rxic() for the transmit side.
 */
1872 mge_set_txic(struct mge_softc *sc)
1876 	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1877 		sc->tx_ic_time = sc->mge_tfut_ipg_max;
/* Read-modify-write: clear the whole IPG field, then set the new value. */
1879 	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
1880 	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1881 	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1882 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
/*
 * Sysctl handler for the interrupt-coalescing knobs.  arg1 is the
 * softc, arg2 selects RX (MGE_IC_RX) or TX; on a successful write the
 * new time is stored and pushed to hardware under the global lock.
 * NOTE(review): locals, the error/no-change early return and the
 * mge_set_rxic()/mge_set_txic() calls are elided in this excerpt.
 */
1886 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1888 	struct mge_softc *sc = (struct mge_softc *)arg1;
/* Export the current value; sysctl_handle_int reads back a new one. */
1892 	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1893 	error = sysctl_handle_int(oidp, &time, 0, req);
1897 	MGE_GLOBAL_LOCK(sc);
1898 	if (arg2 == MGE_IC_RX) {
1899 		sc->rx_ic_time = time;
1902 		sc->tx_ic_time = time;
1905 	MGE_GLOBAL_UNLOCK(sc);
1911 mge_add_sysctls(struct mge_softc *sc)
1913 struct sysctl_ctx_list *ctx;
1914 struct sysctl_oid_list *children;
1915 struct sysctl_oid *tree;
1917 ctx = device_get_sysctl_ctx(sc->dev);
1918 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1919 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1920 CTLFLAG_RD, 0, "MGE Interrupts coalescing");
1921 children = SYSCTL_CHILDREN(tree);
1923 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1924 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1925 "I", "IC RX time threshold");
1926 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1927 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1928 "I", "IC TX time threshold");