2 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5 * Developed by Semihalf.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of MARVELL nor the names of contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
50 #include <net/ethernet.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
59 #include <netinet/in_systm.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
63 #include <sys/sockio.h>
65 #include <machine/bus.h>
67 #include <machine/resource.h>
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
72 #include <dev/fdt/fdt_common.h>
73 #include <dev/ofw/ofw_bus.h>
74 #include <dev/ofw/ofw_bus_subr.h>
76 #include <dev/mge/if_mgevar.h>
77 #include <arm/mv/mvreg.h>
78 #include <arm/mv/mvvar.h>
80 #include "miibus_if.h"
82 /* PHY registers are in the address space of the first mge unit */
83 static struct mge_softc *sc_mge0 = NULL;
85 static int mge_probe(device_t dev);
86 static int mge_attach(device_t dev);
87 static int mge_detach(device_t dev);
88 static int mge_shutdown(device_t dev);
89 static int mge_suspend(device_t dev);
90 static int mge_resume(device_t dev);
92 static int mge_miibus_readreg(device_t dev, int phy, int reg);
93 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
95 static int mge_ifmedia_upd(struct ifnet *ifp);
96 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
98 static void mge_init(void *arg);
99 static void mge_init_locked(void *arg);
100 static void mge_start(struct ifnet *ifp);
101 static void mge_start_locked(struct ifnet *ifp);
102 static void mge_watchdog(struct mge_softc *sc);
103 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
105 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
106 static uint32_t mge_rx_ipg(uint32_t val, int ver);
107 static void mge_ver_params(struct mge_softc *sc);
109 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
110 static void mge_intr_rx(void *arg);
111 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
112 static void mge_intr_tx(void *arg);
113 static void mge_intr_tx_locked(struct mge_softc *sc);
114 static void mge_intr_misc(void *arg);
115 static void mge_intr_sum(void *arg);
116 static void mge_intr_err(void *arg);
117 static void mge_stop(struct mge_softc *sc);
118 static void mge_tick(void *msc);
119 static uint32_t mge_set_port_serial_control(uint32_t media);
120 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
121 static void mge_set_mac_address(struct mge_softc *sc);
122 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
124 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
125 static int mge_allocate_dma(struct mge_softc *sc);
126 static int mge_alloc_desc_dma(struct mge_softc *sc,
127 struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
128 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
129 struct mbuf **mbufp, bus_addr_t *paddr);
130 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
131 static void mge_free_dma(struct mge_softc *sc);
132 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
133 bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
134 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
135 uint32_t status, uint16_t bufsize);
136 static void mge_offload_setup_descriptor(struct mge_softc *sc,
137 struct mge_desc_wrapper *dw);
138 static uint8_t mge_crc8(uint8_t *data, int size);
139 static void mge_setup_multicast(struct mge_softc *sc);
140 static void mge_set_rxic(struct mge_softc *sc);
141 static void mge_set_txic(struct mge_softc *sc);
142 static void mge_add_sysctls(struct mge_softc *sc);
143 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
145 static device_method_t mge_methods[] = {
146 /* Device interface */
147 DEVMETHOD(device_probe, mge_probe),
148 DEVMETHOD(device_attach, mge_attach),
149 DEVMETHOD(device_detach, mge_detach),
150 DEVMETHOD(device_shutdown, mge_shutdown),
151 DEVMETHOD(device_suspend, mge_suspend),
152 DEVMETHOD(device_resume, mge_resume),
154 DEVMETHOD(miibus_readreg, mge_miibus_readreg),
155 DEVMETHOD(miibus_writereg, mge_miibus_writereg),
159 static driver_t mge_driver = {
162 sizeof(struct mge_softc),
165 static devclass_t mge_devclass;
167 DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
168 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
169 MODULE_DEPEND(mge, ether, 1, 1, 1);
170 MODULE_DEPEND(mge, miibus, 1, 1, 1);
172 static struct resource_spec res_spec[] = {
173 { SYS_RES_MEMORY, 0, RF_ACTIVE },
174 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
175 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
176 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
177 { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
178 { SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
183 driver_intr_t *handler;
185 } mge_intrs[MGE_INTR_COUNT] = {
186 { mge_intr_rx, "GbE receive interrupt" },
187 { mge_intr_tx, "GbE transmit interrupt" },
188 { mge_intr_misc,"GbE misc interrupt" },
189 { mge_intr_sum, "GbE summary interrupt" },
190 { mge_intr_err, "GbE error interrupt" },
/*
 * mge_get_mac_address - obtain the station MAC address for this interface.
 *
 * First attempts to read the "local-mac-address" property from the
 * device-tree node; on failure it falls back to the address currently
 * programmed into the MGE_MAC_ADDR_L/H registers, unpacking the six
 * bytes most-significant first (4 bytes from the high register, 2 from
 * the low register).
 *
 * NOTE(review): several interior lines (declarations of i/lmac and the
 * validity checks around OF_getprop) are missing from this excerpt.
 */
194 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
196 uint32_t mac_l, mac_h;
201	 * Retrieve hw address from the device tree.
203	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
206	for (i = 0; i < 6; i++)
213	bcopy(lmac, addr, 6);
219	 * Fall back -- use the currently programmed address.
221	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
222	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
/* Unpack: MAC_ADDR_H holds bytes 0-3 (MSB first), MAC_ADDR_L bytes 4-5. */
224	addr[0] = (mac_h & 0xff000000) >> 24;
225	addr[1] = (mac_h & 0x00ff0000) >> 16;
226	addr[2] = (mac_h & 0x0000ff00) >> 8;
227	addr[3] = (mac_h & 0x000000ff);
228	addr[4] = (mac_l & 0x0000ff00) >> 8;
229	addr[5] = (mac_l & 0x000000ff);
/*
 * mge_tfut_ipg - encode the TX FIFO urgent threshold value into its
 * register field. The usable width depends on the controller version
 * (14-bit mask vs. 16-bit mask); the value is shifted left 4 bits into
 * field position. NOTE(review): the version dispatch (switch/if on
 * 'ver') is not visible in this excerpt.
 */
233 mge_tfut_ipg(uint32_t val, int ver)
238		return ((val & 0x3fff) << 4);
241		return ((val & 0xffff) << 4);
/*
 * mge_rx_ipg - encode the RX inter-packet-gap value into its register
 * layout, which differs per controller version: either a contiguous
 * 14-bit field shifted by 8, or a split encoding where bit 15 of the
 * value lands at bit 25 and bits 14:0 start at bit 7.
 * NOTE(review): the version dispatch is not visible in this excerpt.
 */
246 mge_rx_ipg(uint32_t val, int ver)
251		return ((val & 0x3fff) << 8);
254		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
/*
 * mge_ver_params - set chip-version-dependent softc parameters:
 * maximum TX-FIFO-urgent-threshold and RX IPG values, and the TX
 * arbiter/token-bucket configuration words. Newer SoCs (88F6281,
 * MV78100, MV78100_Z0) take the first parameter set; all others take
 * the second. NOTE(review): the assignment of 'd' (SoC device id) and
 * sc->mge_ver are not visible in this excerpt.
 */
259 mge_ver_params(struct mge_softc *sc)
264	if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
265	    d == MV_DEV_MV78100_Z0) {
268		sc->mge_tfut_ipg_max = 0xFFFF;
269		sc->mge_rx_ipg_max = 0xFFFF;
270		sc->mge_tx_arb_cfg = 0xFC0000FF;
271		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
272		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/* Older controller generation: narrower fields and different arbiter cfg. */
276		sc->mge_tfut_ipg_max = 0x3FFF;
277		sc->mge_rx_ipg_max = 0x3FFF;
278		sc->mge_tx_arb_cfg = 0x000000FF;
279		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
280		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/*
 * mge_set_mac_address - program the interface's link-level address
 * (IF_LLADDR) into the MAC_ADDR_L/H registers and install the matching
 * unicast filter entry for the default RX queue. Caller must hold both
 * driver locks (asserted below).
 */
285 mge_set_mac_address(struct mge_softc *sc)
288	uint32_t mac_l, mac_h;
290	MGE_GLOBAL_LOCK_ASSERT(sc);
292	if_mac = (char *)IF_LLADDR(sc->ifp);
/* Pack bytes 4-5 into the low register, bytes 0-3 (MSB first) into the high. */
294	mac_l = (if_mac[4] << 8) | (if_mac[5]);
295	mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
296	    (if_mac[2] << 8) | (if_mac[3] << 0);
298	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
299	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
/* Unicast filtering is keyed on the last address byte. */
301	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
/*
 * mge_set_ucast_address - program the unicast DA filter table so that
 * frames whose last address byte is 'last_byte' are accepted into
 * 'queue'. Each filter register packs MGE_UCAST_REG_NUMBER one-byte
 * entries (8 bits each: enable bit | queue<<1); the matching register
 * gets the entry value and all others are cleared.
 */
305 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
307	uint32_t reg_idx, reg_off, reg_val, i;
310	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
311	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
312	reg_val = (1 | (queue << 1)) << reg_off;
314	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
/* NOTE(review): the i == reg_idx test between these writes is missing
 * from this excerpt; only the selected register receives reg_val. */
316			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
318			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
/*
 * mge_set_prom_mode - enable or disable promiscuous reception based on
 * IFF_PROMISC. Enabling sets PORT_CONFIG_UPM and opens every entry of
 * the special/other-multicast and unicast DA filter tables toward
 * 'queue'; disabling clears UPM, wipes the multicast tables and
 * re-programs the station address filter via mge_set_mac_address().
 */
323 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
325	uint32_t port_config;
328	/* Enable or disable promiscuous mode as needed */
329	if (sc->ifp->if_flags & IFF_PROMISC) {
330		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
331		port_config |= PORT_CONFIG_UPM;
332		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
/* Replicate the per-entry pattern (enable | queue<<1) into all four
 * byte lanes of each 32-bit filter register. */
334		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
335		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
337		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
338			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
339			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
342		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
343			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
/* Leaving promiscuous mode: clear UPM and the multicast tables, then
 * restore the unicast filter for the configured MAC address. */
346		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
347		port_config &= ~PORT_CONFIG_UPM;
348		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
350		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
351			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
352			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
355		mge_set_mac_address(sc);
/*
 * mge_get_dma_addr - bus_dmamap_load() callback: stores the physical
 * address of the single DMA segment into the bus_addr_t pointed to by
 * 'arg'. Exactly one segment is expected (asserted).
 */
360 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
364	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
367	*paddr = segs->ds_addr;
/*
 * mge_new_rxbuf - allocate a fresh mbuf cluster for an RX descriptor
 * and DMA-map it. If *mbufp already held a buffer, the old mapping is
 * synced and unloaded first (the POSTREAD/unload pair below). The new
 * cluster's physical address is returned through *paddr. A mapping
 * that yields more than one segment is a hard driver error (panic).
 */
371 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
374	struct mbuf *new_mbuf;
375	bus_dma_segment_t seg[1];
379	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
381	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
382	if (new_mbuf == NULL)
/* Use the entire cluster as the receive buffer. */
384	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
/* Tear down the previous buffer's mapping before reusing the map. */
387		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
388		bus_dmamap_unload(tag, map);
391	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
393	KASSERT(nsegs == 1, ("Too many segments returned!"));
394	if (nsegs != 1 || error)
395		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
397	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
400	(*paddr) = seg->ds_addr;
/*
 * mge_alloc_desc_dma - allocate and chain a ring of 'size' hardware
 * descriptors in DMA-coherent memory, create a busdma tag for the
 * mbuf data buffers (returned via *buffer_tag), and create one buffer
 * map per descriptor wrapper. Descriptors are allocated from the last
 * entry backwards so each one's next_desc can point at the previously
 * allocated (i.e. following) descriptor; the final write closes the
 * ring by linking the last descriptor back to the first.
 */
405 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
406     uint32_t size, bus_dma_tag_t *buffer_tag)
408	struct mge_desc_wrapper *dw;
409	bus_addr_t desc_paddr;
413	for (i = size - 1; i >= 0; i--) {
415		error = bus_dmamem_alloc(sc->mge_desc_dtag,
416		    (void**)&(dw->mge_desc),
417		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
421			if_printf(sc->ifp, "failed to allocate DMA memory\n");
/* Load the descriptor to learn its physical address (stored in
 * dw->mge_desc_paddr by the mge_get_dma_addr callback). */
426		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
427		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
428		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
431			if_printf(sc->ifp, "can't load descriptor\n");
432			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
438		/* Chain descriptors */
439		dw->mge_desc->next_desc = desc_paddr;
440		desc_paddr = dw->mge_desc_paddr;
/* Close the ring: last descriptor points back at the first. */
442	tab[size - 1].mge_desc->next_desc = desc_paddr;
444	/* Allocate a busdma tag for mbufs. */
445	error = bus_dma_tag_create(NULL,	/* parent */
446	    8, 0,				/* alignment, boundary */
447	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
448	    BUS_SPACE_MAXADDR,			/* highaddr */
449	    NULL, NULL,				/* filtfunc, filtfuncarg */
450	    MCLBYTES, 1,			/* maxsize, nsegments */
451	    MCLBYTES, 0,			/* maxsegsz, flags */
452	    NULL, NULL,				/* lockfunc, lockfuncarg */
453	    buffer_tag);			/* dmat */
455		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
459	/* Create TX busdma maps */
460	for (i = 0; i < size; i++) {
462		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
464			if_printf(sc->ifp, "failed to create map for mbuf\n");
/* Start with no buffer attached to the descriptor. */
468		dw->buffer = (struct mbuf*)NULL;
469		dw->mge_desc->buffer = (bus_addr_t)NULL;
/*
 * mge_allocate_dma - create the descriptor busdma tag, build the TX and
 * RX descriptor rings (via mge_alloc_desc_dma), attach an initial mbuf
 * cluster to every RX descriptor, and record the physical start
 * addresses of both rings for later programming into the controller.
 */
476 mge_allocate_dma(struct mge_softc *sc)
479	struct mge_desc_wrapper *dw;
482	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
483	error = bus_dma_tag_create(NULL,	/* parent */
484	    16, 0,				/* alignment, boundary */
485	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
486	    BUS_SPACE_MAXADDR,			/* highaddr */
487	    NULL, NULL,				/* filtfunc, filtfuncarg */
488	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
489	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
490	    NULL, NULL,				/* lockfunc, lockfuncarg */
491	    &sc->mge_desc_dtag);		/* dmat */
494	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
496	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
/* Populate each RX descriptor with a freshly mapped mbuf cluster. */
499	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
500		dw = &(sc->mge_rx_desc[i]);
501		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
502		    &dw->mge_desc->buffer);
505	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
506	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
/*
 * mge_free_desc - tear down a descriptor ring: for each entry,
 * optionally unload/destroy the data-buffer map (and, per the
 * 'free_mbufs' flag, release the attached mbuf -- the m_freem call is
 * not visible in this excerpt), then sync, unload and free the
 * descriptor's own DMA memory.
 */
512 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
513     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
515	struct mge_desc_wrapper *dw;
518	for (i = 0; i < size; i++) {
522		if (dw->buffer_dmap) {
524				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
525				    BUS_DMASYNC_POSTREAD);
526				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
528			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
532		/* Free RX descriptors */
534			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
535			    BUS_DMASYNC_POSTREAD);
536			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
537			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
/*
 * mge_free_dma - release all DMA state in reverse order of allocation:
 * both descriptor rings (RX frees its mbufs, TX does not), then the
 * per-direction mbuf tags, and finally the shared descriptor tag.
 */
544 mge_free_dma(struct mge_softc *sc)
546	/* Free desciptors and mbufs */
547	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
548	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
550	/* Destroy mbuf dma tag */
551	bus_dma_tag_destroy(sc->mge_tx_dtag);
552	bus_dma_tag_destroy(sc->mge_rx_dtag);
553	/* Destroy descriptors tag */
554	bus_dma_tag_destroy(sc->mge_desc_dtag);
/*
 * mge_reinit_rx - rebuild the RX ring after a receive resource error:
 * free the old ring and buffers, reallocate descriptors and mbufs,
 * point the controller's current-descriptor register at the new ring
 * start and re-enable the default RX queue. Caller must hold the
 * receive lock (asserted).
 */
558 mge_reinit_rx(struct mge_softc *sc)
560	struct mge_desc_wrapper *dw;
563	MGE_RECEIVE_LOCK_ASSERT(sc);
565	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
567	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
570	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
571		dw = &(sc->mge_rx_desc[i]);
572		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
573		&dw->mge_desc->buffer);
576	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
577	sc->rx_desc_curr = 0;
579	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
582	/* Enable RX queue */
583	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
#ifdef DEVICE_POLLING
587 static poll_handler_t mge_poll;
/*
 * mge_poll - DEVICE_POLLING handler: services TX completions and up to
 * 'count' received packets with interrupts disabled. When invoked as
 * POLL_AND_CHECK_STATUS it additionally reads the interrupt cause
 * registers, triggers RX-ring recovery on a resource error, and acks
 * any pending causes. Returns the number of RX packets processed
 * (rx_npkts; the return statement is not visible in this excerpt).
 */
590 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
592	struct mge_softc *sc = ifp->if_softc;
593	uint32_t int_cause, int_cause_ext;
/* Nothing to do if the interface is not running. */
598	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
599		MGE_GLOBAL_UNLOCK(sc);
603	if (cmd == POLL_AND_CHECK_STATUS) {
604		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
605		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
607		/* Check for resource error */
608		if (int_cause & MGE_PORT_INT_RXERRQ0)
/* Ack whatever causes were observed (write-one-to-preserve semantics:
 * writing the complement clears the handled bits). */
611		if (int_cause || int_cause_ext) {
612			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
613			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
617	mge_intr_tx_locked(sc);
618	rx_npkts = mge_intr_rx_locked(sc, count);
620	MGE_GLOBAL_UNLOCK(sc);
623 #endif /* DEVICE_POLLING */
/*
 * mge_attach - device attach routine. Sequence: record the OFW node,
 * read version parameters and the PHY address from the FDT, create the
 * TX/RX mutexes, allocate bus resources and DMA rings, create and
 * configure the ifnet (checksum offload, VLAN MTU, optional polling),
 * attach the interface and MII bus, tell the MAC which PHY address to
 * use for autonegotiation, and finally hook up the interrupt handlers.
 * NOTE(review): error-unwind paths between these steps are largely
 * missing from this excerpt.
 */
626 mge_attach(device_t dev)
628	struct mge_softc *sc;
629	struct mii_softc *miisc;
631	uint8_t hwaddr[ETHER_ADDR_LEN];
634	sc = device_get_softc(dev);
636	sc->node = ofw_bus_get_node(dev);
/* PHY registers live in unit 0's register window; remember its softc. */
638	if (device_get_unit(dev) == 0)
641	/* Set chip version-dependent parameters */
644	/* Get phy address from fdt */
645	if (fdt_get_phyaddr(sc->node, &phy) != 0)
648	/* Initialize mutexes */
649	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
650	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
652	/* Allocate IO and IRQ resources */
653	error = bus_alloc_resources(dev, res_spec, sc->res);
655		device_printf(dev, "could not allocate resources\n");
660	/* Allocate DMA, buffers, buffer descriptors */
661	error = mge_allocate_dma(sc);
667	sc->tx_desc_curr = 0;
668	sc->rx_desc_curr = 0;
669	sc->tx_desc_used_idx = 0;
670	sc->tx_desc_used_count = 0;
672	/* Configure defaults for interrupts coalescing */
673	sc->rx_ic_time = 768;
674	sc->tx_ic_time = 768;
677	/* Allocate network interface */
678	ifp = sc->ifp = if_alloc(IFT_ETHER);
680		device_printf(dev, "if_alloc() failed\n");
685	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
687	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
688	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
689	ifp->if_capenable = ifp->if_capabilities;
690	ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
692 #ifdef DEVICE_POLLING
693	/* Advertise that polling is supported */
694	ifp->if_capabilities |= IFCAP_POLLING;
697	ifp->if_init = mge_init;
698	ifp->if_start = mge_start;
699	ifp->if_ioctl = mge_ioctl;
/* Cap the send queue one below the TX ring size. */
701	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
702	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
703	IFQ_SET_READY(&ifp->if_snd);
705	mge_get_mac_address(sc, hwaddr);
706	ether_ifattach(ifp, hwaddr);
707	callout_init(&sc->wd_callout, 0);
710	error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
711	    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
713		device_printf(dev, "attaching PHYs failed\n");
717	sc->mii = device_get_softc(sc->miibus);
719	/* Tell the MAC where to find the PHY so autoneg works */
720	miisc = LIST_FIRST(&sc->mii->mii_phys);
721	MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
723	/* Attach interrupt handlers */
/* NOTE(review): only two of the five mge_intrs entries are wired here
 * (loop bound 2); sc->res[0] is the memory window, IRQs start at [1]. */
724	for (i = 0; i < 2; ++i) {
725		error = bus_setup_intr(dev, sc->res[1 + i],
726		    INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
727		    sc, &sc->ih_cookie[i]);
729			device_printf(dev, "could not setup %s\n",
730			    mge_intrs[i].description);
/*
 * mge_detach - device detach routine: stop the controller, drain the
 * watchdog callout, tear down the interrupt handlers that were set up
 * in mge_attach (same loop bound of 2), detach the ifnet, free DMA
 * resources, release bus resources and destroy the mutexes.
 */
740 mge_detach(device_t dev)
742	struct mge_softc *sc;
745	sc = device_get_softc(dev);
747	/* Stop controller and free TX queue */
751	/* Wait for stopping ticks */
752	callout_drain(&sc->wd_callout);
754	/* Stop and release all interrupts */
755	for (i = 0; i < 2; ++i) {
756		if (!sc->ih_cookie[i])
759		error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
761			device_printf(dev, "could not release %s\n",
762			    mge_intrs[i].description);
765	/* Detach network interface */
767		ether_ifdetach(sc->ifp);
771	/* Free DMA resources */
774	/* Free IO memory handler */
775	bus_release_resources(dev, res_spec, sc->res);
777	/* Destroy mutexes */
778	mtx_destroy(&sc->receive_lock);
779	mtx_destroy(&sc->transmit_lock);
/*
 * mge_ifmedia_sts - ifmedia status callback: report the current media
 * type and link status from the MII layer under the transmit lock.
 */
785 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
787	struct mge_softc *sc = ifp->if_softc;
788	struct mii_data *mii;
790	MGE_TRANSMIT_LOCK(sc);
795	ifmr->ifm_active = mii->mii_media_active;
796	ifmr->ifm_status = mii->mii_media_status;
798	MGE_TRANSMIT_UNLOCK(sc);
/*
 * mge_set_port_serial_control - translate an ifmedia word into the
 * MGE port serial control register value. Baseline config forces link
 * fail and a 1552-byte MRU; for Ethernet media the speed subtype
 * selects GMII-1000 / MII-100 / plain autoneg flag sets, and full
 * duplex adds PORT_SERIAL_FULL_DUPLEX. Returns the assembled value.
 */
802 mge_set_port_serial_control(uint32_t media)
804	uint32_t port_config;
806	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
807	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
809	if (IFM_TYPE(media) == IFM_ETHER) {
810		switch(IFM_SUBTYPE(media)) {
/* 1000baseT case (case labels not visible in this excerpt). */
814				port_config  |= (PORT_SERIAL_GMII_SPEED_1000 |
815				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
816				    PORT_SERIAL_SPEED_AUTONEG);
819				port_config  |= (PORT_SERIAL_MII_SPEED_100 |
820				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
821				    PORT_SERIAL_SPEED_AUTONEG);
824				port_config  |= (PORT_SERIAL_AUTONEG |
825				    PORT_SERIAL_AUTONEG_FC |
826				    PORT_SERIAL_SPEED_AUTONEG);
830		port_config |= PORT_SERIAL_FULL_DUPLEX;
832	return (port_config);
/*
 * mge_ifmedia_upd - ifmedia change callback: when the interface is up,
 * remember the newly selected media word in the softc and ask the MII
 * layer to apply it.
 */
836 mge_ifmedia_upd(struct ifnet *ifp)
838	struct mge_softc *sc = ifp->if_softc;
840	if (ifp->if_flags & IFF_UP) {
843		sc->mge_media_status = sc->mii->mii_media.ifm_media;
844		mii_mediachg(sc->mii);
847		MGE_GLOBAL_UNLOCK(sc);
856 struct mge_softc *sc = arg;
860 mge_init_locked(arg);
862 MGE_GLOBAL_UNLOCK(sc);
/*
 * mge_init_locked - (re)initialize the controller with both locks held.
 * Sequence: mask interrupts, program MAC address and multicast filters,
 * apply v2-specific RGMII/priority setup, configure the TX queue 0
 * token bucket/arbiter (clearing queues 1-6), set MTU and port config,
 * derive the serial control word from the remembered media, set up
 * SDMA byte-swap/burst parameters, point the hardware at the descriptor
 * rings, arm all RX descriptors for DMA, enable the RX queue and the
 * port, wait for link-up, configure interrupt coalescing, re-enable
 * interrupts (unless polling is active), mark the interface running and
 * start the watchdog tick.
 */
866 mge_init_locked(void *arg)
868	struct mge_softc *sc = arg;
869	struct mge_desc_wrapper *dw;
870	volatile uint32_t reg_val;
874	MGE_GLOBAL_LOCK_ASSERT(sc);
879	/* Disable interrupts */
880	mge_intrs_ctrl(sc, 0);
882	/* Set MAC address */
883	mge_set_mac_address(sc);
885	/* Setup multicast filters */
886	mge_setup_multicast(sc);
888	if (sc->mge_ver == 2) {
889		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
890		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
893	/* Initialize TX queue configuration registers */
894	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
895	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
896	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
898	/* Clear TX queue configuration registers for unused queues */
899	for (i = 1; i < 7; i++) {
900		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
901		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
902		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
905	/* Set default MTU */
906	MGE_WRITE(sc, sc->mge_mtu, 0);
908	/* Port configuration */
909	MGE_WRITE(sc, MGE_PORT_CONFIG,
910	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
911	    PORT_CONFIG_ARO_RXQ(0));
912	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);
914	/* Setup port configuration */
915	reg_val = mge_set_port_serial_control(sc->mge_media_status);
916	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
918	/* Setup SDMA configuration */
919	MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
920	    MGE_SDMA_TX_BYTE_SWAP |
921	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
922	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
924	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
926	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
927	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
930	/* Reset descriptor indexes */
931	sc->tx_desc_curr = 0;
932	sc->rx_desc_curr = 0;
933	sc->tx_desc_used_idx = 0;
934	sc->tx_desc_used_count = 0;
936	/* Enable RX descriptors */
/* Hand ownership of every RX descriptor to the DMA engine. */
937	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
938		dw = &sc->mge_rx_desc[i];
939		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
940		dw->mge_desc->buff_size = MCLBYTES;
941		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
942		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
945	/* Enable RX queue */
946	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
949	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
950	reg_val |= PORT_SERIAL_ENABLE;
951	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
/* Poll for link-up (retry loop not visible in this excerpt). */
954		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
955		if (reg_val & MGE_STATUS_LINKUP)
959		if_printf(sc->ifp, "Timeout on link-up\n");
964	/* Setup interrupts coalescing */
968	/* Enable interrupts */
969 #ifdef DEVICE_POLLING
971	 * * ...only if polling is not turned on. Disable interrupts explicitly
972	 * if polling is enabled.
974	if (sc->ifp->if_capenable & IFCAP_POLLING)
975		mge_intrs_ctrl(sc, 0);
977 #endif /* DEVICE_POLLING */
978	mge_intrs_ctrl(sc, 1);
980	/* Activate network interface */
981	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
982	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
985	/* Schedule watchdog timeout */
986	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * mge_intr_err - error interrupt handler; currently only logs that the
 * interrupt fired.
 */
990 mge_intr_err(void *arg)
992	struct mge_softc *sc = arg;
996	if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * mge_intr_misc - miscellaneous interrupt handler; currently only logs
 * that the interrupt fired.
 */
1000 mge_intr_misc(void *arg)
1002	struct mge_softc *sc = arg;
1006	if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * mge_intr_rx - receive interrupt handler. Bails out immediately when
 * DEVICE_POLLING is active (the poll handler services RX instead).
 * Otherwise: reads the cause registers, recovers the RX ring on a
 * resource error (MGE_PORT_INT_RXERRQ0) and acks that bit, then acks
 * the remaining RX causes and drains the ring without a packet limit
 * (count = -1).
 */
1010 mge_intr_rx(void *arg) {
1011	struct mge_softc *sc = arg;
1012	uint32_t int_cause, int_cause_ext;
1014	MGE_RECEIVE_LOCK(sc);
1016 #ifdef DEVICE_POLLING
1017	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1018		MGE_RECEIVE_UNLOCK(sc);
1023	/* Get interrupt cause */
1024	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1025	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1027	/* Check for resource error */
1028	if (int_cause & MGE_PORT_INT_RXERRQ0) {
/* Ack only the error bit; remaining causes are handled below. */
1030		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1031		    int_cause & ~MGE_PORT_INT_RXERRQ0);
/* Restrict attention to the RX-related cause bits. */
1034	int_cause &= MGE_PORT_INT_RXQ0;
1035	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1037	if (int_cause || int_cause_ext) {
1038		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1039		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1040		mge_intr_rx_locked(sc, -1);
1043	MGE_RECEIVE_UNLOCK(sc);
/*
 * mge_intr_rx_locked - drain received frames from the RX ring with the
 * receive lock held. Processes at most 'count' descriptors (a negative
 * count means no limit). For each completed, error-free descriptor the
 * payload is copied out with m_devget (CRC stripped), checksum-offload
 * results are applied, and the frame is handed to if_input with the
 * lock dropped. The descriptor is then recycled back to the DMA engine.
 * Returns the number of packets processed (return not visible in this
 * excerpt).
 */
1048 mge_intr_rx_locked(struct mge_softc *sc, int count)
1050	struct ifnet *ifp = sc->ifp;
1053	struct mge_desc_wrapper* dw;
1057	MGE_RECEIVE_LOCK_ASSERT(sc);
1059	while (count != 0) {
1060		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1061		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1062		    BUS_DMASYNC_POSTREAD);
1065		status = dw->mge_desc->cmd_status;
1066		bufsize = dw->mge_desc->buff_size;
/* Descriptor still owned by hardware: ring is drained, stop. */
1067		if ((status & MGE_DMA_OWNED) != 0)
1070		if (dw->mge_desc->byte_count &&
1071		    ~(status & MGE_ERR_SUMMARY)) {
1073			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1074			    BUS_DMASYNC_POSTREAD);
/* Copy the frame out of the DMA buffer, dropping the trailing CRC. */
1076			mb = m_devget(dw->buffer->m_data,
1077			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
1081				/* Give up if no mbufs */
/* NOTE(review): the 2-byte adjustment presumably strips a leading
 * alignment pad added by the hardware -- confirm against the datasheet. */
1085			mb->m_pkthdr.len -= 2;
1088			mge_offload_process_frame(ifp, mb, status,
/* Drop the RX lock around the upcall into the network stack. */
1091			MGE_RECEIVE_UNLOCK(sc);
1092			(*ifp->if_input)(ifp, mb);
1093			MGE_RECEIVE_LOCK(sc);
/* Recycle the descriptor: clear the count and hand it back to DMA. */
1097		dw->mge_desc->byte_count = 0;
1098		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1099		sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
1100		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1101		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * mge_intr_sum - summary interrupt handler; currently only logs that
 * the interrupt fired.
 */
1111 mge_intr_sum(void *arg)
1113	struct mge_softc *sc = arg;
1117	if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * mge_intr_tx - transmit interrupt handler. Skipped entirely while
 * DEVICE_POLLING is active. Otherwise acks the TX-buffer cause bit and
 * reaps completed transmissions via mge_intr_tx_locked().
 */
1121 mge_intr_tx(void *arg)
1123	struct mge_softc *sc = arg;
1124	uint32_t int_cause_ext;
1126	MGE_TRANSMIT_LOCK(sc);
1128 #ifdef DEVICE_POLLING
1129	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1130		MGE_TRANSMIT_UNLOCK(sc);
1135	/* Ack the interrupt */
1136	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1137	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
1138	    int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);
1140	mge_intr_tx_locked(sc);
1142	MGE_TRANSMIT_UNLOCK(sc);
/*
 * mge_intr_tx_locked - reap completed TX descriptors with the transmit
 * lock held: for each descriptor the hardware has released (DMA_OWNED
 * clear), advance the used index, account collisions from the error
 * status, unload the buffer mapping and free the mbuf. Finally clears
 * OACTIVE and kicks mge_start_locked() to push any queued packets.
 */
1147 mge_intr_tx_locked(struct mge_softc *sc)
1149	struct ifnet *ifp = sc->ifp;
1150	struct mge_desc_wrapper *dw;
1151	struct mge_desc *desc;
1155	MGE_TRANSMIT_LOCK_ASSERT(sc);
1157	/* Disable watchdog */
1160	while (sc->tx_desc_used_count) {
1161		/* Get the descriptor */
1162		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1163		desc = dw->mge_desc;
1164		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1165		    BUS_DMASYNC_POSTREAD);
1167		/* Get descriptor status */
1168		status = desc->cmd_status;
/* Still owned by hardware: nothing more to reap. */
1170		if (status & MGE_DMA_OWNED)
1173		sc->tx_desc_used_idx =
1174		    (++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;
1175		sc->tx_desc_used_count--;
1177		/* Update collision statistics */
1178		if (status & MGE_ERR_SUMMARY) {
1179			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1180				ifp->if_collisions++;
/* Retransmit-limit error counts as 16 collisions (excessive). */
1181			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1182				ifp->if_collisions += 16;
1185		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1186		    BUS_DMASYNC_POSTWRITE);
1187		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1188		m_freem(dw->buffer);
1189		dw->buffer = (struct mbuf*)NULL;
1196	/* Now send anything that was pending */
1197	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1198	mge_start_locked(ifp);
/*
 * mge_ioctl - interface ioctl handler. Visible cases in this excerpt:
 * SIOCSIFFLAGS (apply promiscuity/allmulti changes or (re)init/stop the
 * interface), multicast add/del (rebuild filters), SIOCSIFCAP (toggle
 * HWCSUM and, under DEVICE_POLLING, register/deregister the poll
 * handler), and SIOCGIFMEDIA/SIOCSIFMEDIA (delegated to ifmedia_ioctl,
 * rejecting 1000baseT half-duplex). Everything else falls through to
 * ether_ioctl(). NOTE(review): the switch statement and some case
 * labels are not visible in this excerpt.
 */
1203 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1205	struct mge_softc *sc = ifp->if_softc;
1206	struct ifreq *ifr = (struct ifreq *)data;
1214		MGE_GLOBAL_LOCK(sc);
1216		if (ifp->if_flags & IFF_UP) {
1217			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Only the flags that changed since last time need acting on. */
1218				flags = ifp->if_flags ^ sc->mge_if_flags;
1219				if (flags & IFF_PROMISC)
1220					mge_set_prom_mode(sc,
1221					    MGE_RX_DEFAULT_QUEUE);
1223				if (flags & IFF_ALLMULTI)
1224					mge_setup_multicast(sc);
1226				mge_init_locked(sc);
1228		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1231		sc->mge_if_flags = ifp->if_flags;
1232		MGE_GLOBAL_UNLOCK(sc);
1236		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1237			MGE_GLOBAL_LOCK(sc);
1238			mge_setup_multicast(sc);
1239			MGE_GLOBAL_UNLOCK(sc);
1243		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1244		if (mask & IFCAP_HWCSUM) {
1245			ifp->if_capenable &= ~IFCAP_HWCSUM;
1246			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1247			if (ifp->if_capenable & IFCAP_TXCSUM)
1248				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1250				ifp->if_hwassist = 0;
1252 #ifdef DEVICE_POLLING
1253		if (mask & IFCAP_POLLING) {
1254			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1255				error = ether_poll_register(mge_poll, ifp);
/* Polling on: mask device interrupts, they are now redundant. */
1259				MGE_GLOBAL_LOCK(sc);
1260				mge_intrs_ctrl(sc, 0);
1261				ifp->if_capenable |= IFCAP_POLLING;
1262				MGE_GLOBAL_UNLOCK(sc);
1264				error = ether_poll_deregister(ifp);
1265				MGE_GLOBAL_LOCK(sc);
1266				mge_intrs_ctrl(sc, 1);
1267				ifp->if_capenable &= ~IFCAP_POLLING;
1268				MGE_GLOBAL_UNLOCK(sc);
1273	case SIOCGIFMEDIA: /* fall through */
1275		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1276		    && !(ifr->ifr_media & IFM_FDX)) {
1277			device_printf(sc->dev,
1278			    "1000baseTX half-duplex unsupported\n");
1281		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1284		error = ether_ioctl(ifp, command, data);
/*
 * mge_miibus_readreg - read a PHY register over the SMI bus. The SMI
 * registers live in unit 0's register window (sc_mge0), regardless of
 * which mge instance is asking. Issues the read command, then polls
 * for MGE_SMI_READVALID with bounded retries and returns the low 16
 * bits of the SMI register (the data).
 */
1290 mge_miibus_readreg(device_t dev, int phy, int reg)
1292	struct mge_softc *sc;
1295	sc = device_get_softc(dev);
1297	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1298	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1300	retries = MGE_SMI_READ_RETRIES;
1301	while (--retries && !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
1302		DELAY(MGE_SMI_READ_DELAY);
/* retries exhausted => the PHY never flagged the data as valid. */
1305		device_printf(dev, "Timeout while reading from PHY\n");
1307	return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
/*
 * mge_miibus_writereg - write a PHY register over the SMI bus (again
 * always through unit 0's register window). Issues the write command
 * with the 16-bit value, then polls until MGE_SMI_BUSY clears, with
 * bounded retries.
 */
1311 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1313	struct mge_softc *sc;
1316	sc = device_get_softc(dev);
1318	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1319	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1321	retries = MGE_SMI_WRITE_RETRIES;
1322	while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
1323		DELAY(MGE_SMI_WRITE_DELAY);
1326		device_printf(dev, "Timeout while writing to PHY\n");
/*
 * mge_probe - match the device against the "mrvl,ge" FDT compatible
 * string and set the device description.
 */
1331 mge_probe(device_t dev)
1334	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1337	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1338	return (BUS_PROBE_DEFAULT);
1342 mge_resume(device_t dev)
1345 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * mge_shutdown - system shutdown hook: deregister the polling handler
 * if active, then stop the controller under the global lock (the
 * mge_stop call is not visible in this excerpt).
 */
1350 mge_shutdown(device_t dev)
1352	struct mge_softc *sc = device_get_softc(dev);
1354	MGE_GLOBAL_LOCK(sc);
1356 #ifdef DEVICE_POLLING
1357        if (sc->ifp->if_capenable & IFCAP_POLLING)
1358		ether_poll_deregister(sc->ifp);
1363	MGE_GLOBAL_UNLOCK(sc);
/*
 * mge_encap - map an outgoing mbuf chain into the next free TX
 * descriptor. Fails (non-zero) when the ring is full or the busdma
 * mapping does not resolve to exactly one segment (callers are
 * expected to have defragmented the chain first). On success the
 * descriptor is filled in, checksum-offload bits are applied, the
 * descriptor is handed to DMA ownership, and the ring indices advance.
 */
1369 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1371	struct mge_desc_wrapper *dw = NULL;
1373	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1381	/* Check for free descriptors */
1382	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1383		/* No free descriptors */
1387	/* Fetch unused map */
1388	desc_no = sc->tx_desc_curr;
1389	dw = &sc->mge_tx_desc[desc_no];
1390	mapp = dw->buffer_dmap;
1392	/* Create mapping in DMA memory */
1393	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
/* Single-segment only: the descriptor format carries one buffer. */
1395	if (error != 0 || nsegs != 1 ) {
1396		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1397		return ((error != 0) ? error : -1);
1400	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1402	/* Everything is ok, now we can send buffers */
1403	for (seg = 0; seg < nsegs; seg++) {
1404		dw->mge_desc->byte_count = segs[seg].ds_len;
1405		dw->mge_desc->buffer = segs[seg].ds_addr;
/* Single-fragment frame: first+last, append CRC, pad short frames,
 * interrupt on completion, and hand ownership to the DMA engine. */
1407		dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
1408		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1412		mge_offload_setup_descriptor(sc, dw);
1415	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1416	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1418	sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
1419	sc->tx_desc_used_count++;
/*
 * Body of the periodic tick callback (its header line -- presumably
 * `mge_tick(void *msc)` given the callout_reset() below -- is sampled
 * out of this excerpt).  Checks for a TX watchdog timeout and for a
 * media change, then re-arms itself to fire again in one second.
 */
1426 struct mge_softc *sc = msc;
1428 /* Check for TX timeout */
1433 /* Check for media type change */
1434 if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1435 mge_ifmedia_upd(sc->ifp);
1437 /* Schedule another timeout one second from now */
1438 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * mge_watchdog: TX watchdog check, called periodically (from the tick
 * callback).  If the timer is disarmed (0) or has not yet counted down
 * to zero, return without action; otherwise report a timeout and
 * reinitialize the controller to recover.
 */
1442 mge_watchdog(struct mge_softc *sc)
1448 MGE_GLOBAL_LOCK(sc);
/* wd_timer == 0: disarmed; --wd_timer != 0: armed but not expired. */
1450 if (sc->wd_timer == 0 || --sc->wd_timer) {
1451 MGE_GLOBAL_UNLOCK(sc);
1456 if_printf(ifp, "watchdog timeout\n");
/* Full reinit recovers the stuck transmitter. */
1459 mge_init_locked(sc);
1461 MGE_GLOBAL_UNLOCK(sc);
/* mge_start: if_start entry point -- simply wraps mge_start_locked()
 * in the transmit lock. */
1465 mge_start(struct ifnet *ifp)
1467 struct mge_softc *sc = ifp->if_softc;
1469 MGE_TRANSMIT_LOCK(sc);
1471 mge_start_locked(ifp);
1473 MGE_TRANSMIT_UNLOCK(sc);
/*
 * mge_start_locked: drain if_snd -- dequeue each packet, defragment it
 * into a single mbuf (mge_encap() accepts one DMA segment only), and
 * hand it to mge_encap().  On encap failure the packet is pushed back
 * onto the queue and OACTIVE is set to stall further starts.  Finally
 * the hardware TX queue is (re)enabled.  Caller holds the transmit
 * lock.  NOTE(review): sampled listing -- the dequeue loop's braces
 * and the m_defrag() failure handling are not all visible.
 */
1477 mge_start_locked(struct ifnet *ifp)
1479 struct mge_softc *sc;
1480 struct mbuf *m0, *mtmp;
1481 uint32_t reg_val, queued = 0;
1485 MGE_TRANSMIT_LOCK_ASSERT(sc);
/* Nothing to do unless RUNNING is set and OACTIVE is clear. */
1487 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1492 /* Get packet from the queue */
1493 IF_DEQUEUE(&ifp->if_snd, m0);
/* Collapse the chain into one contiguous mbuf; non-blocking alloc. */
1497 mtmp = m_defrag(m0, M_DONTWAIT);
1501 if (mge_encap(sc, m0)) {
/* No descriptor available: requeue the packet and stall the queue. */
1502 IF_PREPEND(&ifp->if_snd, m0);
1503 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1511 /* Enable transmitter and watchdog timer */
1512 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1513 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
/*
 * mge_stop: bring the interface down.  Stops the tick callout, clears
 * RUNNING/OACTIVE, masks interrupts, disables the RX and TX queues,
 * reclaims and frees every pending TX mbuf, waits (bounded -- the loop
 * header/timeout lines are sampled out) for the transmitter to drain,
 * and finally clears PORT_SERIAL_ENABLE.
 */
1519 mge_stop(struct mge_softc *sc)
1522 volatile uint32_t reg_val, status;
1523 struct mge_desc_wrapper *dw;
1524 struct mge_desc *desc;
/* Already stopped: nothing to do. */
1529 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1532 /* Stop tick engine */
1533 callout_stop(&sc->wd_callout);
1535 /* Disable interface */
1536 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1539 /* Disable interrupts */
1540 mge_intrs_ctrl(sc, 0);
1542 /* Disable Rx and Tx */
1543 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1544 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1545 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1547 /* Remove pending data from TX queue */
1548 while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1549 sc->tx_desc_used_count) {
1550 /* Get the descriptor */
1551 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1552 desc = dw->mge_desc;
1553 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1554 BUS_DMASYNC_POSTREAD);
1556 /* Get descriptor status */
1557 status = desc->cmd_status;
/* Descriptor still owned by the DMA engine: stop reclaiming here. */
1559 if (status & MGE_DMA_OWNED)
/* NOTE(review): `x = (++x) % N` is undefined behavior (two unsequenced
 * modifications of tx_desc_used_idx); should be `x = (x + 1) % N`. */
1562 sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) %
1564 sc->tx_desc_used_count--;
1566 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1567 BUS_DMASYNC_POSTWRITE);
1568 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1570 m_freem(dw->buffer);
1571 dw->buffer = (struct mbuf*)NULL;
1574 /* Wait for end of transmission */
1577 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
/* Done when nothing is in progress and the TX FIFO is empty. */
1578 if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1579 (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1585 if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
/* Finally disable the port's serial interface. */
1588 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1589 reg_val &= ~(PORT_SERIAL_ENABLE);
1590 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
/* mge_suspend: suspend stub -- only logs the function name; no
 * hardware state is saved here (at least on the lines visible). */
1594 mge_suspend(device_t dev)
1597 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * mge_offload_process_frame: translate hardware RX checksum status bits
 * into mbuf csum_flags.  The IP header checksum is marked checked and
 * valid when the frame is IPv4 with the IP-OK bit set; the L4 checksum
 * is marked valid (csum_data = 0xFFFF pseudo-header convention) for
 * non-fragmented TCP/UDP frames whose hardware L4 check passed.
 */
1602 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1603 uint32_t status, uint16_t bufsize)
1607 if (ifp->if_capenable & IFCAP_RXCSUM) {
1608 if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1609 csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
/* Note: the fragment flag lives in `bufsize`, not in `status`. */
1611 if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1612 (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1613 (status & MGE_RX_L4_CSUM_OK)) {
1614 csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1615 frame->m_pkthdr.csum_data = 0xFFFF;
1618 frame->m_pkthdr.csum_flags = csum_flags;
/*
 * mge_offload_setup_descriptor: derive TX checksum-offload command bits
 * for descriptor `dw` from its mbuf's csum_flags, accounting for an
 * optional 802.1Q VLAN encapsulation header.  Only IPv4 ethertypes are
 * supported; anything else logs a warning (message is split across
 * lines partially sampled out of this excerpt).
 */
1623 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1625 struct mbuf *m0 = dw->buffer;
1626 struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1627 int csum_flags = m0->m_pkthdr.csum_flags;
/* VLAN-tagged frame: real ethertype follows the 4-byte encap header. */
1633 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1634 etype = ntohs(eh->evl_proto);
1635 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1636 csum_flags |= MGE_TX_VLAN_TAGGED;
1638 etype = ntohs(eh->evl_encap_proto);
1639 ehlen = ETHER_HDR_LEN;
1642 if (etype != ETHERTYPE_IP) {
1644 "TCP/IP Offload enabled for unsupported "
/* Hardware needs the IP header length (ip_hl) to find the L4 header. */
1649 ip = (struct ip *)(m0->m_data + ehlen);
1650 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1652 if ((m0->m_flags & M_FRAG) == 0)
1653 cmd_status |= MGE_TX_NOT_FRAGMENT;
1656 if (csum_flags & CSUM_IP)
1657 cmd_status |= MGE_TX_GEN_IP_CSUM;
1659 if (csum_flags & CSUM_TCP)
1660 cmd_status |= MGE_TX_GEN_L4_CSUM;
/* UDP also needs the UDP-type bit alongside the generic L4 bit. */
1662 if (csum_flags & CSUM_UDP)
1663 cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1665 dw->mge_desc->cmd_status |= cmd_status;
/*
 * mge_intrs_ctrl: enable (non-zero `enable`) or disable (zero) the
 * port's interrupt sources.  The enable path unmasks RXQ0, extended
 * and RX-error causes plus the extended TX error/overrun/underrun/
 * buffer sources; the disable path zeroes all cause and mask
 * registers.  (The if/else lines themselves are sampled out of this
 * excerpt.)
 */
1669 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1673 MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1674 MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1675 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1676 MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1677 MGE_PORT_INT_EXT_TXBUF0);
/* Disable path: clear causes first, then mask everything off. */
1679 MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1680 MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1682 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1683 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1685 MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1686 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
/*
 * mge_crc8: table-driven CRC-8 over `size` bytes of `data`; used below
 * by mge_setup_multicast() to hash MAC addresses into the
 * other-multicast filter table.  The 256-entry table is consistent
 * with generator polynomial 0x07 (x^8 + x^2 + x + 1): ct[1] == 0x07.
 * (The `crc` initialization, the byte loop header and the return
 * statement are sampled out of this excerpt.)
 */
1691 mge_crc8(uint8_t *data, int size)
1694 static const uint8_t ct[256] = {
1695 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1696 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1697 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1698 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1699 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1700 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1701 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1702 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1703 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1704 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1705 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1706 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1707 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1708 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1709 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1710 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1711 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1712 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1713 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
1714 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
1715 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
1716 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
1717 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
1718 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
1719 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
1720 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
1721 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
1722 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
1723 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
1724 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
1725 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
1726 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
/* Per-byte update: crc = table[crc XOR next byte]. */
1730 crc = ct[crc ^ *(data++)];
/*
 * mge_setup_multicast: program the special-multicast (smt) and
 * other-multicast (omt) filter tables.  With IFF_ALLMULTI every table
 * byte accepts to the default RX queue; otherwise both tables are
 * cleared and rebuilt from the interface's multicast list: addresses
 * with the 01:00:5e:00:00:xx prefix index the special table directly
 * by their last octet (the `i = mac[5]` line is sampled out), and all
 * others hash via mge_crc8() into the other table.
 */
1736 mge_setup_multicast(struct mge_softc *sc)
1738 uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
/* Table byte for an accepted address: pass bit | target RX queue. */
1739 uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
1740 uint32_t smt[MGE_MCAST_REG_NUMBER];
1741 uint32_t omt[MGE_MCAST_REG_NUMBER];
1742 struct ifnet *ifp = sc->ifp;
1743 struct ifmultiaddr *ifma;
1747 if (ifp->if_flags & IFF_ALLMULTI) {
/* Accept-all: replicate the accept byte into every 32-bit word. */
1748 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
1749 smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
1751 memset(smt, 0, sizeof(smt));
1752 memset(omt, 0, sizeof(omt));
1754 if_maddr_rlock(ifp);
1755 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1756 if (ifma->ifma_addr->sa_family != AF_LINK)
1759 mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1760 if (memcmp(mac, special, sizeof(special)) == 0) {
/* Each table entry is one byte; i>>2 picks the word, (i&3)*8 the lane. */
1762 smt[i >> 2] |= v << ((i & 0x03) << 3);
1764 i = mge_crc8(mac, ETHER_ADDR_LEN);
1765 omt[i >> 2] |= v << ((i & 0x03) << 3);
1768 if_maddr_runlock(ifp);
/* Flush both tables to the hardware filter registers. */
1771 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
1772 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
1773 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
/*
 * mge_set_rxic: program the RX interrupt-coalescing time into the SDMA
 * config register.  rx_ic_time is clamped to the chip's maximum; the
 * old field is cleared using the all-ones (max) mask before the new
 * value is OR-ed in -- mge_rx_ipg() encodes the field per controller
 * version (mge_ver).
 */
1777 mge_set_rxic(struct mge_softc *sc)
1782 if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1783 sc->rx_ic_time = sc->mge_rx_ipg_max;
1785 reg = MGE_READ(sc, MGE_SDMA_CONFIG);
1786 reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1787 reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1788 MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
/*
 * mge_set_txic: program the TX interrupt-coalescing time into the TX
 * FIFO urgent-threshold register; mirrors mge_set_rxic() -- clamp to
 * the maximum, clear the old field via the max mask, then OR in the
 * new version-dependent encoding from mge_tfut_ipg().
 */
1792 mge_set_txic(struct mge_softc *sc)
1796 if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1797 sc->tx_ic_time = sc->mge_tfut_ipg_max;
1799 reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
1800 reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1801 reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1802 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
/*
 * mge_sysctl_ic: sysctl handler for the interrupt-coalescing times;
 * arg1 is the softc, arg2 selects MGE_IC_RX or MGE_IC_TX.  Exposes the
 * current value through sysctl_handle_int(); on a successful write it
 * stores the new time under the global lock.  (The early return on
 * error/read-only, and the presumable mge_set_rxic()/mge_set_txic()
 * calls applying the new value, are on lines not shown here.)
 */
1806 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1808 struct mge_softc *sc = (struct mge_softc *)arg1;
1812 time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1813 error = sysctl_handle_int(oidp, &time, 0, req);
1817 MGE_GLOBAL_LOCK(sc);
1818 if (arg2 == MGE_IC_RX) {
1819 sc->rx_ic_time = time;
1822 sc->tx_ic_time = time;
1825 MGE_GLOBAL_UNLOCK(sc);
1831 mge_add_sysctls(struct mge_softc *sc)
1833 struct sysctl_ctx_list *ctx;
1834 struct sysctl_oid_list *children;
1835 struct sysctl_oid *tree;
1837 ctx = device_get_sysctl_ctx(sc->dev);
1838 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1839 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1840 CTLFLAG_RD, 0, "MGE Interrupts coalescing");
1841 children = SYSCTL_CHILDREN(tree);
1843 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1844 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1845 "I", "IC RX time threshold");
1846 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1847 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1848 "I", "IC TX time threshold");