2 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5 * Developed by Semihalf.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of MARVELL nor the names of contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
50 #include <net/ethernet.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
59 #include <netinet/in_systm.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
63 #include <sys/sockio.h>
65 #include <machine/bus.h>
67 #include <machine/resource.h>
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
72 #include <dev/fdt/fdt_common.h>
73 #include <dev/ofw/ofw_bus.h>
74 #include <dev/ofw/ofw_bus_subr.h>
76 #include <dev/mge/if_mgevar.h>
77 #include <arm/mv/mvreg.h>
78 #include <arm/mv/mvvar.h>
80 #include "miibus_if.h"
/*
 * Forward declarations for the driver's internal routines: newbus
 * device methods, miibus accessors, ifnet entry points, interrupt
 * handlers, busdma helpers and sysctl support.
 */
82 static int mge_probe(device_t dev);
83 static int mge_attach(device_t dev);
84 static int mge_detach(device_t dev);
85 static int mge_shutdown(device_t dev);
86 static int mge_suspend(device_t dev);
87 static int mge_resume(device_t dev);
89 static int mge_miibus_readreg(device_t dev, int phy, int reg);
90 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
92 static int mge_ifmedia_upd(struct ifnet *ifp);
93 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
95 static void mge_init(void *arg);
96 static void mge_init_locked(void *arg);
97 static void mge_start(struct ifnet *ifp);
98 static void mge_start_locked(struct ifnet *ifp);
99 static void mge_watchdog(struct mge_softc *sc);
100 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
102 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
103 static uint32_t mge_rx_ipg(uint32_t val, int ver);
104 static void mge_ver_params(struct mge_softc *sc);
106 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
107 static void mge_intr_rxtx(void *arg);
108 static void mge_intr_rx(void *arg);
109 static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
110 uint32_t int_cause_ext);
111 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
112 static void mge_intr_tx(void *arg);
113 static void mge_intr_tx_locked(struct mge_softc *sc);
114 static void mge_intr_misc(void *arg);
115 static void mge_intr_sum(void *arg);
116 static void mge_intr_err(void *arg);
117 static void mge_stop(struct mge_softc *sc);
118 static void mge_tick(void *msc);
119 static uint32_t mge_set_port_serial_control(uint32_t media);
120 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
121 static void mge_set_mac_address(struct mge_softc *sc);
122 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
124 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
125 static int mge_allocate_dma(struct mge_softc *sc);
126 static int mge_alloc_desc_dma(struct mge_softc *sc,
127 struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
128 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
129 struct mbuf **mbufp, bus_addr_t *paddr);
130 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
131 static void mge_free_dma(struct mge_softc *sc);
132 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
133 bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
134 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
135 uint32_t status, uint16_t bufsize);
136 static void mge_offload_setup_descriptor(struct mge_softc *sc,
137 struct mge_desc_wrapper *dw);
138 static uint8_t mge_crc8(uint8_t *data, int size);
139 static void mge_setup_multicast(struct mge_softc *sc);
140 static void mge_set_rxic(struct mge_softc *sc);
141 static void mge_set_txic(struct mge_softc *sc);
142 static void mge_add_sysctls(struct mge_softc *sc);
143 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
/*
 * Newbus device method table: maps the generic device and miibus
 * interfaces onto this driver's implementations.
 */
145 static device_method_t mge_methods[] = {
146 /* Device interface */
147 DEVMETHOD(device_probe, mge_probe),
148 DEVMETHOD(device_attach, mge_attach),
149 DEVMETHOD(device_detach, mge_detach),
150 DEVMETHOD(device_shutdown, mge_shutdown),
151 DEVMETHOD(device_suspend, mge_suspend),
152 DEVMETHOD(device_resume, mge_resume),
/* MII bus interface — register access goes through the SMI unit */
154 DEVMETHOD(miibus_readreg, mge_miibus_readreg),
155 DEVMETHOD(miibus_writereg, mge_miibus_writereg),
/* Driver description handed to DRIVER_MODULE(); softc size included. */
159 static driver_t mge_driver = {
162 sizeof(struct mge_softc),
/* Register the driver on simplebus and declare module dependencies. */
165 static devclass_t mge_devclass;
167 DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
168 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
169 MODULE_DEPEND(mge, ether, 1, 1, 1);
170 MODULE_DEPEND(mge, miibus, 1, 1, 1);
/*
 * Bus resources: one memory window plus up to three shareable IRQ
 * lines (how many are actually used depends on sc->mge_intr_cnt).
 */
172 static struct resource_spec res_spec[] = {
173 { SYS_RES_MEMORY, 0, RF_ACTIVE },
174 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
175 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
176 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
/*
 * Interrupt handler/description table. Entry 0 is the aggregated
 * ("rxtx") handler used when the controller exposes a single IRQ;
 * entries 1..MGE_INTR_COUNT are the split per-cause handlers.
 */
181 driver_intr_t *handler;
183 } mge_intrs[MGE_INTR_COUNT + 1] = {
184 { mge_intr_rxtx,"GbE aggregated interrupt" },
185 { mge_intr_rx, "GbE receive interrupt" },
186 { mge_intr_tx, "GbE transmit interrupt" },
187 { mge_intr_misc,"GbE misc interrupt" },
188 { mge_intr_sum, "GbE summary interrupt" },
189 { mge_intr_err, "GbE error interrupt" },
/*
 * Obtain the interface hardware address into 'addr' (6 bytes).
 * Prefer the FDT "local-mac-address" property; if that is absent or
 * all-zero, fall back to the address already programmed into the
 * MAC_ADDR_L/H registers by firmware.
 */
193 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
195 uint32_t mac_l, mac_h;
200 * Retrieve hw address from the device tree.
202 i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
205 for (i = 0; i < 6; i++)
212 bcopy(lmac, addr, 6);
218 * Fall back -- use the currently programmed address.
220 mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
221 mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
/* Unpack big-endian layout: bytes 0-3 in MAC_ADDR_H, 4-5 in MAC_ADDR_L */
223 addr[0] = (mac_h & 0xff000000) >> 24;
224 addr[1] = (mac_h & 0x00ff0000) >> 16;
225 addr[2] = (mac_h & 0x0000ff00) >> 8;
226 addr[3] = (mac_h & 0x000000ff);
227 addr[4] = (mac_l & 0x0000ff00) >> 8;
228 addr[5] = (mac_l & 0x000000ff);
/*
 * Encode a TX FIFO urgent threshold value for the given controller
 * version: the usable field width differs (14 vs 16 bits), shifted
 * into register position.
 */
232 mge_tfut_ipg(uint32_t val, int ver)
237 return ((val & 0x3fff) << 4);
240 return ((val & 0xffff) << 4);
/*
 * Encode an RX inter-packet-gap value for the given controller
 * version; newer parts split the value across two bit fields
 * (bit 15 relocated to bit 25).
 */
245 mge_rx_ipg(uint32_t val, int ver)
250 return ((val & 0x3fff) << 8);
253 return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
/*
 * Set chip-version-dependent softc parameters: register field limits,
 * TX arbiter/token defaults and the number of IRQ lines to attach
 * (1 aggregated line on 88RC8180-class parts, split lines otherwise).
 */
258 mge_ver_params(struct mge_softc *sc)
/* Newer SoC families (Kirkwood 6281/6282, Discovery 78100/...) */
263 if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
264 d == MV_DEV_88F6282 ||
265 d == MV_DEV_MV78100 ||
266 d == MV_DEV_MV78100_Z0 ||
267 (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
270 sc->mge_tfut_ipg_max = 0xFFFF;
271 sc->mge_rx_ipg_max = 0xFFFF;
272 sc->mge_tx_arb_cfg = 0xFC0000FF;
273 sc->mge_tx_tok_cfg = 0xFFFF7FFF;
274 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/* Older parts: narrower threshold fields, different arbiter defaults */
278 sc->mge_tfut_ipg_max = 0x3FFF;
279 sc->mge_rx_ipg_max = 0x3FFF;
280 sc->mge_tx_arb_cfg = 0x000000FF;
281 sc->mge_tx_tok_cfg = 0x3FFFFFFF;
282 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
284 if (d == MV_DEV_88RC8180)
285 sc->mge_intr_cnt = 1;
287 sc->mge_intr_cnt = 2;
289 if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
/*
 * Program the interface's current link-level address into the
 * MAC_ADDR_L/H registers and install the matching unicast filter
 * entry for the default RX queue. Caller must hold the global lock.
 */
296 mge_set_mac_address(struct mge_softc *sc)
299 uint32_t mac_l, mac_h;
301 MGE_GLOBAL_LOCK_ASSERT(sc);
303 if_mac = (char *)IF_LLADDR(sc->ifp);
/* Pack bytes 4-5 into the low register, 0-3 into the high register */
305 mac_l = (if_mac[4] << 8) | (if_mac[5]);
306 mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
307 (if_mac[2] << 8) | (if_mac[3] << 0);
309 MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
310 MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
/* Unicast filtering keys off the last address byte */
312 mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
/*
 * Configure the unicast DA filter table so that only the entry for
 * 'last_byte' (the final MAC address octet) passes, directed at
 * 'queue'; all other table registers are cleared.
 */
316 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
318 uint32_t reg_idx, reg_off, reg_val, i;
/* Each filter register packs MGE_UCAST_REG_NUMBER one-byte entries */
321 reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
322 reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
323 reg_val = (1 | (queue << 1)) << reg_off;
325 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
327 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
329 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
/*
 * Enable or disable promiscuous reception, following IFF_PROMISC:
 * when enabling, set the unicast-promiscuous bit and open all
 * special/other multicast and unicast filter entries toward 'queue';
 * when disabling, clear the bit, flush the multicast filters and
 * reinstall the interface's own unicast address.
 */
334 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
336 uint32_t port_config;
339 /* Enable or disable promiscuous mode as needed */
340 if (sc->ifp->if_flags & IFF_PROMISC) {
341 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
342 port_config |= PORT_CONFIG_UPM;
343 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
/* Accept-entry pattern replicated into all four bytes of the register */
345 reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
346 (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
348 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
349 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
350 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
353 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
354 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
/* Leaving promiscuous mode: restore selective filtering */
357 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
358 port_config &= ~PORT_CONFIG_UPM;
359 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
361 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
362 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
363 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
366 mge_set_mac_address(sc);
/*
 * busdma load callback: store the single segment's bus address into
 * the bus_addr_t pointed to by 'arg'. Exactly one segment expected.
 */
371 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
375 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
378 *paddr = segs->ds_addr;
/*
 * Allocate a fresh mbuf cluster for an RX descriptor, unload any
 * previous mapping on 'map', DMA-map the new cluster and return its
 * bus address via *paddr and the mbuf via *mbufp. Panics if the
 * cluster unexpectedly maps to more than one segment.
 */
382 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
385 struct mbuf *new_mbuf;
386 bus_dma_segment_t seg[1];
390 KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
392 new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
393 if (new_mbuf == NULL)
/* Expose the whole cluster to the hardware */
395 new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
/* Tear down the old buffer's mapping before reusing the map */
398 bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
399 bus_dmamap_unload(tag, map);
402 error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
404 KASSERT(nsegs == 1, ("Too many segments returned!"));
405 if (nsegs != 1 || error)
406 panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
408 bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
411 (*paddr) = seg->ds_addr;
/*
 * Allocate and DMA-map 'size' descriptors in 'tab', link them into a
 * circular ring via their next_desc bus addresses, then create the
 * mbuf busdma tag (*buffer_tag) and a per-descriptor buffer map.
 */
416 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
417 uint32_t size, bus_dma_tag_t *buffer_tag)
419 struct mge_desc_wrapper *dw;
420 bus_addr_t desc_paddr;
/* Walk backwards so each descriptor can point at its successor */
424 for (i = size - 1; i >= 0; i--) {
426 error = bus_dmamem_alloc(sc->mge_desc_dtag,
427 (void**)&(dw->mge_desc),
428 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
432 if_printf(sc->ifp, "failed to allocate DMA memory\n");
437 error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
438 dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
439 &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
442 if_printf(sc->ifp, "can't load descriptor\n");
443 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
449 /* Chain descriptors */
450 dw->mge_desc->next_desc = desc_paddr;
451 desc_paddr = dw->mge_desc_paddr;
/* Close the ring: last descriptor points back to the first */
453 tab[size - 1].mge_desc->next_desc = desc_paddr;
455 /* Allocate a busdma tag for mbufs. */
456 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
457 1, 0, /* alignment, boundary */
458 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
459 BUS_SPACE_MAXADDR, /* highaddr */
460 NULL, NULL, /* filtfunc, filtfuncarg */
461 MCLBYTES, 1, /* maxsize, nsegments */
462 MCLBYTES, 0, /* maxsegsz, flags */
463 NULL, NULL, /* lockfunc, lockfuncarg */
464 buffer_tag); /* dmat */
466 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
470 /* Create TX busdma maps */
471 for (i = 0; i < size; i++) {
473 error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
475 if_printf(sc->ifp, "failed to create map for mbuf\n");
/* Buffers are attached later (TX on encap, RX via mge_new_rxbuf) */
479 dw->buffer = (struct mbuf*)NULL;
480 dw->mge_desc->buffer = (bus_addr_t)NULL;
/*
 * Set up all DMA state for the interface: create the descriptor
 * busdma tag, build the TX and RX descriptor rings, attach an mbuf
 * cluster to every RX descriptor and record the ring start addresses.
 */
487 mge_allocate_dma(struct mge_softc *sc)
490 struct mge_desc_wrapper *dw;
493 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
494 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
495 16, 0, /* alignment, boundary */
496 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
497 BUS_SPACE_MAXADDR, /* highaddr */
498 NULL, NULL, /* filtfunc, filtfuncarg */
499 sizeof(struct mge_desc), 1, /* maxsize, nsegments */
500 sizeof(struct mge_desc), 0, /* maxsegsz, flags */
501 NULL, NULL, /* lockfunc, lockfuncarg */
502 &sc->mge_desc_dtag); /* dmat */
505 mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
507 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
/* Pre-fill every RX descriptor with a cluster for the hardware */
510 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
511 dw = &(sc->mge_rx_desc[i]);
512 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
513 &dw->mge_desc->buffer);
516 sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
517 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
/*
 * Release a descriptor ring: for each entry, unload/destroy the
 * buffer map (freeing the mbuf when 'free_mbufs' is set — RX rings),
 * then unload and free the descriptor's own DMA memory.
 */
523 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
524 uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
526 struct mge_desc_wrapper *dw;
529 for (i = 0; i < size; i++) {
533 if (dw->buffer_dmap) {
535 bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
536 BUS_DMASYNC_POSTREAD);
537 bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
539 bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
543 /* Free RX descriptors */
545 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
546 BUS_DMASYNC_POSTREAD);
547 bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
548 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
/*
 * Tear down everything mge_allocate_dma() built: both descriptor
 * rings (RX frees its mbufs, TX does not) and the three busdma tags.
 */
555 mge_free_dma(struct mge_softc *sc)
557 /* Free desciptors and mbufs */
558 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
559 mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
561 /* Destroy mbuf dma tag */
562 bus_dma_tag_destroy(sc->mge_tx_dtag);
563 bus_dma_tag_destroy(sc->mge_rx_dtag);
564 /* Destroy descriptors tag */
565 bus_dma_tag_destroy(sc->mge_desc_dtag);
/*
 * Rebuild the RX ring from scratch (used after an RX resource error):
 * free and reallocate all RX descriptors and buffers, reset the
 * current-descriptor pointer and re-enable the default RX queue.
 * Caller must hold the receive lock.
 */
569 mge_reinit_rx(struct mge_softc *sc)
571 struct mge_desc_wrapper *dw;
574 MGE_RECEIVE_LOCK_ASSERT(sc);
576 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
578 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
581 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
582 dw = &(sc->mge_rx_desc[i]);
583 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
584 &dw->mge_desc->buffer);
587 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
588 sc->rx_desc_curr = 0;
/* Point the hardware at the new ring and restart reception */
590 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
593 /* Enable RX queue */
594 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
597 #ifdef DEVICE_POLLING
598 static poll_handler_t mge_poll;
/*
 * DEVICE_POLLING handler: with interrupts disabled, service TX
 * completions and up to 'count' RX frames. On POLL_AND_CHECK_STATUS
 * also read and acknowledge the interrupt cause registers, handling
 * RX resource errors. Returns the number of RX packets processed.
 */
601 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
603 struct mge_softc *sc = ifp->if_softc;
604 uint32_t int_cause, int_cause_ext;
/* Bail out if the interface has been stopped meanwhile */
609 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
610 MGE_GLOBAL_UNLOCK(sc);
614 if (cmd == POLL_AND_CHECK_STATUS) {
615 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
616 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
618 /* Check for resource error */
619 if (int_cause & MGE_PORT_INT_RXERRQ0)
/* Ack whatever causes are pending (write-zero-to-clear) */
622 if (int_cause || int_cause_ext) {
623 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
624 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
628 mge_intr_tx_locked(sc);
629 rx_npkts = mge_intr_rx_locked(sc, count);
631 MGE_GLOBAL_UNLOCK(sc);
634 #endif /* DEVICE_POLLING */
/*
 * Device attach: read FDT properties (node, PHY address), initialize
 * locks, allocate bus resources and DMA rings, create and configure
 * the ifnet (capabilities, queue length, MAC address), attach the
 * PHY via miibus, and hook up the interrupt handlers — the aggregated
 * handler when only one IRQ line is present, split handlers otherwise.
 */
637 mge_attach(device_t dev)
639 struct mge_softc *sc;
640 struct mii_softc *miisc;
642 uint8_t hwaddr[ETHER_ADDR_LEN];
645 sc = device_get_softc(dev);
647 sc->node = ofw_bus_get_node(dev);
649 /* Set chip version-dependent parameters */
652 /* Get phy address and used softc from fdt */
653 if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) != 0)
656 /* Initialize mutexes */
657 mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
658 mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
660 /* Allocate IO and IRQ resources */
661 error = bus_alloc_resources(dev, res_spec, sc->res);
663 device_printf(dev, "could not allocate resources\n");
668 /* Allocate DMA, buffers, buffer descriptors */
669 error = mge_allocate_dma(sc);
/* Start with clean ring indexes */
675 sc->tx_desc_curr = 0;
676 sc->rx_desc_curr = 0;
677 sc->tx_desc_used_idx = 0;
678 sc->tx_desc_used_count = 0;
680 /* Configure defaults for interrupts coalescing */
681 sc->rx_ic_time = 768;
682 sc->tx_ic_time = 768;
685 /* Allocate network interface */
686 ifp = sc->ifp = if_alloc(IFT_ETHER);
688 device_printf(dev, "if_alloc() failed\n");
693 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
695 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
696 ifp->if_capabilities = IFCAP_VLAN_MTU;
697 if (sc->mge_hw_csum) {
698 ifp->if_capabilities |= IFCAP_HWCSUM;
699 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
701 ifp->if_capenable = ifp->if_capabilities;
703 #ifdef DEVICE_POLLING
704 /* Advertise that polling is supported */
705 ifp->if_capabilities |= IFCAP_POLLING;
708 ifp->if_init = mge_init;
709 ifp->if_start = mge_start;
710 ifp->if_ioctl = mge_ioctl;
/* Cap the send queue to one less than the TX ring size */
712 ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
713 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
714 IFQ_SET_READY(&ifp->if_snd);
716 mge_get_mac_address(sc, hwaddr);
717 ether_ifattach(ifp, hwaddr);
718 callout_init(&sc->wd_callout, 0);
721 error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
722 mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
724 device_printf(dev, "attaching PHYs failed\n");
728 sc->mii = device_get_softc(sc->miibus);
730 /* Tell the MAC where to find the PHY so autoneg works */
731 miisc = LIST_FIRST(&sc->mii->mii_phys);
732 MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
734 /* Attach interrupt handlers */
735 /* TODO: review flags, in part. mark RX as INTR_ENTROPY ? */
736 for (i = 1; i <= sc->mge_intr_cnt; ++i) {
737 error = bus_setup_intr(dev, sc->res[i],
738 INTR_TYPE_NET | INTR_MPSAFE,
739 NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
740 sc, &sc->ih_cookie[i - 1]);
742 device_printf(dev, "could not setup %s\n",
743 mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
/*
 * Device detach: stop the controller, drain the watchdog callout,
 * tear down all interrupt handlers, detach the ifnet, release DMA
 * and bus resources, and destroy the driver mutexes.
 */
753 mge_detach(device_t dev)
755 struct mge_softc *sc;
758 sc = device_get_softc(dev);
760 /* Stop controller and free TX queue */
764 /* Wait for stopping ticks */
765 callout_drain(&sc->wd_callout);
767 /* Stop and release all interrupts */
768 for (i = 0; i < sc->mge_intr_cnt; ++i) {
769 if (!sc->ih_cookie[i])
772 error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
774 device_printf(dev, "could not release %s\n",
775 mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
778 /* Detach network interface */
780 ether_ifdetach(sc->ifp);
784 /* Free DMA resources */
787 /* Free IO memory handler */
788 bus_release_resources(dev, res_spec, sc->res);
790 /* Destroy mutexes */
791 mtx_destroy(&sc->receive_lock);
792 mtx_destroy(&sc->transmit_lock);
/*
 * ifmedia status callback: report the PHY's current media and link
 * status from the mii layer, under the transmit lock.
 */
798 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
800 struct mge_softc *sc = ifp->if_softc;
801 struct mii_data *mii;
803 MGE_TRANSMIT_LOCK(sc);
808 ifmr->ifm_active = mii->mii_media_active;
809 ifmr->ifm_status = mii->mii_media_status;
811 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Compute the PORT_SERIAL_CTRL register value for the requested
 * ifmedia word: base bits plus speed/autonegotiation settings chosen
 * by media subtype, and full duplex when IFM_FDX is set.
 */
815 mge_set_port_serial_control(uint32_t media)
817 uint32_t port_config;
819 port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
820 PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
822 if (IFM_TYPE(media) == IFM_ETHER) {
823 switch(IFM_SUBTYPE(media)) {
827 port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
828 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
829 PORT_SERIAL_SPEED_AUTONEG);
832 port_config |= (PORT_SERIAL_MII_SPEED_100 |
833 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
834 PORT_SERIAL_SPEED_AUTONEG);
837 port_config |= (PORT_SERIAL_AUTONEG |
838 PORT_SERIAL_AUTONEG_FC |
839 PORT_SERIAL_SPEED_AUTONEG);
843 port_config |= PORT_SERIAL_FULL_DUPLEX;
845 return (port_config);
/*
 * ifmedia change callback: when the interface is up, record the
 * selected media in the softc and push the change to the PHY.
 */
849 mge_ifmedia_upd(struct ifnet *ifp)
851 struct mge_softc *sc = ifp->if_softc;
853 if (ifp->if_flags & IFF_UP) {
856 sc->mge_media_status = sc->mii->mii_media.ifm_media;
857 mii_mediachg(sc->mii);
860 MGE_GLOBAL_UNLOCK(sc);
/* mge_init: if_init entry point — run the locked init under the global lock. */
869 struct mge_softc *sc = arg;
873 mge_init_locked(arg);
875 MGE_GLOBAL_UNLOCK(sc);
/*
 * Bring the controller up (global lock held): disable interrupts,
 * program MAC address and multicast filters, configure TX queues,
 * port/SDMA registers and descriptor ring pointers, arm the RX ring,
 * enable the port and RX queue, wait for link, set up interrupt
 * coalescing, re-enable interrupts (unless polling), mark the ifnet
 * running and start the watchdog tick.
 */
879 mge_init_locked(void *arg)
881 struct mge_softc *sc = arg;
882 struct mge_desc_wrapper *dw;
883 volatile uint32_t reg_val;
887 MGE_GLOBAL_LOCK_ASSERT(sc);
892 /* Disable interrupts */
893 mge_intrs_ctrl(sc, 0);
895 /* Set MAC address */
896 mge_set_mac_address(sc);
898 /* Setup multicast filters */
899 mge_setup_multicast(sc);
/* Version-2 controllers need RGMII and fixed-priority TX enabled */
901 if (sc->mge_ver == 2) {
902 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
903 MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
906 /* Initialize TX queue configuration registers */
907 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
908 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
909 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
911 /* Clear TX queue configuration registers for unused queues */
912 for (i = 1; i < 7; i++) {
913 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
914 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
915 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
918 /* Set default MTU */
919 MGE_WRITE(sc, sc->mge_mtu, 0);
921 /* Port configuration */
922 MGE_WRITE(sc, MGE_PORT_CONFIG,
923 PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
924 PORT_CONFIG_ARO_RXQ(0));
925 MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);
927 /* Setup port configuration */
928 reg_val = mge_set_port_serial_control(sc->mge_media_status);
929 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
931 /* Setup SDMA configuration */
932 MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
933 MGE_SDMA_TX_BYTE_SWAP |
934 MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
935 MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
937 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
/* Point the hardware at the start of both descriptor rings */
939 MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
940 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
943 /* Reset descriptor indexes */
944 sc->tx_desc_curr = 0;
945 sc->rx_desc_curr = 0;
946 sc->tx_desc_used_idx = 0;
947 sc->tx_desc_used_count = 0;
949 /* Enable RX descriptors */
950 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
951 dw = &sc->mge_rx_desc[i];
952 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
953 dw->mge_desc->buff_size = MCLBYTES;
954 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
955 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
958 /* Enable RX queue */
959 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
/* Enable the port itself */
962 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
963 reg_val |= PORT_SERIAL_ENABLE;
964 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
/* Poll for link-up, warning on timeout */
967 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
968 if (reg_val & MGE_STATUS_LINKUP)
972 if_printf(sc->ifp, "Timeout on link-up\n");
977 /* Setup interrupts coalescing */
981 /* Enable interrupts */
982 #ifdef DEVICE_POLLING
984 * * ...only if polling is not turned on. Disable interrupts explicitly
985 * if polling is enabled.
987 if (sc->ifp->if_capenable & IFCAP_POLLING)
988 mge_intrs_ctrl(sc, 0);
990 #endif /* DEVICE_POLLING */
991 mge_intrs_ctrl(sc, 1);
993 /* Activate network interface */
994 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
995 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
998 /* Schedule watchdog timeout */
999 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * Aggregated interrupt handler (single-IRQ controllers): under the
 * global lock, read the cause registers, ack and service TX buffer /
 * underrun events, then hand RX causes to mge_intr_rx_check(). A
 * no-op when DEVICE_POLLING is active.
 */
1003 mge_intr_rxtx(void *arg)
1005 struct mge_softc *sc = arg;
1006 uint32_t int_cause, int_cause_ext;
1008 MGE_GLOBAL_LOCK(sc);
1010 #ifdef DEVICE_POLLING
1011 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1012 MGE_GLOBAL_UNLOCK(sc);
1017 /* Get interrupt cause */
1018 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1019 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1021 /* Check for Transmit interrupt */
1022 if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
1023 MGE_PORT_INT_EXT_TXUR)) {
/* Ack only the TX-related cause bits (write-zero-to-clear) */
1024 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1025 (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1026 mge_intr_tx_locked(sc);
1029 MGE_TRANSMIT_UNLOCK(sc);
1031 /* Check for Receive interrupt */
1032 mge_intr_rx_check(sc, int_cause, int_cause_ext);
1034 MGE_RECEIVE_UNLOCK(sc);
/* Error interrupt handler: currently only logs that it fired. */
1038 mge_intr_err(void *arg)
1040 struct mge_softc *sc = arg;
1044 if_printf(ifp, "%s\n", __FUNCTION__);
/* Miscellaneous interrupt handler: currently only logs that it fired. */
1048 mge_intr_misc(void *arg)
1050 struct mge_softc *sc = arg;
1054 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * Dedicated RX interrupt handler: under the receive lock, read the
 * cause registers and dispatch to mge_intr_rx_check(). A no-op when
 * DEVICE_POLLING is active.
 */
1058 mge_intr_rx(void *arg) {
1059 struct mge_softc *sc = arg;
1060 uint32_t int_cause, int_cause_ext;
1062 MGE_RECEIVE_LOCK(sc);
1064 #ifdef DEVICE_POLLING
1065 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1066 MGE_RECEIVE_UNLOCK(sc);
1071 /* Get interrupt cause */
1072 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1073 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1075 mge_intr_rx_check(sc, int_cause, int_cause_ext);
1077 MGE_RECEIVE_UNLOCK(sc);
/*
 * Examine RX-related cause bits: handle an RX resource error
 * (RXERRQ0) first, then, if a normal RX or overrun cause is pending,
 * ack it and run the RX processing loop without a packet budget.
 */
1081 mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
1082 uint32_t int_cause_ext)
1084 /* Check for resource error */
1085 if (int_cause & MGE_PORT_INT_RXERRQ0) {
1087 MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1088 ~(int_cause & MGE_PORT_INT_RXERRQ0));
/* Mask down to the causes this path services */
1091 int_cause &= MGE_PORT_INT_RXQ0;
1092 int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1094 if (int_cause || int_cause_ext) {
1095 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1096 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
/* -1 = no budget: drain until the hardware owns the next descriptor */
1097 mge_intr_rx_locked(sc, -1);
/*
 * RX processing loop (receive lock held): walk the RX ring while the
 * CPU owns descriptors and 'count' (packet budget; -1 = unlimited)
 * is not exhausted. Good frames are copied into fresh mbufs via
 * m_devget(), checksum-offload state is applied, and the frame is
 * passed to if_input with the lock dropped. Each descriptor is then
 * recycled back to the hardware.
 */
1102 mge_intr_rx_locked(struct mge_softc *sc, int count)
1104 struct ifnet *ifp = sc->ifp;
1107 struct mge_desc_wrapper* dw;
1111 MGE_RECEIVE_LOCK_ASSERT(sc);
1113 while (count != 0) {
1114 dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1115 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1116 BUS_DMASYNC_POSTREAD);
1119 status = dw->mge_desc->cmd_status;
1120 bufsize = dw->mge_desc->buff_size;
/* Stop when the next descriptor still belongs to the DMA engine */
1121 if ((status & MGE_DMA_OWNED) != 0)
1124 if (dw->mge_desc->byte_count &&
1125 ~(status & MGE_ERR_SUMMARY)) {
1127 bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1128 BUS_DMASYNC_POSTREAD);
/* Copy out the frame, trimming the trailing Ethernet CRC */
1130 mb = m_devget(dw->buffer->m_data,
1131 dw->mge_desc->byte_count - ETHER_CRC_LEN,
1135 /* Give up if no mbufs */
1139 mb->m_pkthdr.len -= 2;
1142 mge_offload_process_frame(ifp, mb, status,
/* Drop the lock across the stack input call */
1145 MGE_RECEIVE_UNLOCK(sc);
1146 (*ifp->if_input)(ifp, mb);
1147 MGE_RECEIVE_LOCK(sc);
/* Return the descriptor to the hardware and advance the ring */
1151 dw->mge_desc->byte_count = 0;
1152 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1153 sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
1154 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1155 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Summary interrupt handler: currently only logs that it fired. */
1165 mge_intr_sum(void *arg)
1167 struct mge_softc *sc = arg;
1171 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * Dedicated TX interrupt handler: under the transmit lock, ack the
 * TX buffer / underrun cause bits and reclaim completed descriptors.
 * A no-op when DEVICE_POLLING is active.
 */
1175 mge_intr_tx(void *arg)
1177 struct mge_softc *sc = arg;
1178 uint32_t int_cause_ext;
1180 MGE_TRANSMIT_LOCK(sc);
1182 #ifdef DEVICE_POLLING
1183 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1184 MGE_TRANSMIT_UNLOCK(sc);
1189 /* Ack the interrupt */
1190 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1191 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1192 (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1194 mge_intr_tx_locked(sc);
1196 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Reclaim completed TX descriptors (transmit lock held): walk used
 * entries until one is still DMA-owned, updating collision counters
 * from the status bits, unmapping and freeing each transmitted mbuf.
 * Finally clear OACTIVE and try to queue more pending packets.
 */
1201 mge_intr_tx_locked(struct mge_softc *sc)
1203 struct ifnet *ifp = sc->ifp;
1204 struct mge_desc_wrapper *dw;
1205 struct mge_desc *desc;
1209 MGE_TRANSMIT_LOCK_ASSERT(sc);
1211 /* Disable watchdog */
1214 while (sc->tx_desc_used_count) {
1215 /* Get the descriptor */
1216 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1217 desc = dw->mge_desc;
1218 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1219 BUS_DMASYNC_POSTREAD);
1221 /* Get descriptor status */
1222 status = desc->cmd_status;
/* Hardware still transmitting this one — stop reclaiming */
1224 if (status & MGE_DMA_OWNED)
1227 sc->tx_desc_used_idx =
1228 (++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;
1229 sc->tx_desc_used_count--;
1231 /* Update collision statistics */
1232 if (status & MGE_ERR_SUMMARY) {
1233 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1234 ifp->if_collisions++;
1235 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1236 ifp->if_collisions += 16;
/* Release the DMA mapping and the transmitted mbuf */
1239 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1240 BUS_DMASYNC_POSTWRITE);
1241 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1242 m_freem(dw->buffer);
1243 dw->buffer = (struct mbuf*)NULL;
1250 /* Now send anything that was pending */
1251 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1252 mge_start_locked(ifp);
/*
 * ifnet ioctl handler: interface flag changes (promiscuous mode,
 * allmulti, up/down), multicast list updates, capability toggles
 * (HWCSUM, DEVICE_POLLING registration) and media requests; anything
 * else falls through to ether_ioctl().
 */
1257 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1259 struct mge_softc *sc = ifp->if_softc;
1260 struct ifreq *ifr = (struct ifreq *)data;
1268 MGE_GLOBAL_LOCK(sc);
1270 if (ifp->if_flags & IFF_UP) {
1271 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Only react to the flags that actually changed */
1272 flags = ifp->if_flags ^ sc->mge_if_flags;
1273 if (flags & IFF_PROMISC)
1274 mge_set_prom_mode(sc,
1275 MGE_RX_DEFAULT_QUEUE);
1277 if (flags & IFF_ALLMULTI)
1278 mge_setup_multicast(sc);
1280 mge_init_locked(sc);
1282 else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1285 sc->mge_if_flags = ifp->if_flags;
1286 MGE_GLOBAL_UNLOCK(sc);
/* SIOCADDMULTI / SIOCDELMULTI: rebuild the multicast filters */
1290 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1291 MGE_GLOBAL_LOCK(sc);
1292 mge_setup_multicast(sc);
1293 MGE_GLOBAL_UNLOCK(sc);
/* SIOCSIFCAP: toggle offload / polling capabilities */
1297 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1298 if (mask & IFCAP_HWCSUM) {
1299 ifp->if_capenable &= ~IFCAP_HWCSUM;
1300 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1301 if (ifp->if_capenable & IFCAP_TXCSUM)
1302 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1304 ifp->if_hwassist = 0;
1306 #ifdef DEVICE_POLLING
1307 if (mask & IFCAP_POLLING) {
1308 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1309 error = ether_poll_register(mge_poll, ifp);
/* Interrupts off while the polling framework drives us */
1313 MGE_GLOBAL_LOCK(sc);
1314 mge_intrs_ctrl(sc, 0);
1315 ifp->if_capenable |= IFCAP_POLLING;
1316 MGE_GLOBAL_UNLOCK(sc);
1318 error = ether_poll_deregister(ifp);
1319 MGE_GLOBAL_LOCK(sc);
1320 mge_intrs_ctrl(sc, 1);
1321 ifp->if_capenable &= ~IFCAP_POLLING;
1322 MGE_GLOBAL_UNLOCK(sc);
1327 case SIOCGIFMEDIA: /* fall through */
1329 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1330 && !(ifr->ifr_media & IFM_FDX)) {
1331 device_printf(sc->dev,
1332 "1000baseTX half-duplex unsupported\n");
1335 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1338 error = ether_ioctl(ifp, command, data);
/*
 * miibus read: issue an SMI read for (phy, reg) through the shared
 * SMI unit (phy_sc), poll until READVALID with a retry limit, then
 * return the low 16 data bits. Logs a warning on timeout.
 */
1344 mge_miibus_readreg(device_t dev, int phy, int reg)
1346 struct mge_softc *sc;
1349 sc = device_get_softc(dev);
1351 MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
1352 (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1354 retries = MGE_SMI_READ_RETRIES;
1356 !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
1357 DELAY(MGE_SMI_READ_DELAY);
1360 device_printf(dev, "Timeout while reading from PHY\n");
1362 return (MGE_READ(sc->phy_sc, MGE_REG_SMI) & 0xffff);
/*
 * miibus write: issue an SMI write of 'value' to (phy, reg) through
 * the shared SMI unit, then poll until the BUSY bit clears with a
 * retry limit. Logs a warning on timeout.
 */
1366 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1368 struct mge_softc *sc;
1371 sc = device_get_softc(dev);
1373 MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
1374 (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1376 retries = MGE_SMI_WRITE_RETRIES;
1377 while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
1378 DELAY(MGE_SMI_WRITE_DELAY);
1381 device_printf(dev, "Timeout while writing to PHY\n");
/*
 * Device probe: match enabled FDT nodes compatible with "mrvl,ge"
 * and set the human-readable device description.
 */
1386 mge_probe(device_t dev)
1389 if (!ofw_bus_status_okay(dev))
1392 if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1395 device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1396 return (BUS_PROBE_DEFAULT);
/*
 * Bus resume method.  Only logs the invocation; no hardware
 * re-initialization is visible here.
 */
1400 mge_resume(device_t dev)
1403 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * Bus shutdown method: quiesce the interface under the global lock.
 * If kernel device polling is active on this ifp, deregister it first
 * so no poll handler runs against stopped hardware.
 */
1408 mge_shutdown(device_t dev)
1410 struct mge_softc *sc = device_get_softc(dev);
1412 MGE_GLOBAL_LOCK(sc);
1414 #ifdef DEVICE_POLLING
/* Detach from the kernel polling framework before stopping. */
1415 if (sc->ifp->if_capenable & IFCAP_POLLING)
1416 ether_poll_deregister(sc->ifp);
1421 MGE_GLOBAL_UNLOCK(sc);
/*
 * Load an outgoing mbuf into the next free TX descriptor.
 * Returns 0 on success; non-zero when no descriptor is available or
 * the DMA map load fails (caller keeps ownership of the mbuf in that
 * case).  The nsegs != 1 check shows a single-segment mapping is
 * required — the caller is expected to have defragmented the chain.
 */
1427 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1429 struct mge_desc_wrapper *dw = NULL;
1431 bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1439 /* Check for free descriptors */
1440 if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1441 /* No free descriptors */
1445 /* Fetch unused map */
1446 desc_no = sc->tx_desc_curr;
1447 dw = &sc->mge_tx_desc[desc_no];
1448 mapp = dw->buffer_dmap;
1450 /* Create mapping in DMA memory */
1451 error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
/* Reject multi-segment mappings as well as outright load failures. */
1453 if (error != 0 || nsegs != 1 ) {
1454 bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1455 return ((error != 0) ? error : -1);
1458 bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1460 /* Everything is ok, now we can send buffers */
1461 for (seg = 0; seg < nsegs; seg++) {
1462 dw->mge_desc->byte_count = segs[seg].ds_len;
1463 dw->mge_desc->buffer = segs[seg].ds_addr;
/* Build cmd_status: checksum-offload bits first, then frame flags. */
1465 dw->mge_desc->cmd_status = 0;
1467 mge_offload_setup_descriptor(sc, dw);
1468 dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1469 MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
/* Flush the descriptor itself before handing it to the DMA engine. */
1473 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1474 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Advance the ring head (modulo ring size) and account for usage. */
1476 sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
1477 sc->tx_desc_used_count++;
/*
 * Periodic callout body (rescheduled every hz ticks below): runs the
 * TX watchdog check and refreshes media state if it changed.
 * NOTE(review): the function signature line is elided in this view;
 * `msc` is presumably the softc passed at callout_reset() time.
 */
1484 struct mge_softc *sc = msc;
1486 /* Check for TX timeout */
1491 /* Check for media type change */
1492 if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1493 mge_ifmedia_upd(sc->ifp);
1495 /* Schedule another timeout one second from now */
1496 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * TX watchdog: called from mge_tick().  Decrements the armed timer
 * and, on expiry, reports the timeout and reinitializes the
 * controller to recover the stuck transmitter.
 */
1500 mge_watchdog(struct mge_softc *sc)
1506 MGE_GLOBAL_LOCK(sc);
/* Timer disarmed, or armed but not yet expired: nothing to do. */
1508 if (sc->wd_timer == 0 || --sc->wd_timer) {
1509 MGE_GLOBAL_UNLOCK(sc);
1514 if_printf(ifp, "watchdog timeout\n");
/* Full reinit recovers the hardware after a hang. */
1517 mge_init_locked(sc);
1519 MGE_GLOBAL_UNLOCK(sc);
/*
 * ifnet if_start entry point: thin wrapper taking the transmit lock
 * around mge_start_locked().
 */
1523 mge_start(struct ifnet *ifp)
1525 struct mge_softc *sc = ifp->if_softc;
1527 MGE_TRANSMIT_LOCK(sc);
1529 mge_start_locked(ifp);
1531 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Drain the interface send queue: dequeue packets, defragment each
 * into a single mbuf (mge_encap requires one DMA segment), encap into
 * TX descriptors, then enable the TX queue.  On encap failure the
 * packet is put back and OACTIVE is set.  Caller holds the transmit
 * lock.
 */
1535 mge_start_locked(struct ifnet *ifp)
1537 struct mge_softc *sc;
1538 struct mbuf *m0, *mtmp;
1539 uint32_t reg_val, queued = 0;
1543 MGE_TRANSMIT_LOCK_ASSERT(sc);
/* Bail unless the interface is RUNNING and not already OACTIVE. */
1545 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1550 /* Get packet from the queue */
1551 IF_DEQUEUE(&ifp->if_snd, m0);
/* Collapse the chain to one segment for the single-segment encap. */
1555 mtmp = m_defrag(m0, M_NOWAIT);
1559 if (mge_encap(sc, m0)) {
/* No descriptor available: requeue and mark the queue full. */
1560 IF_PREPEND(&ifp->if_snd, m0);
1561 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1569 /* Enable transmitter and watchdog timer */
1570 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1571 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
/*
 * Stop the controller: cancel the tick callout, mask interrupts,
 * disable the RX/TX queues, reclaim and free all pending TX mbufs,
 * wait for the transmitter to drain, then clear the port serial
 * enable bit.  Expected to run with the driver locks held (callers
 * take MGE_GLOBAL_LOCK around shutdown).
 */
1577 mge_stop(struct mge_softc *sc)
1580 volatile uint32_t reg_val, status;
1581 struct mge_desc_wrapper *dw;
1582 struct mge_desc *desc;
/* Already stopped: nothing to do. */
1587 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1590 /* Stop tick engine */
1591 callout_stop(&sc->wd_callout);
1593 /* Disable interface */
1594 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1597 /* Disable interrupts */
1598 mge_intrs_ctrl(sc, 0);
1600 /* Disable Rx and Tx */
1601 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1602 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1603 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1605 /* Remove pending data from TX queue */
1606 while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1607 sc->tx_desc_used_count) {
1608 /* Get the descriptor */
1609 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1610 desc = dw->mge_desc;
1611 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1612 BUS_DMASYNC_POSTREAD);
1614 /* Get descriptor status */
1615 status = desc->cmd_status;
/* Descriptor still owned by the DMA engine: stop reclaiming. */
1617 if (status & MGE_DMA_OWNED)
/* Advance the reclaim index (modulo ring size). */
1620 sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) %
1622 sc->tx_desc_used_count--;
/* Tear down the DMA mapping and free the transmitted mbuf. */
1624 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1625 BUS_DMASYNC_POSTWRITE);
1626 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1628 m_freem(dw->buffer);
1629 dw->buffer = (struct mbuf*)NULL;
1632 /* Wait for end of transmission */
/* Poll port status until TX is idle and the TX FIFO has drained. */
1635 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1636 if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1637 (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1643 if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
/* Finally drop the port serial enable bit to park the MAC. */
1646 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1647 reg_val &= ~(PORT_SERIAL_ENABLE);
1648 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
/*
 * Bus suspend method.  Only logs the invocation, mirroring
 * mge_resume(); no state is saved here.
 */
1652 mge_suspend(device_t dev)
1655 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * Translate the RX descriptor's hardware checksum results into mbuf
 * csum_flags for a received frame, when IFCAP_RXCSUM is enabled:
 * IP header OK -> CSUM_IP_CHECKED|CSUM_IP_VALID; non-fragmented
 * TCP/UDP with a good L4 checksum -> CSUM_DATA_VALID|CSUM_PSEUDO_HDR
 * with csum_data 0xFFFF.
 */
1660 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1661 uint32_t status, uint16_t bufsize)
1665 if (ifp->if_capenable & IFCAP_RXCSUM) {
1666 if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1667 csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
/* L4 result is only trustworthy for unfragmented TCP/UDP frames. */
1669 if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1670 (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1671 (status & MGE_RX_L4_CSUM_OK)) {
1672 csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1673 frame->m_pkthdr.csum_data = 0xFFFF;
1676 frame->m_pkthdr.csum_flags = csum_flags;
/*
 * Set TX checksum-offload bits in a descriptor's cmd_status based on
 * the mbuf's csum_flags.  Parses the Ethernet (optionally
 * VLAN-encapsulated) header to find the IP header; offload applies
 * only to ETHERTYPE_IP frames, with a diagnostic otherwise.
 */
1681 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1683 struct mbuf *m0 = dw->buffer;
1684 struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1685 int csum_flags = m0->m_pkthdr.csum_flags;
/* Account for an 802.1Q tag when locating the L3 header. */
1691 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1692 etype = ntohs(eh->evl_proto);
1693 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1694 csum_flags |= MGE_TX_VLAN_TAGGED;
1696 etype = ntohs(eh->evl_encap_proto);
1697 ehlen = ETHER_HDR_LEN;
/* Hardware offload path only supports IPv4 here. */
1700 if (etype != ETHERTYPE_IP) {
1702 "TCP/IP Offload enabled for unsupported "
/* Tell the hardware the IP header length (in 32-bit words). */
1707 ip = (struct ip *)(m0->m_data + ehlen);
1708 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1709 cmd_status |= MGE_TX_NOT_FRAGMENT;
1712 if (csum_flags & CSUM_IP)
1713 cmd_status |= MGE_TX_GEN_IP_CSUM;
1715 if (csum_flags & CSUM_TCP)
1716 cmd_status |= MGE_TX_GEN_L4_CSUM;
/* UDP needs the extra MGE_TX_UDP qualifier on the L4 checksum. */
1718 if (csum_flags & CSUM_UDP)
1719 cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1721 dw->mge_desc->cmd_status |= cmd_status;
/*
 * Enable or disable the controller's interrupt sources.
 * enable != 0: unmask RX queue 0, RX error, and (via the extended
 * mask) TX error/underrun/buffer interrupts.
 * enable == 0: acknowledge any pending causes and zero every mask.
 */
1725 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1729 MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1730 MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1731 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1732 MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1733 MGE_PORT_INT_EXT_TXBUF0);
/* Disable path: clear causes first, then mask everything off. */
1735 MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1736 MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1738 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1739 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1741 MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1742 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
/*
 * Table-driven CRC-8 over `size` bytes of `data`.  The table layout
 * (ct[1] == 0x07) matches the conventional CRC-8 with polynomial
 * 0x07 (x^8 + x^2 + x + 1) — used below to hash multicast addresses
 * into the "other multicast" filter table.
 */
1747 mge_crc8(uint8_t *data, int size)
1750 static const uint8_t ct[256] = {
1751 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1752 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1753 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1754 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1755 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1756 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1757 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1758 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1759 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1760 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1761 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1762 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1763 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1764 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1765 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1766 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1767 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1768 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1769 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
1770 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
1771 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
1772 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
1773 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
1774 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
1775 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
1776 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
1777 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
1778 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
1779 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
1780 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
1781 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
1782 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
/* One table lookup per input byte: crc = ct[crc ^ byte]. */
1786 crc = ct[crc ^ *(data++)];
1792 mge_setup_multicast(struct mge_softc *sc)
1794 uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
1795 uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
1796 uint32_t smt[MGE_MCAST_REG_NUMBER];
1797 uint32_t omt[MGE_MCAST_REG_NUMBER];
1798 struct ifnet *ifp = sc->ifp;
1799 struct ifmultiaddr *ifma;
1803 if (ifp->if_flags & IFF_ALLMULTI) {
1804 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
1805 smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
1807 memset(smt, 0, sizeof(smt));
1808 memset(omt, 0, sizeof(omt));
1810 if_maddr_rlock(ifp);
1811 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1812 if (ifma->ifma_addr->sa_family != AF_LINK)
1815 mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1816 if (memcmp(mac, special, sizeof(special)) == 0) {
1818 smt[i >> 2] |= v << ((i & 0x03) << 3);
1820 i = mge_crc8(mac, ETHER_ADDR_LEN);
1821 omt[i >> 2] |= v << ((i & 0x03) << 3);
1824 if_maddr_runlock(ifp);
1827 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
1828 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
1829 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
/*
 * Apply the RX interrupt-coalescing time: clamp rx_ic_time to the
 * per-chip-version maximum, then read-modify-write the IPG field of
 * the SDMA config register (field position depends on mge_ver).
 */
1834 mge_set_rxic(struct mge_softc *sc)
1838 if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1839 sc->rx_ic_time = sc->mge_rx_ipg_max;
1841 reg = MGE_READ(sc, MGE_SDMA_CONFIG);
/* Masking with the max-value pattern clears the whole IPG field. */
1842 reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1843 reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1844 MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
/*
 * Apply the TX interrupt-coalescing time: clamp tx_ic_time to the
 * chip maximum, then read-modify-write the TX FIFO urgent-threshold
 * IPG field (mirrors mge_set_rxic for the TX side).
 */
1848 mge_set_txic(struct mge_softc *sc)
1852 if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1853 sc->tx_ic_time = sc->mge_tfut_ipg_max;
1855 reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
/* Max-value pattern clears the field before the new value is OR'd in. */
1856 reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1857 reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1858 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
/*
 * Sysctl handler for the interrupt-coalescing knobs.  arg1 is the
 * softc, arg2 selects MGE_IC_RX or MGE_IC_TX.  Reports the current
 * time value; on a write, stores the new value under the global lock
 * (the elided lines presumably call mge_set_rxic/mge_set_txic to
 * push it to hardware — confirm against the full source).
 */
1862 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1864 struct mge_softc *sc = (struct mge_softc *)arg1;
/* Snapshot the current value for sysctl_handle_int to export. */
1868 time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1869 error = sysctl_handle_int(oidp, &time, 0, req);
1873 MGE_GLOBAL_LOCK(sc);
1874 if (arg2 == MGE_IC_RX) {
1875 sc->rx_ic_time = time;
1878 sc->tx_ic_time = time;
1881 MGE_GLOBAL_UNLOCK(sc);
1887 mge_add_sysctls(struct mge_softc *sc)
1889 struct sysctl_ctx_list *ctx;
1890 struct sysctl_oid_list *children;
1891 struct sysctl_oid *tree;
1893 ctx = device_get_sysctl_ctx(sc->dev);
1894 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1895 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1896 CTLFLAG_RD, 0, "MGE Interrupts coalescing");
1897 children = SYSCTL_CHILDREN(tree);
1899 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1900 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1901 "I", "IC RX time threshold");
1902 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1903 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1904 "I", "IC TX time threshold");