2 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5 * Developed by Semihalf.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of MARVELL nor the names of contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
50 #include <net/ethernet.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
59 #include <netinet/in_systm.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
63 #include <sys/sockio.h>
65 #include <machine/bus.h>
67 #include <machine/resource.h>
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
72 #define MV_PHY_ADDR_BASE 8
74 #include <dev/mge/if_mgevar.h>
75 #include <arm/mv/mvreg.h>
76 #include <arm/mv/mvvar.h>
78 #include "miibus_if.h"
80 /* PHY registers are in the address space of the first mge unit */
81 static struct mge_softc *sc_mge0 = NULL;
83 static int mge_probe(device_t dev);
84 static int mge_attach(device_t dev);
85 static int mge_detach(device_t dev);
86 static int mge_shutdown(device_t dev);
87 static int mge_suspend(device_t dev);
88 static int mge_resume(device_t dev);
90 static int mge_miibus_readreg(device_t dev, int phy, int reg);
91 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
93 static int mge_ifmedia_upd(struct ifnet *ifp);
94 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
96 static void mge_init(void *arg);
97 static void mge_init_locked(void *arg);
98 static void mge_start(struct ifnet *ifp);
99 static void mge_start_locked(struct ifnet *ifp);
100 static void mge_watchdog(struct mge_softc *sc);
101 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
103 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
104 static uint32_t mge_rx_ipg(uint32_t val, int ver);
105 static void mge_ver_params(struct mge_softc *sc);
107 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
108 static void mge_intr_rx(void *arg);
109 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
110 static void mge_intr_tx(void *arg);
111 static void mge_intr_tx_locked(struct mge_softc *sc);
112 static void mge_intr_misc(void *arg);
113 static void mge_intr_sum(void *arg);
114 static void mge_intr_err(void *arg);
115 static void mge_stop(struct mge_softc *sc);
116 static void mge_tick(void *msc);
117 static uint32_t mge_set_port_serial_control(uint32_t media);
118 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
119 static void mge_set_mac_address(struct mge_softc *sc);
120 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
122 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
123 static int mge_allocate_dma(struct mge_softc *sc);
124 static int mge_alloc_desc_dma(struct mge_softc *sc,
125 struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
126 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
127 struct mbuf **mbufp, bus_addr_t *paddr);
128 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
129 static void mge_free_dma(struct mge_softc *sc);
130 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
131 bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
132 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
133 uint32_t status, uint16_t bufsize);
134 static void mge_offload_setup_descriptor(struct mge_softc *sc,
135 struct mge_desc_wrapper *dw);
136 static uint8_t mge_crc8(uint8_t *data, int size);
137 static void mge_setup_multicast(struct mge_softc *sc);
138 static void mge_set_rxic(struct mge_softc *sc);
139 static void mge_set_txic(struct mge_softc *sc);
140 static void mge_add_sysctls(struct mge_softc *sc);
141 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
/*
 * Bus method dispatch table: wires the newbus device_* entry points and
 * the MII bus register accessors to the mge_* implementations above.
 * NOTE(review): this dump is line-sampled; the table terminator and
 * closing brace are not visible here.
 */
143 static device_method_t mge_methods[] = {
144 /* Device interface */
145 DEVMETHOD(device_probe, mge_probe),
146 DEVMETHOD(device_attach, mge_attach),
147 DEVMETHOD(device_detach, mge_detach),
148 DEVMETHOD(device_shutdown, mge_shutdown),
149 DEVMETHOD(device_suspend, mge_suspend),
150 DEVMETHOD(device_resume, mge_resume),
152 DEVMETHOD(miibus_readreg, mge_miibus_readreg),
153 DEVMETHOD(miibus_writereg, mge_miibus_writereg),
/*
 * Driver/module glue: registers the mge driver on the "mbus" bus,
 * attaches a miibus child under each mge instance, and declares module
 * dependencies on ether and miibus.
 * NOTE(review): some driver_t initializer lines are missing from this dump.
 */
157 static driver_t mge_driver = {
160 sizeof(struct mge_softc),
163 static devclass_t mge_devclass;
165 DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0);
166 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
167 MODULE_DEPEND(mge, ether, 1, 1, 1);
168 MODULE_DEPEND(mge, miibus, 1, 1, 1);
/*
 * Bus resources claimed at attach time: one memory window (registers)
 * and five shareable IRQ lines.
 * NOTE(review): the terminating { -1, ... } sentinel is not visible in
 * this line-sampled dump.
 */
170 static struct resource_spec res_spec[] = {
171 { SYS_RES_MEMORY, 0, RF_ACTIVE },
172 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
173 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
174 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
175 { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
176 { SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
/*
 * Per-IRQ interrupt handler table; entry order presumably matches the
 * IRQ indices in res_spec above (rx, tx, misc, summary, error) —
 * confirm against the full source, since the struct header line is
 * missing from this dump.
 */
181 driver_intr_t *handler;
183 } mge_intrs[MGE_INTR_COUNT] = {
184 { mge_intr_rx, "GbE receive interrupt" },
185 { mge_intr_tx, "GbE transmit interrupt" },
186 { mge_intr_misc,"GbE misc interrupt" },
187 { mge_intr_sum, "GbE summary interrupt" },
188 { mge_intr_err, "GbE error interrupt" },
192 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
194 uint32_t mac_l, mac_h;
196 /* XXX use currently programmed MAC address; eventually this info will
197 * be provided by the loader */
199 mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
200 mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
202 addr[0] = (mac_h & 0xff000000) >> 24;
203 addr[1] = (mac_h & 0x00ff0000) >> 16;
204 addr[2] = (mac_h & 0x0000ff00) >> 8;
205 addr[3] = (mac_h & 0x000000ff);
206 addr[4] = (mac_l & 0x0000ff00) >> 8;
207 addr[5] = (mac_l & 0x000000ff);
/*
 * Encode a TX-FIFO urgent-threshold value for the TFUT register.
 * In this implementation, ver == 1 keeps 14 significant bits of val and
 * anything else keeps 16; either way the field is placed at bit 4.
 */
static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{
	uint32_t mask;

	mask = (ver == 1) ? 0x3fff : 0xffff;
	return ((val & mask) << 4);
}
/*
 * Encode a receive inter-packet-gap value for the RX IPG register
 * field. In this implementation, ver == 1 places 14 bits at bit 8;
 * otherwise the 16-bit value is split: bit 15 lands at bit 25 and
 * bits 14:0 at bit 7.
 */
static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	if (ver == 1)
		return ((val & 0x3fff) << 8);

	return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
}
/*
 * Select chip-dependent limits and TX arbiter/token defaults.
 * 88F6281 and MV78100(/Z0) SoCs get 16-bit IPG maxima and one set of
 * arbiter/token values; all other devices get 14-bit maxima and a
 * second set. NOTE(review): lines are missing from this dump — the
 * assignment of `d` (device id) and any sc->mge_ver / register-offset
 * setup are not visible; confirm against the full source.
 */
237 mge_ver_params(struct mge_softc *sc)
242 if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
243 d == MV_DEV_MV78100_Z0) {
246 sc->mge_tfut_ipg_max = 0xFFFF;
247 sc->mge_rx_ipg_max = 0xFFFF;
248 sc->mge_tx_arb_cfg = 0xFC0000FF;
249 sc->mge_tx_tok_cfg = 0xFFFF7FFF;
250 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
254 sc->mge_tfut_ipg_max = 0x3FFF;
255 sc->mge_rx_ipg_max = 0x3FFF;
256 sc->mge_tx_arb_cfg = 0x000000FF;
257 sc->mge_tx_tok_cfg = 0x3FFFFFFF;
258 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/*
 * Program the interface's link-level address (IF_LLADDR) into the
 * controller's MAC_ADDR_L/H registers and install the matching unicast
 * filter entry for the default RX queue. Caller must hold the global
 * (TX+RX) lock, as asserted below.
 */
263 mge_set_mac_address(struct mge_softc *sc)
266 uint32_t mac_l, mac_h;
268 MGE_GLOBAL_LOCK_ASSERT(sc);
270 if_mac = (char *)IF_LLADDR(sc->ifp);
/* Pack bytes 4..5 into the low register, 0..3 into the high one. */
272 mac_l = (if_mac[4] << 8) | (if_mac[5]);
273 mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
274 (if_mac[2] << 8) | (if_mac[3] << 0);
276 MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
277 MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
/* Unicast filtering is keyed on the last address byte only. */
279 mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
/*
 * Install a unicast DA filter entry: the table is indexed by the last
 * MAC byte, 8 bits per entry (pass bit | queue number << 1). All other
 * filter registers are cleared. NOTE(review): the if/else that selects
 * between writing reg_val and 0 per register is not fully visible in
 * this line-sampled dump.
 */
283 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
285 uint32_t reg_idx, reg_off, reg_val, i;
288 reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
289 reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
290 reg_val = (1 | (queue << 1)) << reg_off;
292 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
294 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
296 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
/*
 * Enable or disable promiscuous reception based on IFF_PROMISC:
 * when enabling, set PORT_CONFIG_UPM and open every multicast/unicast
 * DA filter entry toward `queue`; when disabling, clear UPM, zero the
 * multicast filters and re-program the unicast address.
 */
301 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
303 uint32_t port_config;
306 /* Enable or disable promiscuous mode as needed */
307 if (sc->ifp->if_flags & IFF_PROMISC) {
308 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
309 port_config |= PORT_CONFIG_UPM;
310 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
/* One accept entry (pass bit | queue<<1) replicated in each byte lane. */
312 reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
313 (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
315 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
316 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
317 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
320 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
321 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
/* else branch: leave promiscuous mode. */
324 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
325 port_config &= ~PORT_CONFIG_UPM;
326 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
328 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
329 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
330 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
/* Restore the station address / unicast filter entry. */
333 mge_set_mac_address(sc);
338 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
342 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
345 *paddr = segs->ds_addr;
/*
 * Allocate a fresh cluster mbuf, load it into `map` (replacing any
 * previous mapping) and return its bus address via *paddr. Panics if
 * the load produces more than one segment. NOTE(review): dump is
 * line-sampled — the return statements and the conditional guarding
 * the unload of an old buffer are not visible.
 */
349 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
352 struct mbuf *new_mbuf;
353 bus_dma_segment_t seg[1];
357 KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
359 new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
360 if (new_mbuf == NULL)
/* Use the whole cluster as the receive buffer. */
362 new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
/* Tear down the previous mapping before reusing the map. */
365 bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
366 bus_dmamap_unload(tag, map);
369 error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
371 KASSERT(nsegs == 1, ("Too many segments returned!"));
372 if (nsegs != 1 || error)
373 panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
375 bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
378 (*paddr) = seg->ds_addr;
/*
 * Allocate DMA-safe memory for `size` descriptors in `tab`, chain them
 * into a ring (each next_desc points at its successor, last wraps to
 * first via the tail patch below), then create a busdma tag and
 * per-descriptor maps for the mbuf buffers. NOTE(review): the dump is
 * line-sampled — error-path returns and some loop bodies are not
 * visible here.
 */
383 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
384 uint32_t size, bus_dma_tag_t *buffer_tag)
386 struct mge_desc_wrapper *dw;
387 bus_addr_t desc_paddr;
/* Walk backwards so each descriptor can point at the one allocated
 * just before it (its successor in ring order). */
391 for (i = size - 1; i >= 0; i--) {
393 error = bus_dmamem_alloc(sc->mge_desc_dtag,
394 (void**)&(dw->mge_desc),
395 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
399 if_printf(sc->ifp, "failed to allocate DMA memory\n");
404 error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
405 dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
406 &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
409 if_printf(sc->ifp, "can't load descriptor\n");
410 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
416 /* Chain descriptors */
417 dw->mge_desc->next_desc = desc_paddr;
418 desc_paddr = dw->mge_desc_paddr;
/* Close the ring: last descriptor points back at the first. */
420 tab[size - 1].mge_desc->next_desc = desc_paddr;
422 /* Allocate a busdma tag for mbufs. */
423 error = bus_dma_tag_create(NULL, /* parent */
424 8, 0, /* alignment, boundary */
425 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
426 BUS_SPACE_MAXADDR, /* highaddr */
427 NULL, NULL, /* filtfunc, filtfuncarg */
428 MCLBYTES, 1, /* maxsize, nsegments */
429 MCLBYTES, 0, /* maxsegsz, flags */
430 NULL, NULL, /* lockfunc, lockfuncarg */
431 buffer_tag); /* dmat */
433 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
437 /* Create TX busdma maps */
438 for (i = 0; i < size; i++) {
440 error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
442 if_printf(sc->ifp, "failed to create map for mbuf\n");
446 dw->buffer = (struct mbuf*)NULL;
447 dw->mge_desc->buffer = (bus_addr_t)NULL;
/*
 * Top-level DMA setup at attach: create the descriptor tag, allocate
 * the TX and RX descriptor rings, pre-load an mbuf cluster into every
 * RX descriptor, and record the ring start addresses. NOTE(review):
 * line-sampled dump — error checks after the helper calls and the
 * return statement are not visible.
 */
454 mge_allocate_dma(struct mge_softc *sc)
457 struct mge_desc_wrapper *dw;
461 num = MGE_TX_DESC_NUM + MGE_RX_DESC_NUM;
463 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
464 error = bus_dma_tag_create(NULL, /* parent */
465 16, 0, /* alignment, boundary */
466 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
467 BUS_SPACE_MAXADDR, /* highaddr */
468 NULL, NULL, /* filtfunc, filtfuncarg */
469 sizeof(struct mge_desc), 1, /* maxsize, nsegments */
470 sizeof(struct mge_desc), 0, /* maxsegsz, flags */
471 NULL, NULL, /* lockfunc, lockfuncarg */
472 &sc->mge_desc_dtag); /* dmat */
475 mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
477 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
/* Give every RX descriptor an initial receive buffer. */
480 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
481 dw = &(sc->mge_rx_desc[i]);
482 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
483 &dw->mge_desc->buffer);
486 sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
487 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
/*
 * Release a descriptor ring: for each entry, unload/destroy the buffer
 * map (freeing the mbuf when free_mbufs is set — the m_freem call is
 * not visible in this sampled dump), then unload and free the
 * descriptor's own DMA memory.
 */
493 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
494 uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
496 struct mge_desc_wrapper *dw;
499 for (i = 0; i < size; i++) {
503 if (dw->buffer_dmap) {
505 bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
506 BUS_DMASYNC_POSTREAD);
507 bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
509 bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
513 /* Free RX descriptors */
515 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
516 BUS_DMASYNC_POSTREAD);
517 bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
518 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
/*
 * Tear down all DMA state created by mge_allocate_dma(): both
 * descriptor rings (RX frees its mbufs, TX does not — TX mbufs are
 * freed on completion), then the three busdma tags.
 */
525 mge_free_dma(struct mge_softc *sc)
527 /* Free desciptors and mbufs */
528 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
529 mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
531 /* Destroy mbuf dma tag */
532 bus_dma_tag_destroy(sc->mge_tx_dtag);
533 bus_dma_tag_destroy(sc->mge_rx_dtag);
534 /* Destroy descriptors tag */
535 bus_dma_tag_destroy(sc->mge_desc_dtag);
/*
 * Rebuild the RX ring from scratch (used after an RX resource error):
 * free and re-allocate all RX descriptors and buffers, reset the
 * current index, point the hardware at the ring start and re-enable
 * the RX queue. Takes the receive lock for the duration.
 */
539 mge_reinit_rx(struct mge_softc *sc)
541 struct mge_desc_wrapper *dw;
544 MGE_RECEIVE_LOCK(sc);
546 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
548 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
551 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
552 dw = &(sc->mge_rx_desc[i]);
553 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
554 &dw->mge_desc->buffer);
557 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
558 sc->rx_desc_curr = 0;
560 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
563 /* Enable RX queue */
564 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
566 MGE_RECEIVE_UNLOCK(sc);
#ifdef DEVICE_POLLING
static poll_handler_t mge_poll;

/*
 * DEVICE_POLLING entry point: with interrupts disabled, service TX
 * completions and up to `count` RX frames. On POLL_AND_CHECK_STATUS
 * also read/ack the interrupt cause registers and rebuild the RX ring
 * on a resource error (the mge_reinit_rx call is not visible in this
 * sampled dump). Returns the number of RX packets processed.
 */
573 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
575 struct mge_softc *sc = ifp->if_softc;
576 uint32_t int_cause, int_cause_ext;
581 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
582 MGE_GLOBAL_UNLOCK(sc);
586 if (cmd == POLL_AND_CHECK_STATUS) {
587 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
588 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
590 /* Check for resource error */
591 if (int_cause & MGE_PORT_INT_RXERRQ0)
/* Ack only the bits we observed. */
594 if (int_cause || int_cause_ext) {
595 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
596 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
600 mge_intr_tx_locked(sc);
601 rx_npkts = mge_intr_rx_locked(sc, count);
603 MGE_GLOBAL_UNLOCK(sc);
606 #endif /* DEVICE_POLLING */
/*
 * Device attach: record the unit-0 softc for shared PHY access, set
 * version parameters, init locks, claim bus resources, set up DMA
 * rings, create and configure the ifnet (HW csum + VLAN MTU, polling
 * capability when compiled in), probe the PHY, attach ethernet, and
 * hook up interrupt handlers. NOTE(review): line-sampled dump — error
 * unwinding paths and the final return are not visible. Only the first
 * two IRQ vectors are hooked here (i < 2); presumably the remaining
 * vectors are unused or handled elsewhere — confirm in full source.
 */
609 mge_attach(device_t dev)
611 struct mge_softc *sc;
613 uint8_t hwaddr[ETHER_ADDR_LEN];
616 sc = device_get_softc(dev);
/* First unit owns the SMI/PHY registers (see sc_mge0 above). */
619 if (device_get_unit(dev) == 0)
622 /* Set chip version-dependent parameters */
625 /* Initialize mutexes */
626 mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
627 mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
629 /* Allocate IO and IRQ resources */
630 error = bus_alloc_resources(dev, res_spec, sc->res);
632 device_printf(dev, "could not allocate resources\n");
637 /* Allocate DMA, buffers, buffer descriptors */
638 error = mge_allocate_dma(sc);
644 sc->tx_desc_curr = 0;
645 sc->rx_desc_curr = 0;
646 sc->tx_desc_used_idx = 0;
647 sc->tx_desc_used_count = 0;
649 /* Configure defaults for interrupts coalescing */
650 sc->rx_ic_time = 768;
651 sc->tx_ic_time = 768;
654 /* Allocate network interface */
655 ifp = sc->ifp = if_alloc(IFT_ETHER);
657 device_printf(dev, "if_alloc() failed\n");
662 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
664 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
665 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
666 ifp->if_capenable = ifp->if_capabilities;
667 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
669 #ifdef DEVICE_POLLING
670 /* Advertise that polling is supported */
671 ifp->if_capabilities |= IFCAP_POLLING;
674 ifp->if_init = mge_init;
675 ifp->if_start = mge_start;
676 ifp->if_ioctl = mge_ioctl;
678 ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
679 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
680 IFQ_SET_READY(&ifp->if_snd);
682 mge_get_mac_address(sc, hwaddr);
683 ether_ifattach(ifp, hwaddr);
684 callout_init(&sc->wd_callout, 0);
687 error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd, mge_ifmedia_sts);
689 device_printf(dev, "MII failed to find PHY\n");
695 sc->mii = device_get_softc(sc->miibus);
697 /* Attach interrupt handlers */
698 for (i = 0; i < 2; ++i) {
699 error = bus_setup_intr(dev, sc->res[1 + i],
700 INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
701 sc, &sc->ih_cookie[i]);
703 device_printf(dev, "could not setup %s\n",
704 mge_intrs[i].description);
705 ether_ifdetach(sc->ifp);
/*
 * Device detach: stop the controller, drain the watchdog callout,
 * tear down the interrupt handlers set up in attach (same i < 2
 * bound), detach the ifnet, free DMA state, release bus resources and
 * destroy the locks. NOTE(review): the mge_stop/mge_free_dma calls and
 * return are implied by the comments but not visible in this sampled
 * dump.
 */
714 mge_detach(device_t dev)
716 struct mge_softc *sc;
719 sc = device_get_softc(dev);
721 /* Stop controller and free TX queue */
725 /* Wait for stopping ticks */
726 callout_drain(&sc->wd_callout);
728 /* Stop and release all interrupts */
729 for (i = 0; i < 2; ++i) {
730 error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
732 device_printf(dev, "could not release %s\n",
733 mge_intrs[i].description);
736 /* Detach network interface */
738 ether_ifdetach(sc->ifp);
742 /* Free DMA resources */
745 /* Free IO memory handler */
746 bus_release_resources(dev, res_spec, sc->res);
748 /* Destroy mutexes */
749 mtx_destroy(&sc->receive_lock);
750 mtx_destroy(&sc->transmit_lock);
/*
 * ifmedia status callback: report the MII layer's current media and
 * link status under the transmit lock. NOTE(review): the mii_pollstat
 * call and mii assignment are not visible in this sampled dump.
 */
756 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
758 struct mge_softc *sc = ifp->if_softc;
759 struct mii_data *mii;
761 MGE_TRANSMIT_LOCK(sc);
766 ifmr->ifm_active = mii->mii_media_active;
767 ifmr->ifm_status = mii->mii_media_status;
769 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Translate an ifmedia word into a PORT_SERIAL_CTRL register value:
 * base bits (reserved bit 9, force-link-fail, 1552-byte MRU) plus
 * speed/autoneg bits per media subtype, and full duplex when
 * requested. NOTE(review): case labels and the IFM_FDX test are
 * missing from this line-sampled dump; the groupings below presumably
 * correspond to 1000_T / 100_TX / autoselect — confirm in full source.
 */
773 mge_set_port_serial_control(uint32_t media)
775 uint32_t port_config;
777 port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
778 PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
780 if (IFM_TYPE(media) == IFM_ETHER) {
781 switch(IFM_SUBTYPE(media)) {
785 port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
786 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
787 PORT_SERIAL_SPEED_AUTONEG);
790 port_config |= (PORT_SERIAL_MII_SPEED_100 |
791 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
792 PORT_SERIAL_SPEED_AUTONEG);
795 port_config |= (PORT_SERIAL_AUTONEG |
796 PORT_SERIAL_AUTONEG_FC |
797 PORT_SERIAL_SPEED_AUTONEG);
801 port_config |= PORT_SERIAL_FULL_DUPLEX;
803 return (port_config);
/*
 * ifmedia change callback: when the interface is up, latch the new
 * media word and push it to the PHY via mii_mediachg(). The matching
 * MGE_GLOBAL_LOCK and return are not visible in this sampled dump.
 */
807 mge_ifmedia_upd(struct ifnet *ifp)
809 struct mge_softc *sc = ifp->if_softc;
811 if (ifp->if_flags & IFF_UP) {
814 sc->mge_media_status = sc->mii->mii_media.ifm_media;
815 mii_mediachg(sc->mii);
818 MGE_GLOBAL_UNLOCK(sc);
/* if_init entry point: runs mge_init_locked() under the global lock
 * (the lock acquisition line is not visible in this sampled dump). */
827 struct mge_softc *sc = arg;
831 mge_init_locked(arg);
833 MGE_GLOBAL_UNLOCK(sc);
/*
 * Core (re)initialization, caller holds the global lock: programs MAC
 * address and multicast filters, TX queue 0 arbiter/token registers
 * (clearing queues 1-6), port/SDMA configuration, descriptor ring
 * pointers, marks all RX descriptors hardware-owned, enables the RX
 * queue and the serial port, waits for link, sets up coalescing and
 * interrupts (unless polling), then marks the interface RUNNING and
 * arms the watchdog. NOTE(review): line-sampled dump — the mge_stop
 * call, the link-wait loop and several closing braces are not visible.
 */
837 mge_init_locked(void *arg)
839 struct mge_softc *sc = arg;
840 struct mge_desc_wrapper *dw;
841 volatile uint32_t reg_val;
845 MGE_GLOBAL_LOCK_ASSERT(sc);
850 /* Disable interrupts */
851 mge_intrs_ctrl(sc, 0);
853 /* Set MAC address */
854 mge_set_mac_address(sc);
856 /* Setup multicast filters */
857 mge_setup_multicast(sc);
859 if (sc->mge_ver == 2) {
860 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
861 MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
864 /* Initialize TX queue configuration registers */
865 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
866 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
867 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
869 /* Clear TX queue configuration registers for unused queues */
870 for (i = 1; i < 7; i++) {
871 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
872 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
873 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
876 /* Set default MTU */
877 MGE_WRITE(sc, sc->mge_mtu, 0);
879 /* Port configuration */
880 MGE_WRITE(sc, MGE_PORT_CONFIG,
881 PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
882 PORT_CONFIG_ARO_RXQ(0));
883 MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);
885 /* Setup port configuration */
886 reg_val = mge_set_port_serial_control(sc->mge_media_status);
887 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
889 /* Setup SDMA configuration */
890 MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
891 MGE_SDMA_TX_BYTE_SWAP |
892 MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
893 MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
895 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
897 MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
898 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
901 /* Reset descriptor indexes */
902 sc->tx_desc_curr = 0;
903 sc->rx_desc_curr = 0;
904 sc->tx_desc_used_idx = 0;
905 sc->tx_desc_used_count = 0;
907 /* Enable RX descriptors */
908 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
909 dw = &sc->mge_rx_desc[i];
910 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
911 dw->mge_desc->buff_size = MCLBYTES;
912 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
913 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
916 /* Enable RX queue */
917 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
920 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
921 reg_val |= PORT_SERIAL_ENABLE;
922 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
925 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
926 if (reg_val & MGE_STATUS_LINKUP)
930 if_printf(sc->ifp, "Timeout on link-up\n");
935 /* Setup interrupts coalescing */
939 /* Enable interrupts */
940 #ifdef DEVICE_POLLING
942 * * ...only if polling is not turned on. Disable interrupts explicitly
943 * if polling is enabled.
945 if (sc->ifp->if_capenable & IFCAP_POLLING)
946 mge_intrs_ctrl(sc, 0);
948 #endif /* DEVICE_POLLING */
949 mge_intrs_ctrl(sc, 1);
951 /* Activate network interface */
952 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
953 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
956 /* Schedule watchdog timeout */
957 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/* Error-interrupt handler: currently only logs that it fired. */
961 mge_intr_err(void *arg)
963 struct mge_softc *sc = arg;
967 if_printf(ifp, "%s\n", __FUNCTION__);
/* Miscellaneous-interrupt handler: currently only logs that it fired. */
971 mge_intr_misc(void *arg)
973 struct mge_softc *sc = arg;
977 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * RX interrupt handler: bail out if DEVICE_POLLING owns the device;
 * otherwise read the cause registers, handle RX resource errors
 * (the recovery call is not visible in this sampled dump), mask to the
 * RX-relevant bits, ack them, and drain the ring without a packet
 * limit (count = -1).
 */
981 mge_intr_rx(void *arg) {
982 struct mge_softc *sc = arg;
983 uint32_t int_cause, int_cause_ext;
985 MGE_RECEIVE_LOCK(sc);
987 #ifdef DEVICE_POLLING
988 if (sc->ifp->if_capenable & IFCAP_POLLING) {
989 MGE_RECEIVE_UNLOCK(sc);
994 /* Get interrupt cause */
995 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
996 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
998 /* Check for resource error */
999 if (int_cause & MGE_PORT_INT_RXERRQ0) {
1001 MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1002 int_cause & ~MGE_PORT_INT_RXERRQ0);
1005 int_cause &= MGE_PORT_INT_RXQ0;
1006 int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1008 if (int_cause || int_cause_ext) {
1009 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1010 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1011 mge_intr_rx_locked(sc, -1);
1014 MGE_RECEIVE_UNLOCK(sc);
/*
 * Drain up to `count` received frames (count < 0 never hits the limit
 * since it is decremented, not compared to zero after counting —
 * -1 means "no limit"). For each hardware-released descriptor: copy
 * the frame out with m_devget, strip CRC, run RX offload processing,
 * hand the mbuf to if_input (dropping the RX lock around the call),
 * then recycle the descriptor back to the hardware. Stops when a
 * descriptor is still DMA-owned. NOTE(review): line-sampled dump —
 * the count decrement, the "Give up if no mbufs" branch body and the
 * return are not visible.
 */
1019 mge_intr_rx_locked(struct mge_softc *sc, int count)
1021 struct ifnet *ifp = sc->ifp;
1024 struct mge_desc_wrapper* dw;
1028 MGE_RECEIVE_LOCK_ASSERT(sc);
1030 while (count != 0) {
1031 dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1032 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1033 BUS_DMASYNC_POSTREAD);
1036 status = dw->mge_desc->cmd_status;
1037 bufsize = dw->mge_desc->buff_size;
/* Descriptor still owned by hardware: nothing more to reap. */
1038 if ((status & MGE_DMA_OWNED) != 0)
/* NOTE(review): `~(status & MGE_ERR_SUMMARY)` is always non-zero for a
 * 32-bit status — presumably `!(status & MGE_ERR_SUMMARY)` was meant;
 * verify against the upstream source before changing. */
1041 if (dw->mge_desc->byte_count &&
1042 ~(status & MGE_ERR_SUMMARY)) {
1044 bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1045 BUS_DMASYNC_POSTREAD);
1047 mb = m_devget(dw->buffer->m_data,
1048 dw->mge_desc->byte_count - ETHER_CRC_LEN,
1052 /* Give up if no mbufs */
1056 mb->m_pkthdr.len -= 2;
1059 mge_offload_process_frame(ifp, mb, status,
/* Drop the RX lock while the stack consumes the packet. */
1062 MGE_RECEIVE_UNLOCK(sc);
1063 (*ifp->if_input)(ifp, mb);
1064 MGE_RECEIVE_LOCK(sc);
/* Recycle: hand the descriptor back to the DMA engine. */
1068 dw->mge_desc->byte_count = 0;
1069 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1070 sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
1071 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1072 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Summary-interrupt handler: currently only logs that it fired. */
1082 mge_intr_sum(void *arg)
1084 struct mge_softc *sc = arg;
1088 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * TX interrupt handler: bail out if DEVICE_POLLING owns the device;
 * otherwise ack the TX-buffer cause bit and reap completed
 * transmissions under the transmit lock.
 */
1092 mge_intr_tx(void *arg)
1094 struct mge_softc *sc = arg;
1095 uint32_t int_cause_ext;
1097 MGE_TRANSMIT_LOCK(sc);
1099 #ifdef DEVICE_POLLING
1100 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1101 MGE_TRANSMIT_UNLOCK(sc);
1106 /* Ack the interrupt */
1107 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1108 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
1109 int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);
1111 mge_intr_tx_locked(sc);
1113 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Reap completed TX descriptors, caller holds the transmit lock:
 * walk used descriptors until one is still DMA-owned, advance the
 * used index/count, account collisions (late collision = 1, retry
 * limit = 16 per the 802.3 excessive-collision convention used here),
 * unload and free each transmitted mbuf, then clear OACTIVE and kick
 * mge_start_locked() to push any queued packets.
 */
1118 mge_intr_tx_locked(struct mge_softc *sc)
1120 struct ifnet *ifp = sc->ifp;
1121 struct mge_desc_wrapper *dw;
1122 struct mge_desc *desc;
1126 MGE_TRANSMIT_LOCK_ASSERT(sc);
1128 /* Disable watchdog */
1131 while (sc->tx_desc_used_count) {
1132 /* Get the descriptor */
1133 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1134 desc = dw->mge_desc;
1135 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1136 BUS_DMASYNC_POSTREAD);
1138 /* Get descriptor status */
1139 status = desc->cmd_status;
/* Still owned by hardware: stop reaping. */
1141 if (status & MGE_DMA_OWNED)
/* NOTE(review): stray double semicolon at the end of the next line. */
1144 sc->tx_desc_used_idx =
1145 (++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;;
1146 sc->tx_desc_used_count--;
1148 /* Update collision statistics */
1149 if (status & MGE_ERR_SUMMARY) {
1150 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1151 ifp->if_collisions++;
1152 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1153 ifp->if_collisions += 16;
1156 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1157 BUS_DMASYNC_POSTWRITE);
1158 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1159 m_freem(dw->buffer);
1160 dw->buffer = (struct mbuf*)NULL;
1167 /* Now send anything that was pending */
1168 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1169 mge_start_locked(ifp);
/*
 * ioctl handler. Visible cases (the switch/case labels themselves were
 * dropped by the line-sampled extraction — presumably SIOCSIFFLAGS,
 * SIOCADDMULTI/SIOCDELMULTI, SIOCSIFCAP, SIOCGIFMEDIA/SIOCSIFMEDIA,
 * default): flag changes toggle promiscuous/multicast state or
 * (re)initialize; multicast changes reprogram the filters; capability
 * changes handle HWCSUM and DEVICE_POLLING registration; media ioctls
 * go to ifmedia_ioctl (rejecting 1000baseTX half-duplex); everything
 * else falls through to ether_ioctl.
 */
1174 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1176 struct mge_softc *sc = ifp->if_softc;
1177 struct ifreq *ifr = (struct ifreq *)data;
1185 MGE_GLOBAL_LOCK(sc);
1187 if (ifp->if_flags & IFF_UP) {
1188 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Only react to the flags that actually changed. */
1189 flags = ifp->if_flags ^ sc->mge_if_flags;
1190 if (flags & IFF_PROMISC)
1191 mge_set_prom_mode(sc,
1192 MGE_RX_DEFAULT_QUEUE);
1194 if (flags & IFF_ALLMULTI)
1195 mge_setup_multicast(sc);
1197 mge_init_locked(sc);
1199 else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1202 sc->mge_if_flags = ifp->if_flags;
1203 MGE_GLOBAL_UNLOCK(sc);
1207 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1208 MGE_GLOBAL_LOCK(sc);
1209 mge_setup_multicast(sc);
1210 MGE_GLOBAL_UNLOCK(sc);
1214 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1215 if (mask & IFCAP_HWCSUM) {
1216 ifp->if_capenable &= ~IFCAP_HWCSUM;
1217 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1218 if (ifp->if_capenable & IFCAP_TXCSUM)
1219 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1221 ifp->if_hwassist = 0;
1223 #ifdef DEVICE_POLLING
1224 if (mask & IFCAP_POLLING) {
1225 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1226 error = ether_poll_register(mge_poll, ifp);
1230 MGE_GLOBAL_LOCK(sc);
1231 mge_intrs_ctrl(sc, 0);
1232 ifp->if_capenable |= IFCAP_POLLING;
1233 MGE_GLOBAL_UNLOCK(sc);
1235 error = ether_poll_deregister(ifp);
1236 MGE_GLOBAL_LOCK(sc);
1237 mge_intrs_ctrl(sc, 1);
1238 ifp->if_capenable &= ~IFCAP_POLLING;
1239 MGE_GLOBAL_UNLOCK(sc);
1244 case SIOCGIFMEDIA: /* fall through */
1246 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1247 && !(ifr->ifr_media & IFM_FDX)) {
1248 device_printf(sc->dev,
1249 "1000baseTX half-duplex unsupported\n");
1252 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1255 error = ether_ioctl(ifp, command, data);
/*
 * miibus read: all PHY traffic goes through unit 0's SMI register
 * (sc_mge0). Only responds when the requested PHY address matches
 * MV_PHY_ADDR_BASE + device unit; polls for READVALID with a bounded
 * retry loop and warns on timeout. NOTE(review): the early-return for
 * a non-matching phy and the retries==0 test are not visible in this
 * sampled dump.
 */
1261 mge_miibus_readreg(device_t dev, int phy, int reg)
1266 * We assume static PHY address <=> device unit mapping:
1267 * PHY Address = MV_PHY_ADDR_BASE + devce unit.
1268 * This is true for most Marvell boards.
1270 * Code below grants proper PHY detection on each device
1274 if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy)
1277 MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1278 (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1280 retries = MGE_SMI_READ_RETRIES;
1281 while (--retries && !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
1282 DELAY(MGE_SMI_READ_DELAY);
1285 device_printf(dev, "Timeout while reading from PHY\n");
1287 return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
/*
 * miibus write: mirror of mge_miibus_readreg() — issue the SMI write
 * through unit 0 and poll until the SMI engine is no longer busy,
 * warning on timeout. Same PHY-address-to-unit mapping applies.
 */
1291 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1295 if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy)
1298 MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1299 (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1301 retries = MGE_SMI_WRITE_RETRIES;
1302 while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
1303 DELAY(MGE_SMI_WRITE_DELAY);
1306 device_printf(dev, "Timeout while writing to PHY\n");
/* Device probe: unconditionally claims the device with a default
 * priority and a human-readable description. */
1311 mge_probe(device_t dev)
1314 device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1315 return (BUS_PROBE_DEFAULT);
/* Resume handler: stub — only logs that it was called. */
1319 mge_resume(device_t dev)
1322 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * Shutdown handler: under the global lock, deregister polling if it
 * was active and stop the controller (the mge_stop call is not visible
 * in this sampled dump).
 */
1329 struct mge_softc *sc = device_get_softc(dev);
1331 MGE_GLOBAL_LOCK(sc);
1333 #ifdef DEVICE_POLLING
1334 if (sc->ifp->if_capenable & IFCAP_POLLING)
1335 ether_poll_deregister(sc->ifp);
1340 MGE_GLOBAL_UNLOCK(sc);
/*
 * Map mbuf chain m0 into the next free TX descriptor and hand it to
 * the DMA engine. Requires the chain to map to exactly one segment
 * (callers defragment first); fails when the load errors or when no
 * descriptor is free. NOTE(review): line-sampled dump — the return
 * statements and the cmd_status continuation (checksum flags /
 * MGE_DMA_OWNED) are not fully visible.
 */
1346 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1348 struct mge_desc_wrapper *dw = NULL;
1350 bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1358 /* Check for free descriptors */
1359 if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1360 /* No free descriptors */
1364 /* Fetch unused map */
1365 desc_no = sc->tx_desc_curr;
1366 dw = &sc->mge_tx_desc[desc_no];
1367 mapp = dw->buffer_dmap;
1369 bus_dmamap_sync(sc->mge_desc_dtag, mapp,
1370 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1372 /* Create mapping in DMA memory */
1373 error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
/* Single-segment requirement: anything else is unloaded and rejected. */
1375 if (error != 0 || nsegs != 1 ) {
1376 bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1377 return ((error != 0) ? error : -1);
1380 bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1382 /* Everything is ok, now we can send buffers */
1383 for (seg = 0; seg < nsegs; seg++) {
1384 dw->mge_desc->byte_count = segs[seg].ds_len;
1385 dw->mge_desc->buffer = segs[seg].ds_addr;
1387 dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
1388 MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1392 mge_offload_setup_descriptor(sc, dw);
1395 bus_dmamap_sync(sc->mge_desc_dtag, mapp,
1396 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1398 sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
1399 sc->tx_desc_used_count++;
/*
 * Periodic (1 Hz) callout: runs the TX watchdog check, re-applies
 * media settings if the ifmedia word changed, and re-arms itself.
 * The function header line is missing from this sampled dump.
 */
1406 struct mge_softc *sc = msc;
1408 /* Check for TX timeout */
1413 /* Check for media type change */
1414 if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1415 mge_ifmedia_upd(sc->ifp);
1417 /* Schedule another timeout one second from now */
1418 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * mge_watchdog - TX watchdog, driven from mge_tick().  wd_timer == 0
 * means the watchdog is disarmed; otherwise it is decremented once per
 * tick and the timeout fires only when it reaches zero, at which point
 * the controller is re-initialized to recover.
 */
1422 mge_watchdog(struct mge_softc *sc)
1428 MGE_GLOBAL_LOCK(sc);
/* Disarmed, or armed but not yet expired: nothing to do. */
1430 if (sc->wd_timer == 0 || --sc->wd_timer) {
1431 MGE_GLOBAL_UNLOCK(sc);
1436 if_printf(ifp, "watchdog timeout\n");
/* Recover by fully re-initializing the controller. */
1439 mge_init_locked(sc);
1441 MGE_GLOBAL_UNLOCK(sc);
/*
 * mge_start - ifnet if_start entry point.  Thin wrapper that takes the
 * transmit lock around mge_start_locked().
 */
1445 mge_start(struct ifnet *ifp)
1447 struct mge_softc *sc = ifp->if_softc;
1449 MGE_TRANSMIT_LOCK(sc);
1451 mge_start_locked(ifp);
1453 MGE_TRANSMIT_UNLOCK(sc);
/*
 * mge_start_locked - drain the interface send queue into TX
 * descriptors.  Caller holds the transmit lock.  Chains are collapsed
 * with m_defrag() because mge_encap() accepts only one DMA segment;
 * when encapsulation fails the packet is put back at the head of the
 * queue and IFF_DRV_OACTIVE throttles the stack until descriptors
 * free up.
 */
1457 mge_start_locked(struct ifnet *ifp)
1459 struct mge_softc *sc;
1460 struct mbuf *m0, *mtmp;
1461 uint32_t reg_val, queued = 0;
1465 MGE_TRANSMIT_LOCK_ASSERT(sc);
/* Proceed only if RUNNING is set and OACTIVE is clear. */
1467 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1472 /* Get packet from the queue */
1473 IF_DEQUEUE(&ifp->if_snd, m0);
/* Collapse the chain; the encap path supports a single segment. */
1477 mtmp = m_defrag(m0, M_DONTWAIT);
1481 if (mge_encap(sc, m0)) {
/* No descriptor available: requeue at the head and throttle. */
1482 IF_PREPEND(&ifp->if_snd, m0);
1483 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1491 /* Enable transmitter and watchdog timer */
1492 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1493 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
/*
 * mge_stop - bring the interface down: stop the tick callout, mask
 * interrupts, disable the RX/TX queues, reclaim and free every mbuf
 * still queued on the TX ring, wait for the transmitter to drain, and
 * finally clear the port-serial enable bit.  Caller is expected to
 * hold the global lock (the function manipulates both RX and TX
 * state).
 */
1499 mge_stop(struct mge_softc *sc)
1502 volatile uint32_t reg_val, status;
1503 struct mge_desc_wrapper *dw;
1504 struct mge_desc *desc;
/* Already down: nothing to do. */
1509 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1512 /* Stop tick engine */
1513 callout_stop(&sc->wd_callout);
1515 /* Disable interface */
1516 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1519 /* Disable interrupts */
1520 mge_intrs_ctrl(sc, 0);
1522 /* Disable Rx and Tx */
1523 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1524 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1525 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1527 /* Remove pending data from TX queue */
1528 while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1529 sc->tx_desc_used_count) {
1530 /* Get the descriptor */
1531 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1532 desc = dw->mge_desc;
1533 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1534 BUS_DMASYNC_POSTREAD);
1536 /* Get descriptor status */
1537 status = desc->cmd_status;
/* DMA still owns this descriptor; stop reclaiming here. */
1539 if (status & MGE_DMA_OWNED)
/*
 * Advance the reclaim index.  The original
 * "(++sc->tx_desc_used_idx) %" form modified the field twice
 * without a sequence point (undefined behavior in C); use the
 * well-defined equivalent.
 */
1542 sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
1544 sc->tx_desc_used_count--;
1546 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1547 BUS_DMASYNC_POSTWRITE);
1548 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1550 m_freem(dw->buffer);
1551 dw->buffer = (struct mbuf*)NULL;
1554 /* Wait for end of transmission */
1557 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1558 if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1559 (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1565 if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
/* Drop the port enable bit so the MAC is fully quiesced. */
1568 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1569 reg_val &= ~(PORT_SERIAL_ENABLE);
1570 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
/*
 * mge_suspend - newbus suspend method.  Only logs the call; no state
 * is saved here.
 */
1574 mge_suspend(device_t dev)
1577 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * mge_offload_process_frame - translate hardware RX checksum results
 * into mbuf csum_flags for a received frame.
 *
 * status  - RX descriptor cmd_status word (L3/L4 type and csum bits).
 * bufsize - RX descriptor size field; its MGE_RX_IP_FRAGMENT bit marks
 *           IP fragments, whose L4 checksum result is not usable.
 */
1582 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1583 uint32_t status, uint16_t bufsize)
/* Honour hardware results only when RX csum offload is enabled. */
1587 if (ifp->if_capenable & IFCAP_RXCSUM) {
1588 if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1589 csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
/* Unfragmented TCP/UDP with a good hardware L4 checksum. */
1591 if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1592 (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1593 (status & MGE_RX_L4_CSUM_OK)) {
1594 csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1595 frame->m_pkthdr.csum_data = 0xFFFF;
1598 frame->m_pkthdr.csum_flags = csum_flags;
/*
 * mge_offload_setup_descriptor - fold TX checksum-offload directives
 * into the descriptor cmd_status for the packet held by 'dw'.  Skips
 * an optional 802.1Q tag to locate the IP header; the hardware
 * offload engine supports IPv4 only.
 */
1603 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1605 struct mbuf *m0 = dw->buffer;
1606 struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1607 int csum_flags = m0->m_pkthdr.csum_flags;
/* Skip a VLAN tag, if present, to find the real ethertype. */
1613 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1614 etype = ntohs(eh->evl_proto);
1615 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1616 csum_flags |= MGE_TX_VLAN_TAGGED;
1618 etype = ntohs(eh->evl_encap_proto);
1619 ehlen = ETHER_HDR_LEN;
/* Hardware can only checksum IPv4 payloads. */
1622 if (etype != ETHERTYPE_IP) {
1624 "TCP/IP Offload enabled for unsupported "
/* Tell the MAC the IP header size (ip_hl is in 32-bit words). */
1629 ip = (struct ip *)(m0->m_data + ehlen);
1630 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1632 if ((m0->m_flags & M_FRAG) == 0)
1633 cmd_status |= MGE_TX_NOT_FRAGMENT;
/* Request IP and/or L4 checksum generation as the stack asked. */
1636 if (csum_flags & CSUM_IP)
1637 cmd_status |= MGE_TX_GEN_IP_CSUM;
1639 if (csum_flags & CSUM_TCP)
1640 cmd_status |= MGE_TX_GEN_L4_CSUM;
1642 if (csum_flags & CSUM_UDP)
1643 cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1645 dw->mge_desc->cmd_status |= cmd_status;
/*
 * mge_intrs_ctrl - globally enable or disable port interrupts.
 * Enable path: unmask RX queue 0, RX error, and the extended-cause
 * sources (TX error / underrun / buffer done).  Disable path: ack all
 * pending causes and zero every mask so nothing can fire.
 */
1649 mge_intrs_ctrl(struct mge_softc *sc, int enable)
/* Enable: unmask only the sources this driver services. */
1653 MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1654 MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1655 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1656 MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1657 MGE_PORT_INT_EXT_TXBUF0);
/* Disable: clear pending causes, then mask everything. */
1659 MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1660 MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1662 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1663 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1665 MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1666 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
/*
 * mge_crc8 - table-driven CRC-8 (polynomial 0x07, init 0x00, MSB
 * first) over 'size' bytes starting at 'data'.  Used to hash a
 * multicast MAC address into the 256-entry "other multicast" hardware
 * filter table.
 */
static uint8_t
mge_crc8(uint8_t *data, int size)
{
	static const uint8_t ct[256] = {
		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
	};
	uint8_t crc;
	int i;

	/* One table lookup per input byte. */
	crc = 0;
	for (i = 0; i < size; i++)
		crc = ct[crc ^ data[i]];

	return (crc);
}
/*
 * mge_setup_multicast - program the two 256-entry hardware multicast
 * filter tables.  'v' packs "deliver to MGE_RX_DEFAULT_QUEUE" with the
 * pass bit; each 32-bit register holds 4 one-byte entries.  With
 * IFF_ALLMULTI both tables are filled completely; otherwise addresses
 * matching the 01:00:5E:00:00:xx prefix go into the "special" table
 * (NOTE(review): the index for that case is assigned outside this
 * view — presumably the last address byte; confirm in full source)
 * and every other multicast address is hashed via mge_crc8() into the
 * "other" table.
 */
1716 mge_setup_multicast(struct mge_softc *sc)
1718 uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
1719 uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
1720 uint32_t smt[MGE_MCAST_REG_NUMBER];
1721 uint32_t omt[MGE_MCAST_REG_NUMBER];
1722 struct ifnet *ifp = sc->ifp;
1723 struct ifmultiaddr *ifma;
/* ALLMULTI: accept everything by filling every table entry. */
1727 if (ifp->if_flags & IFF_ALLMULTI) {
1728 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
1729 smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
1731 memset(smt, 0, sizeof(smt));
1732 memset(omt, 0, sizeof(omt));
/* Walk the interface's multicast list under the maddr lock. */
1734 if_maddr_rlock(ifp);
1735 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1736 if (ifma->ifma_addr->sa_family != AF_LINK)
1739 mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1740 if (memcmp(mac, special, sizeof(special)) == 0) {
/* Entry i: byte (i & 3) of register (i >> 2). */
1742 smt[i >> 2] |= v << ((i & 0x03) << 3);
1744 i = mge_crc8(mac, ETHER_ADDR_LEN);
1745 omt[i >> 2] |= v << ((i & 0x03) << 3);
1748 if_maddr_runlock(ifp);
/* Flush both shadow tables to the hardware registers. */
1751 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
1752 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
1753 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
/*
 * mge_set_rxic - program the RX interrupt-coalescing time into the
 * SDMA config register, clamping the request to the version-dependent
 * hardware maximum (mge_rx_ipg_max).
 */
1758 mge_set_rxic(struct mge_softc *sc)
1762 if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1763 sc->rx_ic_time = sc->mge_rx_ipg_max;
/* Clear the old IPG field (mask built from the max), insert new. */
1765 reg = MGE_READ(sc, MGE_SDMA_CONFIG);
1766 reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1767 reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1768 MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
/*
 * mge_set_txic - program the TX interrupt-coalescing time into the
 * TX FIFO urgent threshold register, clamping the request to the
 * version-dependent hardware maximum (mge_tfut_ipg_max).
 */
1772 mge_set_txic(struct mge_softc *sc)
1776 if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1777 sc->tx_ic_time = sc->mge_tfut_ipg_max;
/* Clear the old threshold field (mask built from the max), insert new. */
1779 reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
1780 reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1781 reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1782 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
/*
 * mge_sysctl_ic - sysctl handler for the interrupt-coalescing knobs.
 * arg1 is the softc; arg2 selects RX (MGE_IC_RX) or TX.  Reads report
 * the current time value; writes store the new value under the global
 * lock (the hardware update presumably follows via mge_set_rxic /
 * mge_set_txic — those calls are outside this view).
 */
1786 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1788 struct mge_softc *sc = (struct mge_softc *)arg1;
/* Export the current value; sysctl_handle_int round-trips it. */
1792 time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1793 error = sysctl_handle_int(oidp, &time, 0, req);
1797 MGE_GLOBAL_LOCK(sc);
1798 if (arg2 == MGE_IC_RX) {
1799 sc->rx_ic_time = time;
1802 sc->tx_ic_time = time;
1805 MGE_GLOBAL_UNLOCK(sc);
/*
 * mge_add_sysctls - create the per-device "int_coal" sysctl node with
 * rx_time / tx_time tunables, both serviced by mge_sysctl_ic() (arg2
 * distinguishes the RX and TX knobs).
 */
1811 mge_add_sysctls(struct mge_softc *sc)
1813 struct sysctl_ctx_list *ctx;
1814 struct sysctl_oid_list *children;
1815 struct sysctl_oid *tree;
1817 ctx = device_get_sysctl_ctx(sc->dev);
1818 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
/* dev.mge.N.int_coal container node. */
1819 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1820 CTLFLAG_RD, 0, "MGE Interrupts coalescing");
1821 children = SYSCTL_CHILDREN(tree);
1823 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1824 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1825 "I", "IC RX time threshold");
1826 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1827 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1828 "I", "IC TX time threshold");