2 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5 * Developed by Semihalf.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of MARVELL nor the names of contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
50 #include <net/ethernet.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
59 #include <netinet/in_systm.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
63 #include <sys/sockio.h>
65 #include <machine/bus.h>
67 #include <machine/resource.h>
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
73 #define MII_ADDR_BASE 8
76 #include <dev/mge/if_mgevar.h>
77 #include <arm/mv/mvreg.h>
78 #include <arm/mv/mvvar.h>
80 #include "miibus_if.h"
82 /* PHY registers are in the address space of the first mge unit */
/*
 * Saved softc of unit 0; the miibus read/write routines below go through
 * this pointer for all units, since only unit 0 exposes the SMI registers.
 */
83 static struct mge_softc *sc_mge0 = NULL;
85 static int mge_probe(device_t dev);
86 static int mge_attach(device_t dev);
87 static int mge_detach(device_t dev);
88 static int mge_shutdown(device_t dev);
89 static int mge_suspend(device_t dev);
90 static int mge_resume(device_t dev);
92 static int mge_miibus_readreg(device_t dev, int phy, int reg);
93 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
95 static int mge_ifmedia_upd(struct ifnet *ifp);
96 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
98 static void mge_init(void *arg);
99 static void mge_init_locked(void *arg);
100 static void mge_start(struct ifnet *ifp);
101 static void mge_start_locked(struct ifnet *ifp);
102 static void mge_watchdog(struct mge_softc *sc);
103 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
105 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
106 static uint32_t mge_rx_ipg(uint32_t val, int ver);
107 static void mge_ver_params(struct mge_softc *sc);
109 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
110 static void mge_intr_rx(void *arg);
111 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
112 static void mge_intr_tx(void *arg);
113 static void mge_intr_tx_locked(struct mge_softc *sc);
114 static void mge_intr_misc(void *arg);
115 static void mge_intr_sum(void *arg);
116 static void mge_intr_err(void *arg);
117 static void mge_stop(struct mge_softc *sc);
118 static void mge_tick(void *msc);
119 static uint32_t mge_set_port_serial_control(uint32_t media);
120 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
121 static void mge_set_mac_address(struct mge_softc *sc);
122 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
124 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
125 static int mge_allocate_dma(struct mge_softc *sc);
126 static int mge_alloc_desc_dma(struct mge_softc *sc,
127 struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
128 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
129 struct mbuf **mbufp, bus_addr_t *paddr);
130 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
131 static void mge_free_dma(struct mge_softc *sc);
132 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
133 bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
134 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
135 uint32_t status, uint16_t bufsize);
136 static void mge_offload_setup_descriptor(struct mge_softc *sc,
137 struct mge_desc_wrapper *dw);
138 static uint8_t mge_crc8(uint8_t *data, int size);
139 static void mge_setup_multicast(struct mge_softc *sc);
140 static void mge_set_rxic(struct mge_softc *sc);
141 static void mge_set_txic(struct mge_softc *sc);
142 static void mge_add_sysctls(struct mge_softc *sc);
143 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
/* Newbus device interface plus MII bus accessors for this driver. */
145 static device_method_t mge_methods[] = {
146 /* Device interface */
147 DEVMETHOD(device_probe, mge_probe),
148 DEVMETHOD(device_attach, mge_attach),
149 DEVMETHOD(device_detach, mge_detach),
150 DEVMETHOD(device_shutdown, mge_shutdown),
151 DEVMETHOD(device_suspend, mge_suspend),
152 DEVMETHOD(device_resume, mge_resume),
/* MII interface — PHY register access is routed through unit 0 (sc_mge0). */
154 DEVMETHOD(miibus_readreg, mge_miibus_readreg),
155 DEVMETHOD(miibus_writereg, mge_miibus_writereg),
/* Driver description: name, methods and per-instance softc size. */
159 static driver_t mge_driver = {
162 sizeof(struct mge_softc),
165 static devclass_t mge_devclass;
/* Register on the Marvell mbus and hang miibus off each mge instance. */
167 DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0);
168 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
169 MODULE_DEPEND(mge, ether, 1, 1, 1);
170 MODULE_DEPEND(mge, miibus, 1, 1, 1);
/*
 * Bus resources: one memory window for the register block and five
 * shareable IRQ lines (rx, tx, misc, summary, error — see mge_intrs[]).
 */
172 static struct resource_spec res_spec[] = {
173 { SYS_RES_MEMORY, 0, RF_ACTIVE },
174 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
175 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
176 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
177 { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
178 { SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
/*
 * Handler + description per interrupt line, in res_spec IRQ order.
 * NOTE(review): the struct head of this table is missing from this copy
 * of the file — confirm against the original source.
 */
183 driver_intr_t *handler;
185 } mge_intrs[MGE_INTR_COUNT] = {
186 { mge_intr_rx, "GbE receive interrupt" },
187 { mge_intr_tx, "GbE transmit interrupt" },
188 { mge_intr_misc,"GbE misc interrupt" },
189 { mge_intr_sum, "GbE summary interrupt" },
190 { mge_intr_err, "GbE error interrupt" },
194 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
196 uint32_t mac_l, mac_h;
198 /* XXX use currently programmed MAC address; eventually this info will
199 * be provided by the loader */
201 mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
202 mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
204 addr[0] = (mac_h & 0xff000000) >> 24;
205 addr[1] = (mac_h & 0x00ff0000) >> 16;
206 addr[2] = (mac_h & 0x0000ff00) >> 8;
207 addr[3] = (mac_h & 0x000000ff);
208 addr[4] = (mac_l & 0x0000ff00) >> 8;
209 addr[5] = (mac_l & 0x000000ff);
213 mge_tfut_ipg(uint32_t val, int ver)
218 return ((val & 0x3fff) << 4);
221 return ((val & 0xffff) << 4);
/*
 * Encode an RX interpacket gap value into its register layout.
 * Version 1 keeps a 14-bit value shifted by 8; version 2 splits a
 * 16-bit value: the top bit lands at bit 25 and the low 15 bits at bit 7.
 * NOTE(review): the version dispatch was missing from this copy of the
 * file; restored from the two encodings visible in the returns.
 */
static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	if (ver == 1)
		return ((val & 0x3fff) << 8);
	return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
}
/*
 * Select chip-version dependent limits and TX queue tuning values.
 * The 88F6281/MV78100 family gets the wider (v2) register fields;
 * everything else the narrower (v1) ones.
 * NOTE(review): the SoC id query and the sc->mge_ver / sc->mge_mtu
 * assignments are missing from this copy — confirm against the original.
 */
239 mge_ver_params(struct mge_softc *sc)
244 if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
245 d == MV_DEV_MV78100_Z0) {
/* v2 parts: 16-bit IPG fields, different arbiter/token defaults. */
248 sc->mge_tfut_ipg_max = 0xFFFF;
249 sc->mge_rx_ipg_max = 0xFFFF;
250 sc->mge_tx_arb_cfg = 0xFC0000FF;
251 sc->mge_tx_tok_cfg = 0xFFFF7FFF;
252 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
/* v1 parts: 14-bit IPG fields. */
256 sc->mge_tfut_ipg_max = 0x3FFF;
257 sc->mge_rx_ipg_max = 0x3FFF;
258 sc->mge_tx_arb_cfg = 0x000000FF;
259 sc->mge_tx_tok_cfg = 0x3FFFFFFF;
260 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
265 mge_set_mac_address(struct mge_softc *sc)
268 uint32_t mac_l, mac_h;
270 MGE_GLOBAL_LOCK_ASSERT(sc);
272 if_mac = (char *)IF_LLADDR(sc->ifp);
274 mac_l = (if_mac[4] << 8) | (if_mac[5]);
275 mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
276 (if_mac[2] << 8) | (if_mac[3] << 0);
278 MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
279 MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
281 mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
285 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
287 uint32_t reg_idx, reg_off, reg_val, i;
290 reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
291 reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
292 reg_val = (1 | (queue << 1)) << reg_off;
294 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
296 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
298 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
/*
 * Enable or disable promiscuous reception depending on IFF_PROMISC.
 * Promiscuous: set the UPM bit and open every multicast/unicast filter
 * entry towards 'queue'. Non-promiscuous: clear UPM, wipe the multicast
 * filters and reprogram the station address filter.
 */
303 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
305 uint32_t port_config;
308 /* Enable or disable promiscuous mode as needed */
309 if (sc->ifp->if_flags & IFF_PROMISC) {
310 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
311 port_config |= PORT_CONFIG_UPM;
312 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
/* Four identical "pass to queue" byte entries packed into one word. */
314 reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
315 (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
317 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
318 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
319 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
322 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
323 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
/* Non-promiscuous path: clear UPM and all multicast filter entries. */
326 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
327 port_config &= ~PORT_CONFIG_UPM;
328 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
330 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
331 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
332 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
/* Restore the unicast filter for the station address. */
335 mge_set_mac_address(sc);
340 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
344 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
347 *paddr = segs->ds_addr;
/*
 * Allocate a fresh mbuf cluster for an RX descriptor, replace any mbuf
 * currently loaded in 'map', DMA-map the new one and return its mbuf
 * pointer (*mbufp) and bus address (*paddr).
 * Panics if the cluster maps to more than one segment.
 */
351 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
354 struct mbuf *new_mbuf;
355 bus_dma_segment_t seg[1];
359 KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
361 new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
362 if (new_mbuf == NULL)
/* Use the whole cluster as the receive buffer. */
364 new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
/* Unload whatever buffer was previously mapped here. */
367 bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
368 bus_dmamap_unload(tag, map);
371 error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
373 KASSERT(nsegs == 1, ("Too many segments returned!"));
374 if (nsegs != 1 || error)
375 panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
377 bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
380 (*paddr) = seg->ds_addr;
/*
 * Allocate and DMA-map 'size' descriptors into the wrapper table 'tab',
 * chain them into a ring (walking backwards so each descriptor can point
 * at its successor), create a busdma tag for mbuf buffers in *buffer_tag
 * and a DMA map per descriptor.
 */
385 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
386 uint32_t size, bus_dma_tag_t *buffer_tag)
388 struct mge_desc_wrapper *dw;
389 bus_addr_t desc_paddr;
/* Walk backwards so next_desc of entry i can be set to entry i+1. */
393 for (i = size - 1; i >= 0; i--) {
395 error = bus_dmamem_alloc(sc->mge_desc_dtag,
396 (void**)&(dw->mge_desc),
397 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
401 if_printf(sc->ifp, "failed to allocate DMA memory\n");
/* Resolve the descriptor's bus address via mge_get_dma_addr. */
406 error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
407 dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
408 &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
411 if_printf(sc->ifp, "can't load descriptor\n");
412 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
418 /* Chain descriptors */
419 dw->mge_desc->next_desc = desc_paddr;
420 desc_paddr = dw->mge_desc_paddr;
/* Close the ring: last descriptor points back at the first. */
422 tab[size - 1].mge_desc->next_desc = desc_paddr;
424 /* Allocate a busdma tag for mbufs. */
425 error = bus_dma_tag_create(NULL, /* parent */
426 8, 0, /* alignment, boundary */
427 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
428 BUS_SPACE_MAXADDR, /* highaddr */
429 NULL, NULL, /* filtfunc, filtfuncarg */
430 MCLBYTES, 1, /* maxsize, nsegments */
431 MCLBYTES, 0, /* maxsegsz, flags */
432 NULL, NULL, /* lockfunc, lockfuncarg */
433 buffer_tag); /* dmat */
435 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
439 /* Create TX busdma maps */
440 for (i = 0; i < size; i++) {
442 error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
444 if_printf(sc->ifp, "failed to create map for mbuf\n");
/* No buffer attached yet. */
448 dw->buffer = (struct mbuf*)NULL;
449 dw->mge_desc->buffer = (bus_addr_t)NULL;
/*
 * Set up all DMA state for the device: the descriptor busdma tag, the
 * TX and RX descriptor rings, and an initial mbuf cluster for every RX
 * descriptor. Records the bus addresses of the first TX/RX descriptors.
 */
456 mge_allocate_dma(struct mge_softc *sc)
459 struct mge_desc_wrapper *dw;
462 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
463 error = bus_dma_tag_create(NULL, /* parent */
464 16, 0, /* alignment, boundary */
465 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
466 BUS_SPACE_MAXADDR, /* highaddr */
467 NULL, NULL, /* filtfunc, filtfuncarg */
468 sizeof(struct mge_desc), 1, /* maxsize, nsegments */
469 sizeof(struct mge_desc), 0, /* maxsegsz, flags */
470 NULL, NULL, /* lockfunc, lockfuncarg */
471 &sc->mge_desc_dtag); /* dmat */
/* Build the TX and RX descriptor rings. */
474 mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
476 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
/* Attach a receive cluster to every RX descriptor. */
479 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
480 dw = &(sc->mge_rx_desc[i]);
481 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
482 &dw->mge_desc->buffer);
/* Ring base addresses handed to the hardware at init time. */
485 sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
486 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
/*
 * Release a descriptor ring: unmap/destroy each buffer DMA map (freeing
 * the attached mbufs when free_mbufs is set) and free the descriptor
 * DMA memory itself.
 */
492 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
493 uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
495 struct mge_desc_wrapper *dw;
498 for (i = 0; i < size; i++) {
502 if (dw->buffer_dmap) {
504 bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
505 BUS_DMASYNC_POSTREAD);
506 bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
508 bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
512 /* Free RX descriptors */
514 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
515 BUS_DMASYNC_POSTREAD);
516 bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
517 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
524 mge_free_dma(struct mge_softc *sc)
526 /* Free desciptors and mbufs */
527 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
528 mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
530 /* Destroy mbuf dma tag */
531 bus_dma_tag_destroy(sc->mge_tx_dtag);
532 bus_dma_tag_destroy(sc->mge_rx_dtag);
533 /* Destroy descriptors tag */
534 bus_dma_tag_destroy(sc->mge_desc_dtag);
/*
 * Rebuild the RX ring from scratch (used after an RX resource error):
 * free and reallocate the descriptors and their clusters, reset the
 * ring index, reload the hardware's current-descriptor pointer and
 * re-enable the RX queue. Caller holds the receive lock.
 */
538 mge_reinit_rx(struct mge_softc *sc)
540 struct mge_desc_wrapper *dw;
543 MGE_RECEIVE_LOCK_ASSERT(sc);
545 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
547 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
550 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
551 dw = &(sc->mge_rx_desc[i]);
552 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
553 &dw->mge_desc->buffer);
556 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
557 sc->rx_desc_curr = 0;
/* Point the hardware at the head of the fresh ring. */
559 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
562 /* Enable RX queue */
563 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
566 #ifdef DEVICE_POLLING
567 static poll_handler_t mge_poll;
/*
 * polling(4) handler: when interrupts are disabled in favour of polling,
 * service TX completions and up to 'count' RX frames per call. On
 * POLL_AND_CHECK_STATUS also inspect and acknowledge the cause registers.
 */
570 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
572 struct mge_softc *sc = ifp->if_softc;
573 uint32_t int_cause, int_cause_ext;
/* Nothing to do if the interface has been brought down. */
578 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
579 MGE_GLOBAL_UNLOCK(sc);
583 if (cmd == POLL_AND_CHECK_STATUS) {
584 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
585 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
587 /* Check for resource error */
588 if (int_cause & MGE_PORT_INT_RXERRQ0)
/* Acknowledge any pending causes (write-zero-to-clear). */
591 if (int_cause || int_cause_ext) {
592 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
593 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
597 mge_intr_tx_locked(sc);
598 rx_npkts = mge_intr_rx_locked(sc, count);
600 MGE_GLOBAL_UNLOCK(sc);
603 #endif /* DEVICE_POLLING */
/*
 * Attach routine: record unit 0's softc for SMI access, set up chip
 * version parameters, locks, bus resources, DMA rings, the network
 * interface, the PHY (at MII_ADDR_BASE + unit) and interrupt handlers.
 */
606 mge_attach(device_t dev)
608 struct mge_softc *sc;
610 uint8_t hwaddr[ETHER_ADDR_LEN];
613 sc = device_get_softc(dev);
/* Unit 0 owns the SMI registers used by all units (sc_mge0). */
616 if (device_get_unit(dev) == 0)
619 /* Set chip version-dependent parameters */
623 * We assume static PHY address <=> device unit mapping:
624 * PHY Address = MII_ADDR_BASE + devce unit.
625 * This is true for most Marvell boards.
627 phy = MII_ADDR_BASE + device_get_unit(dev);
629 /* Initialize mutexes */
630 mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
631 mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
633 /* Allocate IO and IRQ resources */
634 error = bus_alloc_resources(dev, res_spec, sc->res);
636 device_printf(dev, "could not allocate resources\n");
641 /* Allocate DMA, buffers, buffer descriptors */
642 error = mge_allocate_dma(sc);
648 sc->tx_desc_curr = 0;
649 sc->rx_desc_curr = 0;
650 sc->tx_desc_used_idx = 0;
651 sc->tx_desc_used_count = 0;
653 /* Configure defaults for interrupts coalescing */
654 sc->rx_ic_time = 768;
655 sc->tx_ic_time = 768;
658 /* Allocate network interface */
659 ifp = sc->ifp = if_alloc(IFT_ETHER);
661 device_printf(dev, "if_alloc() failed\n");
666 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
668 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
669 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
670 ifp->if_capenable = ifp->if_capabilities;
671 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
673 #ifdef DEVICE_POLLING
674 /* Advertise that polling is supported */
675 ifp->if_capabilities |= IFCAP_POLLING;
678 ifp->if_init = mge_init;
679 ifp->if_start = mge_start;
680 ifp->if_ioctl = mge_ioctl;
/* Keep the send queue one short of the TX ring size. */
682 ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
683 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
684 IFQ_SET_READY(&ifp->if_snd);
686 mge_get_mac_address(sc, hwaddr);
687 ether_ifattach(ifp, hwaddr);
688 callout_init(&sc->wd_callout, 0);
691 error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
692 mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
694 device_printf(dev, "attaching PHYs failed\n");
700 sc->mii = device_get_softc(sc->miibus);
702 /* Attach interrupt handlers */
/* NOTE(review): only the first two entries of mge_intrs[] are wired up
 * here (i < 2) — confirm against the original whether this is intended. */
703 for (i = 0; i < 2; ++i) {
704 error = bus_setup_intr(dev, sc->res[1 + i],
705 INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
706 sc, &sc->ih_cookie[i]);
708 device_printf(dev, "could not setup %s\n",
709 mge_intrs[i].description);
710 ether_ifdetach(sc->ifp);
/*
 * Detach routine: stop the controller, drain the watchdog callout,
 * tear down the interrupt handlers set up in mge_attach, detach the
 * ifnet, and release DMA resources, bus resources and mutexes.
 */
719 mge_detach(device_t dev)
721 struct mge_softc *sc;
724 sc = device_get_softc(dev);
726 /* Stop controller and free TX queue */
730 /* Wait for stopping ticks */
731 callout_drain(&sc->wd_callout);
733 /* Stop and release all interrupts */
/* Mirrors the attach loop: the same two handlers are torn down. */
734 for (i = 0; i < 2; ++i) {
735 error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
737 device_printf(dev, "could not release %s\n",
738 mge_intrs[i].description);
741 /* Detach network interface */
743 ether_ifdetach(sc->ifp);
747 /* Free DMA resources */
750 /* Free IO memory handler */
751 bus_release_resources(dev, res_spec, sc->res);
753 /* Destroy mutexes */
754 mtx_destroy(&sc->receive_lock);
755 mtx_destroy(&sc->transmit_lock);
761 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
763 struct mge_softc *sc = ifp->if_softc;
764 struct mii_data *mii;
766 MGE_TRANSMIT_LOCK(sc);
771 ifmr->ifm_active = mii->mii_media_active;
772 ifmr->ifm_status = mii->mii_media_status;
774 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Translate an ifmedia word into the PORT_SERIAL_CTRL register value:
 * base bits (reserved bit 9, force-link-fail, 1552-byte MRU) plus
 * speed/autoneg bits per media subtype, plus full duplex when requested.
 * NOTE(review): the switch case labels (media subtypes) are missing from
 * this copy — the three visible arms appear to be 1000/100/auto paths.
 */
778 mge_set_port_serial_control(uint32_t media)
780 uint32_t port_config;
782 port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
783 PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
785 if (IFM_TYPE(media) == IFM_ETHER) {
786 switch(IFM_SUBTYPE(media)) {
790 port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
791 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
792 PORT_SERIAL_SPEED_AUTONEG);
795 port_config |= (PORT_SERIAL_MII_SPEED_100 |
796 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
797 PORT_SERIAL_SPEED_AUTONEG);
800 port_config |= (PORT_SERIAL_AUTONEG |
801 PORT_SERIAL_AUTONEG_FC |
802 PORT_SERIAL_SPEED_AUTONEG);
806 port_config |= PORT_SERIAL_FULL_DUPLEX;
808 return (port_config);
/*
 * ifmedia change callback: if the interface is up, remember the newly
 * selected media word and push the change to the PHY via mii_mediachg().
 */
812 mge_ifmedia_upd(struct ifnet *ifp)
814 struct mge_softc *sc = ifp->if_softc;
816 if (ifp->if_flags & IFF_UP) {
819 sc->mge_media_status = sc->mii->mii_media.ifm_media;
820 mii_mediachg(sc->mii);
823 MGE_GLOBAL_UNLOCK(sc);
/*
 * if_init entry point: take the global (TX+RX) lock and run the locked
 * initialization.
 * NOTE(review): the signature and MGE_GLOBAL_LOCK() call were missing
 * from this copy (only the unlock survived) — restored.
 */
static void
mge_init(void *arg)
{
	struct mge_softc *sc = arg;

	MGE_GLOBAL_LOCK(sc);

	mge_init_locked(arg);

	MGE_GLOBAL_UNLOCK(sc);
}
/*
 * Bring the controller up with both locks held: program MAC address and
 * filters, TX queue tuning, port/SDMA configuration, arm the RX ring,
 * enable the port, wait for link, set interrupt coalescing, enable
 * interrupts (unless polling), mark the ifnet running and start the
 * watchdog tick.
 */
842 mge_init_locked(void *arg)
844 struct mge_softc *sc = arg;
845 struct mge_desc_wrapper *dw;
846 volatile uint32_t reg_val;
850 MGE_GLOBAL_LOCK_ASSERT(sc);
855 /* Disable interrupts */
856 mge_intrs_ctrl(sc, 0);
858 /* Set MAC address */
859 mge_set_mac_address(sc);
861 /* Setup multicast filters */
862 mge_setup_multicast(sc);
/* v2 controllers need RGMII and fixed-priority arbitration enabled. */
864 if (sc->mge_ver == 2) {
865 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
866 MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
869 /* Initialize TX queue configuration registers */
870 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
871 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
872 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
874 /* Clear TX queue configuration registers for unused queues */
875 for (i = 1; i < 7; i++) {
876 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
877 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
878 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
881 /* Set default MTU */
882 MGE_WRITE(sc, sc->mge_mtu, 0);
884 /* Port configuration */
885 MGE_WRITE(sc, MGE_PORT_CONFIG,
886 PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
887 PORT_CONFIG_ARO_RXQ(0));
888 MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);
890 /* Setup port configuration */
891 reg_val = mge_set_port_serial_control(sc->mge_media_status);
892 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
894 /* Setup SDMA configuration */
895 MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
896 MGE_SDMA_TX_BYTE_SWAP |
897 MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
898 MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
900 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
/* Hand the descriptor ring bases to the hardware. */
902 MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
903 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
906 /* Reset descriptor indexes */
907 sc->tx_desc_curr = 0;
908 sc->rx_desc_curr = 0;
909 sc->tx_desc_used_idx = 0;
910 sc->tx_desc_used_count = 0;
912 /* Enable RX descriptors */
913 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
914 dw = &sc->mge_rx_desc[i];
915 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
916 dw->mge_desc->buff_size = MCLBYTES;
917 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
918 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
921 /* Enable RX queue */
922 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
/* Enable the port itself. */
925 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
926 reg_val |= PORT_SERIAL_ENABLE;
927 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
/* Poll for link-up; warn if it never arrives. */
930 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
931 if (reg_val & MGE_STATUS_LINKUP)
935 if_printf(sc->ifp, "Timeout on link-up\n");
940 /* Setup interrupts coalescing */
944 /* Enable interrupts */
945 #ifdef DEVICE_POLLING
947 * * ...only if polling is not turned on. Disable interrupts explicitly
948 * if polling is enabled.
950 if (sc->ifp->if_capenable & IFCAP_POLLING)
951 mge_intrs_ctrl(sc, 0);
953 #endif /* DEVICE_POLLING */
954 mge_intrs_ctrl(sc, 1);
956 /* Activate network interface */
957 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
958 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
961 /* Schedule watchdog timeout */
962 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
966 mge_intr_err(void *arg)
968 struct mge_softc *sc = arg;
972 if_printf(ifp, "%s\n", __FUNCTION__);
976 mge_intr_misc(void *arg)
978 struct mge_softc *sc = arg;
982 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * RX interrupt handler: bail out if polling owns the device; otherwise
 * read the cause registers, handle a queue-0 resource error, acknowledge
 * the RX causes (write-zero-to-clear) and drain the ring without a
 * packet budget (count = -1).
 */
986 mge_intr_rx(void *arg) {
987 struct mge_softc *sc = arg;
988 uint32_t int_cause, int_cause_ext;
990 MGE_RECEIVE_LOCK(sc);
992 #ifdef DEVICE_POLLING
993 if (sc->ifp->if_capenable & IFCAP_POLLING) {
994 MGE_RECEIVE_UNLOCK(sc);
999 /* Get interrupt cause */
1000 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1001 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1003 /* Check for resource error */
1004 if (int_cause & MGE_PORT_INT_RXERRQ0) {
/* Ack the error bit only; keep the remaining causes pending. */
1006 MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1007 int_cause & ~MGE_PORT_INT_RXERRQ0);
/* Reduce to the RX-related bits we actually service here. */
1010 int_cause &= MGE_PORT_INT_RXQ0;
1011 int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1013 if (int_cause || int_cause_ext) {
1014 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1015 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1016 mge_intr_rx_locked(sc, -1);
1019 MGE_RECEIVE_UNLOCK(sc);
/*
 * Drain up to 'count' received frames from the RX ring (count < 0 means
 * no budget). For each software-owned descriptor: copy the payload into
 * a fresh mbuf chain, strip CRC, apply checksum offload results, hand
 * the packet to the stack (dropping the lock around if_input), then
 * recycle the descriptor back to the hardware.
 */
1024 mge_intr_rx_locked(struct mge_softc *sc, int count)
1026 struct ifnet *ifp = sc->ifp;
1029 struct mge_desc_wrapper* dw;
1033 MGE_RECEIVE_LOCK_ASSERT(sc);
1035 while (count != 0) {
1036 dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1037 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1038 BUS_DMASYNC_POSTREAD);
1041 status = dw->mge_desc->cmd_status;
1042 bufsize = dw->mge_desc->buff_size;
/* Stop at the first descriptor still owned by the DMA engine. */
1043 if ((status & MGE_DMA_OWNED) != 0)
/* NOTE(review): '~(status & MGE_ERR_SUMMARY)' is always non-zero, so
 * errored frames are not skipped; '!' was most likely intended — verify
 * against the upstream source. */
1046 if (dw->mge_desc->byte_count &&
1047 ~(status & MGE_ERR_SUMMARY)) {
1049 bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1050 BUS_DMASYNC_POSTREAD);
/* Copy out the frame minus the trailing Ethernet CRC. */
1052 mb = m_devget(dw->buffer->m_data,
1053 dw->mge_desc->byte_count - ETHER_CRC_LEN,
1057 /* Give up if no mbufs */
/* Drop the 2-byte alignment padding the controller prepends. */
1061 mb->m_pkthdr.len -= 2;
1064 mge_offload_process_frame(ifp, mb, status,
/* if_input may sleep/recurse; release the RX lock around it. */
1067 MGE_RECEIVE_UNLOCK(sc);
1068 (*ifp->if_input)(ifp, mb);
1069 MGE_RECEIVE_LOCK(sc);
/* Recycle the descriptor: hand it back to the DMA engine. */
1073 dw->mge_desc->byte_count = 0;
1074 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1075 sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
1076 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1077 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1087 mge_intr_sum(void *arg)
1089 struct mge_softc *sc = arg;
1093 if_printf(ifp, "%s\n", __FUNCTION__);
/*
 * TX interrupt handler: skip if polling owns the device, acknowledge the
 * TX-buffer cause bit (write-zero-to-clear) and reap completed
 * transmissions under the transmit lock.
 */
1097 mge_intr_tx(void *arg)
1099 struct mge_softc *sc = arg;
1100 uint32_t int_cause_ext;
1102 MGE_TRANSMIT_LOCK(sc);
1104 #ifdef DEVICE_POLLING
1105 if (sc->ifp->if_capenable & IFCAP_POLLING) {
1106 MGE_TRANSMIT_UNLOCK(sc);
1111 /* Ack the interrupt */
1112 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1113 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
1114 int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);
1116 mge_intr_tx_locked(sc);
1118 MGE_TRANSMIT_UNLOCK(sc);
/*
 * Reap completed TX descriptors: for each descriptor no longer owned by
 * the DMA engine, advance the used index, account collisions from the
 * error bits, unmap and free the transmitted mbuf, then clear OACTIVE
 * and restart transmission. Caller holds the transmit lock.
 */
1123 mge_intr_tx_locked(struct mge_softc *sc)
1125 struct ifnet *ifp = sc->ifp;
1126 struct mge_desc_wrapper *dw;
1127 struct mge_desc *desc;
1131 MGE_TRANSMIT_LOCK_ASSERT(sc);
1133 /* Disable watchdog */
1136 while (sc->tx_desc_used_count) {
1137 /* Get the descriptor */
1138 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1139 desc = dw->mge_desc;
1140 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1141 BUS_DMASYNC_POSTREAD);
1143 /* Get descriptor status */
1144 status = desc->cmd_status;
/* Stop at the first descriptor still owned by the hardware. */
1146 if (status & MGE_DMA_OWNED)
1149 sc->tx_desc_used_idx =
1150 (++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;
1151 sc->tx_desc_used_count--;
1153 /* Update collision statistics */
1154 if (status & MGE_ERR_SUMMARY) {
1155 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1156 ifp->if_collisions++;
/* Retransmit limit hit: count as 16 collisions. */
1157 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1158 ifp->if_collisions += 16;
1161 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1162 BUS_DMASYNC_POSTWRITE);
1163 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1164 m_freem(dw->buffer);
1165 dw->buffer = (struct mbuf*)NULL;
1172 /* Now send anything that was pending */
1173 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1174 mge_start_locked(ifp);
/*
 * ioctl handler: flag changes (promisc/allmulti/up-down), multicast
 * list updates, capability toggles (HWCSUM, optional POLLING) and media
 * requests, deferring everything else to ether_ioctl().
 */
1179 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1181 struct mge_softc *sc = ifp->if_softc;
1182 struct ifreq *ifr = (struct ifreq *)data;
1190 MGE_GLOBAL_LOCK(sc);
1192 if (ifp->if_flags & IFF_UP) {
1193 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Only react to the flags that actually changed. */
1194 flags = ifp->if_flags ^ sc->mge_if_flags;
1195 if (flags & IFF_PROMISC)
1196 mge_set_prom_mode(sc,
1197 MGE_RX_DEFAULT_QUEUE);
1199 if (flags & IFF_ALLMULTI)
1200 mge_setup_multicast(sc);
1202 mge_init_locked(sc);
1204 else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1207 sc->mge_if_flags = ifp->if_flags;
1208 MGE_GLOBAL_UNLOCK(sc);
/* Multicast list changed: rebuild hardware filters if running. */
1212 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1213 MGE_GLOBAL_LOCK(sc);
1214 mge_setup_multicast(sc);
1215 MGE_GLOBAL_UNLOCK(sc);
/* Capability changes: mask holds the toggled capability bits. */
1219 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1220 if (mask & IFCAP_HWCSUM) {
1221 ifp->if_capenable &= ~IFCAP_HWCSUM;
1222 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1223 if (ifp->if_capenable & IFCAP_TXCSUM)
1224 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1226 ifp->if_hwassist = 0;
1228 #ifdef DEVICE_POLLING
1229 if (mask & IFCAP_POLLING) {
1230 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1231 error = ether_poll_register(mge_poll, ifp);
/* Polling enabled: mask the hardware interrupts. */
1235 MGE_GLOBAL_LOCK(sc);
1236 mge_intrs_ctrl(sc, 0);
1237 ifp->if_capenable |= IFCAP_POLLING;
1238 MGE_GLOBAL_UNLOCK(sc);
1240 error = ether_poll_deregister(ifp);
1241 MGE_GLOBAL_LOCK(sc);
1242 mge_intrs_ctrl(sc, 1);
1243 ifp->if_capenable &= ~IFCAP_POLLING;
1244 MGE_GLOBAL_UNLOCK(sc);
1249 case SIOCGIFMEDIA: /* fall through */
/* The controller cannot do 1000baseT half duplex; reject it. */
1251 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1252 && !(ifr->ifr_media & IFM_FDX)) {
1253 device_printf(sc->dev,
1254 "1000baseTX half-duplex unsupported\n");
1257 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1260 error = ether_ioctl(ifp, command, data);
/*
 * Read a PHY register over SMI. All units go through unit 0's SMI
 * registers (sc_mge0); busy-waits for READVALID with a bounded retry
 * loop and warns on timeout before returning whatever the register
 * holds.
 */
1266 mge_miibus_readreg(device_t dev, int phy, int reg)
1270 MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1271 (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1273 retries = MGE_SMI_READ_RETRIES;
1274 while (--retries && !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
1275 DELAY(MGE_SMI_READ_DELAY);
1278 device_printf(dev, "Timeout while reading from PHY\n");
/* Low 16 bits of the SMI register carry the PHY data. */
1280 return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
/*
 * Write a PHY register over SMI via unit 0 (sc_mge0). Busy-waits for
 * the BUSY bit to clear with a bounded retry loop; warns on timeout.
 */
1284 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1288 MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1289 (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1291 retries = MGE_SMI_WRITE_RETRIES;
1292 while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
1293 DELAY(MGE_SMI_WRITE_DELAY);
1296 device_printf(dev, "Timeout while writing to PHY\n");
1301 mge_probe(device_t dev)
1304 device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1305 return (BUS_PROBE_DEFAULT);
1309 mge_resume(device_t dev)
1312 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * Shutdown method: under the global lock, deregister polling if active
 * and stop the controller.
 */
1317 mge_shutdown(device_t dev)
1319 struct mge_softc *sc = device_get_softc(dev);
1321 MGE_GLOBAL_LOCK(sc);
1323 #ifdef DEVICE_POLLING
1324 if (sc->ifp->if_capenable & IFCAP_POLLING)
1325 ether_poll_deregister(sc->ifp);
1330 MGE_GLOBAL_UNLOCK(sc);
/*
 * Map one mbuf chain onto the next free TX descriptor. Returns non-zero
 * when the ring is full or the mbuf cannot be mapped into a single DMA
 * segment (callers m_defrag() beforehand). On success the descriptor is
 * handed to the DMA engine and the ring counters advance.
 */
1336 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1338 struct mge_desc_wrapper *dw = NULL;
1340 bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1348 /* Check for free descriptors */
1349 if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1350 /* No free descriptors */
1354 /* Fetch unused map */
1355 desc_no = sc->tx_desc_curr;
1356 dw = &sc->mge_tx_desc[desc_no];
1357 mapp = dw->buffer_dmap;
1359 /* Create mapping in DMA memory */
1360 error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
/* Only single-segment mappings are supported; bail out otherwise. */
1362 if (error != 0 || nsegs != 1 ) {
1363 bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1364 return ((error != 0) ? error : -1);
1367 bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1369 /* Everything is ok, now we can send buffers */
1370 for (seg = 0; seg < nsegs; seg++) {
1371 dw->mge_desc->byte_count = segs[seg].ds_len;
1372 dw->mge_desc->buffer = segs[seg].ds_addr;
/* Whole frame in one descriptor; let hardware append CRC and pad. */
1374 dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
1375 MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
/* Fill in checksum-offload bits for this frame. */
1379 mge_offload_setup_descriptor(sc, dw);
1382 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1383 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1385 sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
1386 sc->tx_desc_used_count++;
/*
 * Once-per-second callout: check the TX watchdog, track media changes
 * and re-arm itself.
 * NOTE(review): the function signature and the mge_watchdog()/mii_tick()
 * calls appear to be missing from this copy — confirm against the
 * original source.
 */
1393 struct mge_softc *sc = msc;
1395 /* Check for TX timeout */
1400 /* Check for media type change */
1401 if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1402 mge_ifmedia_upd(sc->ifp);
1404 /* Schedule another timeout one second from now */
1405 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
/*
 * mge_watchdog: TX watchdog check, driven once per second from the
 * tick callout.  Takes the global lock itself.  wd_timer == 0 means
 * the watchdog is disarmed; otherwise it counts down each call and
 * only the transition to zero triggers recovery (log the timeout and
 * reinitialize the controller).
 */
1409 mge_watchdog(struct mge_softc *sc)
1415 MGE_GLOBAL_LOCK(sc);
/* Disarmed, or still counting down: nothing to do this tick. */
1417 if (sc->wd_timer == 0 || --sc->wd_timer) {
1418 MGE_GLOBAL_UNLOCK(sc);
1423 if_printf(ifp, "watchdog timeout\n");
/* Timer expired: recover by reinitializing the interface. */
1426 mge_init_locked(sc);
1428 MGE_GLOBAL_UNLOCK(sc);
/*
 * mge_start: ifnet if_start entry point.  Thin wrapper that acquires
 * the transmit lock around mge_start_locked().
 */
1432 mge_start(struct ifnet *ifp)
1434 struct mge_softc *sc = ifp->if_softc;
1436 MGE_TRANSMIT_LOCK(sc);
1438 mge_start_locked(ifp);
1440 MGE_TRANSMIT_UNLOCK(sc);
/*
 * mge_start_locked: drain the interface send queue into the TX ring.
 * Caller holds the transmit lock (asserted below).  Each packet is
 * defragmented to a single mbuf before encapsulation, since
 * mge_encap() accepts only one DMA segment; on encap failure the
 * packet is pushed back onto the queue and IFF_DRV_OACTIVE is set
 * until descriptors free up.  Loop header, defrag error handling and
 * the queued-packet bookkeeping are elided from this view.
 */
1444 mge_start_locked(struct ifnet *ifp)
1446 struct mge_softc *sc;
1447 struct mbuf *m0, *mtmp;
1448 uint32_t reg_val, queued = 0;
1452 MGE_TRANSMIT_LOCK_ASSERT(sc);
/* Only transmit while running and not already flow-blocked. */
1454 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1459 /* Get packet from the queue */
1460 IF_DEQUEUE(&ifp->if_snd, m0);
/* M_DONTWAIT: holding a lock, must not sleep for mbuf allocation. */
1464 mtmp = m_defrag(m0, M_DONTWAIT);
1468 if (mge_encap(sc, m0)) {
/* Ring full or mapping failed: requeue and stall the queue. */
1469 IF_PREPEND(&ifp->if_snd, m0);
1470 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1478 /* Enable transmitter and watchdog timer */
1479 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1480 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
/*
 * mge_stop: bring the interface down.  Stops the tick callout, clears
 * the running flags, masks interrupts, disables the RX/TX queues,
 * reclaims every TX descriptor not still owned by the DMA engine
 * (syncing/unloading its DMA map and freeing its mbuf), busy-waits for
 * the transmitter and its FIFO to drain, and finally clears the
 * port-enable bit.  No-op if the interface is not running.
 */
1486 mge_stop(struct mge_softc *sc)
1489 volatile uint32_t reg_val, status;
1490 struct mge_desc_wrapper *dw;
1491 struct mge_desc *desc;
1496 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1499 /* Stop tick engine */
1500 callout_stop(&sc->wd_callout);
1502 /* Disable interface */
1503 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1506 /* Disable interrupts */
1507 mge_intrs_ctrl(sc, 0);
1509 /* Disable Rx and Tx */
1510 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1511 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1512 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1514 /* Remove pending data from TX queue */
1515 while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1516 sc->tx_desc_used_count) {
1517 /* Get the descriptor */
1518 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1519 desc = dw->mge_desc;
1520 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1521 BUS_DMASYNC_POSTREAD);
1523 /* Get descriptor status */
1524 status = desc->cmd_status;
/* Still owned by the DMA engine: stop reclaiming at this point. */
1526 if (status & MGE_DMA_OWNED)
/*
 * NOTE(review): "x = (++x) % N" is the same unsequenced-modification
 * pattern as in mge_encap() -- UB under C99; prefer "(x + 1) % N".
 */
1529 sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) %
1531 sc->tx_desc_used_count--;
1533 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1534 BUS_DMASYNC_POSTWRITE);
1535 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1537 m_freem(dw->buffer);
1538 dw->buffer = (struct mbuf*)NULL;
1541 /* Wait for end of transmission */
/*
 * Poll until the transmitter is idle and its FIFO empty; the loop
 * header and timeout countdown are elided from this view.
 */
1544 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1545 if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1546 (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1552 if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
/* Finally drop the port-enable bit in the serial control register. */
1555 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1556 reg_val &= ~(PORT_SERIAL_ENABLE);
1557 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
/*
 * mge_suspend: bus suspend method.  Only logs the call; no state
 * saving is visible in this view.
 */
1561 mge_suspend(device_t dev)
1564 device_printf(dev, "%s\n", __FUNCTION__);
/*
 * mge_offload_process_frame: translate hardware RX checksum results
 * into mbuf csum_flags for one received frame.  Acts only when RXCSUM
 * is enabled on the interface.  'status' carries the RX descriptor
 * status bits; 'bufsize' also carries the MGE_RX_IP_FRAGMENT flag.
 */
1569 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1570 uint32_t status, uint16_t bufsize)
1574 if (ifp->if_capenable & IFCAP_RXCSUM) {
/* IPv4 header checksum was verified by hardware. */
1575 if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1576 csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
/* L4 checksum is only trusted for unfragmented TCP/UDP frames. */
1578 if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1579 (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1580 (status & MGE_RX_L4_CSUM_OK)) {
1581 csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1582 frame->m_pkthdr.csum_data = 0xFFFF;
1585 frame->m_pkthdr.csum_flags = csum_flags;
/*
 * mge_offload_setup_descriptor: derive TX checksum-offload command
 * bits for one descriptor from the mbuf's csum_flags and its
 * Ethernet/IP headers, then OR them into the descriptor's cmd_status.
 * Handles an optional 802.1Q tag; for non-IPv4 ethertypes a warning
 * path (partially elided) is taken instead of programming offload.
 */
1590 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1592 struct mbuf *m0 = dw->buffer;
1593 struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1594 int csum_flags = m0->m_pkthdr.csum_flags;
/* Skip over a VLAN tag, if present, to find the real ethertype. */
1600 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1601 etype = ntohs(eh->evl_proto);
1602 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1603 csum_flags |= MGE_TX_VLAN_TAGGED;
1605 etype = ntohs(eh->evl_encap_proto);
1606 ehlen = ETHER_HDR_LEN;
/* Hardware checksum offload only understands IPv4. */
1609 if (etype != ETHERTYPE_IP) {
1611 "TCP/IP Offload enabled for unsupported "
/* IP header follows the (possibly VLAN-extended) Ethernet header. */
1616 ip = (struct ip *)(m0->m_data + ehlen);
1617 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1619 if ((m0->m_flags & M_FRAG) == 0)
1620 cmd_status |= MGE_TX_NOT_FRAGMENT;
1623 if (csum_flags & CSUM_IP)
1624 cmd_status |= MGE_TX_GEN_IP_CSUM;
1626 if (csum_flags & CSUM_TCP)
1627 cmd_status |= MGE_TX_GEN_L4_CSUM;
/* UDP needs the generic L4-checksum request plus the UDP-type bit. */
1629 if (csum_flags & CSUM_UDP)
1630 cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1632 dw->mge_desc->cmd_status |= cmd_status;
/*
 * mge_intrs_ctrl: enable or disable all port interrupt sources.
 * The enable path unmasks RX queue 0, the extend summary and RX-error
 * sources, plus the extended TX error / RX overrun / TX underrun /
 * TX buffer sources.  The disable path clears every cause register
 * (acknowledging latched events) and then zeroes all masks.
 * (The if/else around the two paths is elided from this view.)
 */
1636 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1640 MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1641 MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1642 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1643 MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1644 MGE_PORT_INT_EXT_TXBUF0);
/* Disable path: clear latched causes first, then mask everything. */
1646 MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1647 MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1649 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1650 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1652 MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1653 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
/*
 * mge_crc8: table-driven CRC-8 over 'size' bytes of 'data'.  The
 * 256-entry table matches polynomial x^8 + x^2 + x + 1 (0x07).
 * Used below to hash multicast MAC addresses into an index for the
 * "other multicast" filter table.  (Accumulator declaration, loop
 * header and return are elided from this view.)
 */
1658 mge_crc8(uint8_t *data, int size)
1661 static const uint8_t ct[256] = {
1662 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1663 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1664 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1665 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1666 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1667 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1668 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1669 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1670 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1671 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1672 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1673 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1674 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1675 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1676 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1677 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1678 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1679 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1680 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
1681 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
1682 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
1683 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
1684 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
1685 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
1686 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
1687 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
1688 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
1689 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
1690 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
1691 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
1692 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
1693 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
/* Standard table-lookup CRC step, one byte per iteration. */
1697 crc = ct[crc ^ *(data++)];
/*
 * mge_setup_multicast: program the hardware multicast filter tables.
 * 'v' encodes "pass, to the default RX queue" for one 8-bit table
 * slot; four slots are packed per 32-bit register.  With IFF_ALLMULTI
 * every slot of both the special-multicast and other-multicast tables
 * accepts.  Otherwise each AF_LINK multicast address goes either into
 * the special table (addresses matching 01:00:5E:00:00:xx; the index
 * assignment from the last byte is elided here) or into the other
 * table at its CRC-8 hash.  Finally both tables are written out.
 */
1703 mge_setup_multicast(struct mge_softc *sc)
1705 uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
1706 uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
1707 uint32_t smt[MGE_MCAST_REG_NUMBER];
1708 uint32_t omt[MGE_MCAST_REG_NUMBER];
1709 struct ifnet *ifp = sc->ifp;
1710 struct ifmultiaddr *ifma;
/* Accept all multicast: fill every 8-bit slot of both tables. */
1714 if (ifp->if_flags & IFF_ALLMULTI) {
1715 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
1716 smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
1718 memset(smt, 0, sizeof(smt));
1719 memset(omt, 0, sizeof(omt));
1721 if_maddr_rlock(ifp);
1722 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1723 if (ifma->ifma_addr->sa_family != AF_LINK)
1726 mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
/* First five octets 01:00:5E:00:00 -> special-multicast table. */
1727 if (memcmp(mac, special, sizeof(special)) == 0) {
1728 smt[i >> 2] |= v << ((i & 0x03) << 3);
/* Anything else is hashed into the other-multicast table. */
1731 i = mge_crc8(mac, ETHER_ADDR_LEN);
1732 omt[i >> 2] |= v << ((i & 0x03) << 3);
1735 if_maddr_runlock(ifp);
/* Commit both shadow tables to the hardware registers. */
1738 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
1739 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
1740 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
/*
 * mge_set_rxic: program the RX interrupt-coalescing time.  Clamps
 * rx_ic_time to the chip's maximum, then read-modify-writes only the
 * IPG field of the SDMA config register (clear the field at its
 * maximum mask, OR in the new value).
 */
1745 mge_set_rxic(struct mge_softc *sc)
1749 if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1750 sc->rx_ic_time = sc->mge_rx_ipg_max;
1752 reg = MGE_READ(sc, MGE_SDMA_CONFIG);
/* mge_rx_ipg(max, ver) yields the full field mask for this chip rev. */
1753 reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1754 reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1755 MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
/*
 * mge_set_txic: program the TX interrupt-coalescing time.  Mirrors
 * mge_set_rxic(): clamp tx_ic_time to the maximum, then replace only
 * the IPG field of the TX FIFO urgent-threshold register.
 */
1759 mge_set_txic(struct mge_softc *sc)
1763 if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1764 sc->tx_ic_time = sc->mge_tfut_ipg_max;
1766 reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
/* mge_tfut_ipg(max, ver) yields the full field mask for this rev. */
1767 reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1768 reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1769 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
/*
 * mge_sysctl_ic: sysctl handler shared by the RX and TX interrupt
 * coalescing knobs; arg1 is the softc, arg2 selects the knob
 * (MGE_IC_RX or MGE_IC_TX).  Reads the current value, lets
 * sysctl_handle_int() expose/update it, then stores the new value and
 * reprograms the hardware (the mge_set_*ic() calls and the
 * error/early-return check are elided) under the global lock.
 */
1773 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1775 struct mge_softc *sc = (struct mge_softc *)arg1;
1779 time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1780 error = sysctl_handle_int(oidp, &time, 0, req);
1784 MGE_GLOBAL_LOCK(sc);
1785 if (arg2 == MGE_IC_RX) {
1786 sc->rx_ic_time = time;
1789 sc->tx_ic_time = time;
1792 MGE_GLOBAL_UNLOCK(sc);
1798 mge_add_sysctls(struct mge_softc *sc)
1800 struct sysctl_ctx_list *ctx;
1801 struct sysctl_oid_list *children;
1802 struct sysctl_oid *tree;
1804 ctx = device_get_sysctl_ctx(sc->dev);
1805 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1806 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1807 CTLFLAG_RD, 0, "MGE Interrupts coalescing");
1808 children = SYSCTL_CHILDREN(tree);
1810 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1811 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1812 "I", "IC RX time threshold");
1813 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1814 CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1815 "I", "IC TX time threshold");