2 * Copyright (c) 2015, Stanislav Galabov
3 * Copyright (c) 2014, Aleksandr A. Mityaev
4 * Copyright (c) 2011, Aleksandr Rybalko
6 * by Alexander Egorenkov <egorenar@gmail.com>
7 * and by Damien Bergamini <damien.bergamini@free.fr>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice unmodified, this list of conditions, and the following
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
40 #include <net/if_var.h>
41 #include <net/if_arp.h>
42 #include <net/ethernet.h>
43 #include <net/if_dl.h>
44 #include <net/if_media.h>
45 #include <net/if_types.h>
46 #include <net/if_vlan_var.h>
50 #include <machine/bus.h>
51 #include <machine/cache.h>
52 #include <machine/cpufunc.h>
53 #include <machine/resource.h>
54 #include <vm/vm_param.h>
60 #include "opt_platform.h"
61 #include "opt_rt305x.h"
64 #include <dev/ofw/openfirm.h>
65 #include <dev/ofw/ofw_bus.h>
66 #include <dev/ofw/ofw_bus_subr.h>
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
72 #include <mips/rt305x/rt305x_sysctlvar.h>
73 #include <mips/rt305x/rt305xreg.h>
75 #ifdef IF_RT_PHY_SUPPORT
76 #include "miibus_if.h"
82 #define RT_MAX_AGG_SIZE 3840
84 #define RT_TX_DATA_SEG0_SIZE MJUMPAGESIZE
86 #define RT_MS(_v, _f) (((_v) & _f) >> _f##_S)
87 #define RT_SM(_v, _f) (((_v) << _f##_S) & _f)
89 #define RT_TX_WATCHDOG_TIMEOUT 5
91 #define RT_CHIPID_RT3050 0x3050
92 #define RT_CHIPID_RT3052 0x3052
93 #define RT_CHIPID_RT5350 0x5350
94 #define RT_CHIPID_RT6855 0x6855
95 #define RT_CHIPID_MT7620 0x7620
/*
 * FDT "compatible" strings mapped to driver chip IDs; consumed by
 * ofw_bus_search_compatible() in rt_probe().  The table is walked in
 * order, so specific/newer models must precede the generic rt305x entry.
 * NOTE(review): listing is line-sampled; the closing "};" is elided.
 */
98 /* more specific and new models should go first */
99 static const struct ofw_compat_data rt_compat_data[] = {
100 { "ralink,rt6855-eth", (uintptr_t)RT_CHIPID_RT6855 },
101 { "ralink,rt5350-eth", (uintptr_t)RT_CHIPID_RT5350 },
102 { "ralink,rt3052-eth", (uintptr_t)RT_CHIPID_RT3052 },
103 { "ralink,rt305x-eth", (uintptr_t)RT_CHIPID_RT3050 },
104 { NULL, (uintptr_t)NULL }
109 * Static function prototypes
111 static int rt_probe(device_t dev);
112 static int rt_attach(device_t dev);
113 static int rt_detach(device_t dev);
114 static int rt_shutdown(device_t dev);
115 static int rt_suspend(device_t dev);
116 static int rt_resume(device_t dev);
117 static void rt_init_locked(void *priv);
118 static void rt_init(void *priv);
119 static void rt_stop_locked(void *priv);
120 static void rt_stop(void *priv);
121 static void rt_start(struct ifnet *ifp);
122 static int rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
123 static void rt_periodic(void *arg);
124 static void rt_tx_watchdog(void *arg);
125 static void rt_intr(void *arg);
126 static void rt_rt5350_intr(void *arg);
127 static void rt_tx_coherent_intr(struct rt_softc *sc);
128 static void rt_rx_coherent_intr(struct rt_softc *sc);
129 static void rt_rx_delay_intr(struct rt_softc *sc);
130 static void rt_tx_delay_intr(struct rt_softc *sc);
131 static void rt_rx_intr(struct rt_softc *sc, int qid);
132 static void rt_tx_intr(struct rt_softc *sc, int qid);
133 static void rt_rx_done_task(void *context, int pending);
134 static void rt_tx_done_task(void *context, int pending);
135 static void rt_periodic_task(void *context, int pending);
136 static int rt_rx_eof(struct rt_softc *sc,
137 struct rt_softc_rx_ring *ring, int limit);
138 static void rt_tx_eof(struct rt_softc *sc,
139 struct rt_softc_tx_ring *ring);
140 static void rt_update_stats(struct rt_softc *sc);
141 static void rt_watchdog(struct rt_softc *sc);
142 static void rt_update_raw_counters(struct rt_softc *sc);
143 static void rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
144 static void rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
145 static int rt_txrx_enable(struct rt_softc *sc);
146 static int rt_alloc_rx_ring(struct rt_softc *sc,
147 struct rt_softc_rx_ring *ring, int qid);
148 static void rt_reset_rx_ring(struct rt_softc *sc,
149 struct rt_softc_rx_ring *ring);
150 static void rt_free_rx_ring(struct rt_softc *sc,
151 struct rt_softc_rx_ring *ring);
152 static int rt_alloc_tx_ring(struct rt_softc *sc,
153 struct rt_softc_tx_ring *ring, int qid);
154 static void rt_reset_tx_ring(struct rt_softc *sc,
155 struct rt_softc_tx_ring *ring);
156 static void rt_free_tx_ring(struct rt_softc *sc,
157 struct rt_softc_tx_ring *ring);
158 static void rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
159 int nseg, int error);
160 static void rt_sysctl_attach(struct rt_softc *sc);
161 #ifdef IF_RT_PHY_SUPPORT
162 void rt_miibus_statchg(device_t);
163 static int rt_miibus_readreg(device_t, int, int);
164 static int rt_miibus_writereg(device_t, int, int, int);
166 static int rt_ifmedia_upd(struct ifnet *);
167 static void rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);
169 static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
171 static int rt_debug = 0;
172 SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0,
/*
 * rt_probe - newbus probe method.  Matches the FDT compatible string (or,
 * on non-FDT builds, a compile-time chip option) and records the chip ID
 * in the softc before setting the probe description.
 * Returns BUS_PROBE_GENERIC on success; the no-match return on the
 * (elided) branch after the NULL ocd_data check is not visible here.
 * NOTE(review): listing is line-sampled; several lines are missing.
 */
177 rt_probe(device_t dev)
179 	struct rt_softc *sc = device_get_softc(dev);
182 	const struct ofw_compat_data * cd;
184 	cd = ofw_bus_search_compatible(dev, rt_compat_data);
185 	if (cd->ocd_data == (uintptr_t)NULL)
/* Match found: remember which SoC variant we drive. */
188 	sc->rt_chipid = (unsigned int)(cd->ocd_data);
/* Non-FDT fallback: chip ID fixed by kernel config (#if chain partly elided). */
191 	sc->rt_chipid = RT_CHIPID_MT7620;
192 #elif defined(RT5350)
193 	sc->rt_chipid = RT_CHIPID_RT5350;
195 	sc->rt_chipid = RT_CHIPID_RT3050;
198 	snprintf(buf, sizeof(buf), "Ralink RT%x onChip Ethernet driver",
200 	device_set_desc_copy(dev, buf);
201 	return (BUS_PROBE_GENERIC);
/*
 * Parses six hex octets separated by single arbitrary characters
 * (e.g. "aa:bb:cc:dd:ee:ff") via sscanf into an int-aligned scratch
 * array, then narrows each value to a byte.  On short parse the output
 * MAC is zeroed; the (elided) return value presumably signals failure
 * non-zero, since callers test "if (!macaddr_atoi(...))" for success.
 */
205 * macaddr_atoi - translate string MAC address to uint8_t array
208 macaddr_atoi(const char *str, uint8_t *mac)
211 	unsigned int amac[ETHER_ADDR_LEN];	/* Aligned version */
213 	count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
214 	    &amac[0], &amac[1], &amac[2],
215 	    &amac[3], &amac[4], &amac[5]);
216 	if (count < ETHER_ADDR_LEN) {
217 		memset(mac, 0, ETHER_ADDR_LEN);
221 	/* Copy aligned to result */
222 	for (i = 0; i < ETHER_ADDR_LEN; i ++)
223 		mac[i] = (amac[i] & 0xff);
228 #ifdef USE_GENERATED_MAC_ADDRESS
230 * generate_mac(uint8_t *mac)
231 * This is MAC address generator for cases when real device MAC address
232 * unknown or not yet accessible.
233 * Use 'b','s','d' signature and 3 octets from CRC32 on kenv.
234 * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
236 * Output - MAC address, that does not change between reboots, if hints or
237 * bootloader info are unchanged.
240 generate_mac(uint8_t *mac)
244 	uint32_t crc = 0xffffffff;
/* Fold every kernel-environment string (incl. NUL) into the CRC so the
 * generated address is stable across reboots for a given configuration. */
246 	/* Generate CRC32 on kenv */
247 	for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
248 		crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
/* mac[0..2] = 'b','s','d' set in elided lines; low CRC bytes follow. */
255 	mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
256 	mac[4] = (crc >> 8) & 0xff;
/*
 * Tries sources in priority order: (1) bootloader environment variables
 * ("ethaddr"/"kmac") on RedBoot/U-Boot/RouterBOOT builds, (2) a
 * hint.<dev>.<unit>.macaddr resource string, (3) a generated address
 * when USE_GENERATED_MAC_ADDRESS is set, (4) a hardcoded fallback.
 * NOTE(review): early-return lines between the stages are elided.
 */
262 * ether_request_mac - try to find usable MAC address.
265 ether_request_mac(device_t dev, uint8_t *mac)
270 	 * "ethaddr" is passed via envp on RedBoot platforms
271 	 * "kmac" is passed via argv on RouterBOOT platforms
273 #if defined(RT305X_UBOOT) || defined(__REDBOOT__) || defined(__ROUTERBOOT__)
274 	if ((var = kern_getenv("ethaddr")) != NULL ||
275 	    (var = kern_getenv("kmac")) != NULL ) {
277 		if(!macaddr_atoi(var, mac)) {
278 			printf("%s: use %s macaddr from KENV\n",
279 			    device_get_nameunit(dev), var);
289 	 * hint.[dev].[unit].macaddr
291 	if (!resource_string_value(device_get_name(dev),
292 	    device_get_unit(dev), "macaddr", (const char **)&var)) {
294 		if(!macaddr_atoi(var, mac)) {
295 			printf("%s: use %s macaddr from hints\n",
296 			    device_get_nameunit(dev), var);
301 #ifdef USE_GENERATED_MAC_ADDRESS
/* generate_mac(mac) call elided between these lines. */
304 	device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
305 	    "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
315 	device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
/* Frame-engine reset stub: deliberately does nothing (see XXX below). */
325 reset_freng(struct rt_softc *sc)
327 	/* XXX hard reset kills everything so skip it ... */
/*
 * rt_attach - newbus attach method.
 * Order of operations: init mutex -> map registers -> hook IRQ resource ->
 * per-chip register map setup -> allocate Tx/Rx DMA rings -> create and
 * attach the ifnet -> MAC address discovery -> taskqueue + interrupt setup.
 * The trailing block (after the elided "fail:" label) releases everything
 * in reverse order on error.
 * NOTE(review): listing is line-sampled; error gotos and some closing
 * braces are not visible.
 */
332 rt_attach(device_t dev)
338 	sc = device_get_softc(dev);
341 	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
342 	    MTX_DEF | MTX_RECURSE);
345 	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
347 	if (sc->mem == NULL) {
348 		device_printf(dev, "could not allocate memory resource\n");
353 	sc->bst = rman_get_bustag(sc->mem);
354 	sc->bsh = rman_get_bushandle(sc->mem);
357 	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
359 	if (sc->irq == NULL) {
361 		    "could not allocate interrupt resource\n");
/* Per-instance copy of the global debug level, tunable via sysctl. */
367 	sc->debug = rt_debug;
369 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
370 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
371 	    "debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
/* The softc caches chip-specific register offsets so the rest of the
 * driver can be chip-agnostic (RT5350/MT7620 vs. RT305x layouts differ). */
377 	/* Fill in soc-specific registers map */
378 	switch(sc->rt_chipid) {
379 	case RT_CHIPID_MT7620:
380 	case RT_CHIPID_RT5350:
381 		device_printf(dev, "RT%x Ethernet MAC (rev 0x%08x)\n",
382 		    sc->rt_chipid, sc->mac_rev);
383 		/* RT5350: No GDMA, PSE, CDMA, PPE */
384 		RT_WRITE(sc, GE_PORT_BASE + 0x0C00, // UDPCS, TCPCS, IPCS=1
385 		    RT_READ(sc, GE_PORT_BASE + 0x0C00) | (0x7<<16));
386 		sc->delay_int_cfg=RT5350_PDMA_BASE+RT5350_DELAY_INT_CFG;
387 		sc->fe_int_status=RT5350_FE_INT_STATUS;
388 		sc->fe_int_enable=RT5350_FE_INT_ENABLE;
389 		sc->pdma_glo_cfg=RT5350_PDMA_BASE+RT5350_PDMA_GLO_CFG;
390 		sc->pdma_rst_idx=RT5350_PDMA_BASE+RT5350_PDMA_RST_IDX;
391 		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
392 			sc->tx_base_ptr[i]=RT5350_PDMA_BASE+RT5350_TX_BASE_PTR(i);
393 			sc->tx_max_cnt[i]=RT5350_PDMA_BASE+RT5350_TX_MAX_CNT(i);
394 			sc->tx_ctx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_CTX_IDX(i);
395 			sc->tx_dtx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_DTX_IDX(i);
/* RT5350/MT7620 have two Rx queues; rx_ring_count presumably set to 2
 * in an elided line -- confirm against full source. */
398 		sc->rx_base_ptr[0]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR0;
399 		sc->rx_max_cnt[0]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT0;
400 		sc->rx_calc_idx[0]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX0;
401 		sc->rx_drx_idx[0]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX0;
402 		sc->rx_base_ptr[1]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR1;
403 		sc->rx_max_cnt[1]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT1;
404 		sc->rx_calc_idx[1]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX1;
405 		sc->rx_drx_idx[1]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX1;
406 		sc->int_rx_done_mask=RT5350_INT_RXQ0_DONE;
407 		sc->int_tx_done_mask=RT5350_INT_TXQ0_DONE;
409 	case RT_CHIPID_RT6855:
410 		device_printf(dev, "RT6855 Ethernet MAC (rev 0x%08x)\n",
/* Default case: classic RT305x family with full GDMA frame engine. */
414 		device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
416 		RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
418 		    GDM_ICS_EN | /* Enable IP Csum */
419 		    GDM_TCS_EN | /* Enable TCP Csum */
420 		    GDM_UCS_EN | /* Enable UDP Csum */
421 		    GDM_STRPCRC | /* Strip CRC from packet */
422 		    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
423 		    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
424 		    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
425 		    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT /* fwd Other to CPU */
428 		sc->delay_int_cfg=PDMA_BASE+DELAY_INT_CFG;
429 		sc->fe_int_status=GE_PORT_BASE+FE_INT_STATUS;
430 		sc->fe_int_enable=GE_PORT_BASE+FE_INT_ENABLE;
431 		sc->pdma_glo_cfg=PDMA_BASE+PDMA_GLO_CFG;
/* NOTE(review): duplicate of the previous assignment (present in the
 * upstream driver too); harmless but redundant. */
432 		sc->pdma_glo_cfg=PDMA_BASE+PDMA_GLO_CFG;
433 		sc->pdma_rst_idx=PDMA_BASE+PDMA_RST_IDX;
434 		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
435 			sc->tx_base_ptr[i]=PDMA_BASE+TX_BASE_PTR(i);
436 			sc->tx_max_cnt[i]=PDMA_BASE+TX_MAX_CNT(i);
437 			sc->tx_ctx_idx[i]=PDMA_BASE+TX_CTX_IDX(i);
438 			sc->tx_dtx_idx[i]=PDMA_BASE+TX_DTX_IDX(i);
441 		sc->rx_base_ptr[0]=PDMA_BASE+RX_BASE_PTR0;
442 		sc->rx_max_cnt[0]=PDMA_BASE+RX_MAX_CNT0;
443 		sc->rx_calc_idx[0]=PDMA_BASE+RX_CALC_IDX0;
444 		sc->rx_drx_idx[0]=PDMA_BASE+RX_DRX_IDX0;
445 		sc->int_rx_done_mask=INT_RX_DONE;
446 		sc->int_tx_done_mask=INT_TXQ0_DONE;
449 	/* allocate Tx and Rx rings */
450 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
451 		error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
453 			device_printf(dev, "could not allocate Tx ring #%d\n",
/* Queue 5 is the management queue on this frame engine -- TODO confirm. */
459 	sc->tx_ring_mgtqid = 5;
460 	for (i = 0; i < sc->rx_ring_count; i++) {
461 		error = rt_alloc_rx_ring(sc, &sc->rx_ring[i], i);
463 			device_printf(dev, "could not allocate Rx ring\n");
468 	callout_init(&sc->periodic_ch, 0);
469 	callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);
471 	ifp = sc->ifp = if_alloc(IFT_ETHER);
473 		device_printf(dev, "could not if_alloc()\n");
479 	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
480 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
481 	ifp->if_init = rt_init;
482 	ifp->if_ioctl = rt_ioctl;
483 	ifp->if_start = rt_start;
484 #define	RT_TX_QLEN	256
486 	IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
487 	ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
488 	IFQ_SET_READY(&ifp->if_snd);
490 #ifdef IF_RT_PHY_SUPPORT
491 	error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
492 	    rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
494 		device_printf(dev, "attaching PHYs failed\n");
/* No PHY: advertise a single fixed 100baseTX full-duplex medium. */
499 	ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
500 	ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
502 	ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);
504 #endif /* IF_RT_PHY_SUPPORT */
506 	ether_request_mac(dev, sc->mac_addr);
507 	ether_ifattach(ifp, sc->mac_addr);
510 	 * Tell the upper layer(s) we support long frames.
512 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
513 	ifp->if_capabilities |= IFCAP_VLAN_MTU;
514 	ifp->if_capenable |= IFCAP_VLAN_MTU;
515 	ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
516 	ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;
518 	/* init task queue */
519 	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
520 	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
521 	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);
/* Per-pass Rx budget for the rx_done task (packets per invocation). */
523 	sc->rx_process_limit = 100;
525 	sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
526 	    taskqueue_thread_enqueue, &sc->taskqueue);
528 	taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
529 	    device_get_nameunit(sc->dev));
531 	rt_sysctl_attach(sc);
/* RT5350/MT7620 use a different interrupt status layout, hence the
 * dedicated ISR. */
533 	/* set up interrupt */
534 	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
535 	    NULL, (sc->rt_chipid == RT_CHIPID_RT5350 ||
536 	    sc->rt_chipid == RT_CHIPID_MT7620) ? rt_rt5350_intr : rt_intr,
539 		printf("%s: could not set up interrupt\n",
540 		    device_get_nameunit(dev));
544 	device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
/* Error unwind (reached via elided "fail:" labels): release in reverse
 * order of acquisition. */
550 	/* free Tx and Rx rings */
551 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
552 		rt_free_tx_ring(sc, &sc->tx_ring[i]);
554 	for (i = 0; i < sc->rx_ring_count; i++)
555 		rt_free_rx_ring(sc, &sc->rx_ring[i]);
557 	mtx_destroy(&sc->lock);
560 	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
564 	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
/*
 * rt_ifmedia_upd - ifmedia "change" callback.
 * PHY builds: reset each PHY instance (loop body elided) and restart
 * autonegotiation via mii_mediachg().  PHY-less builds: only the fixed
 * 100baseTX/FDX medium is valid, so AUTO is rejected.
 */
574 rt_ifmedia_upd(struct ifnet *ifp)
577 #ifdef IF_RT_PHY_SUPPORT
578 	struct mii_data *mii;
579 	struct mii_softc *miisc;
585 	mii = device_get_softc(sc->rt_miibus);
586 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
588 	error = mii_mediachg(mii);
593 #else /* !IF_RT_PHY_SUPPORT */
596 	struct ifmedia_entry *ife;
599 	ifm = &sc->rt_ifmedia;
602 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
605 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
606 		device_printf(sc->dev,
607 		    "AUTO is not supported for multiphy MAC");
615 #endif /* IF_RT_PHY_SUPPORT */
619 * Report current media status.
/*
 * rt_ifmedia_sts - ifmedia "status" callback.
 * PHY builds: report from the MII layer.  NOTE(review): the second
 * ifm_active/ifm_status pair below overwrites the MII values -- in the
 * full source these sit in a separate branch (lines elided); confirm
 * against the complete file.  PHY-less builds: always report link up at
 * fixed 100baseTX full-duplex.
 */
622 rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
624 #ifdef IF_RT_PHY_SUPPORT
626 	struct mii_data *mii;
631 	mii = device_get_softc(sc->rt_miibus);
633 	ifmr->ifm_active = mii->mii_media_active;
634 	ifmr->ifm_status = mii->mii_media_status;
635 	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
636 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
638 #else /* !IF_RT_PHY_SUPPORT */
640 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
641 	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
642 #endif /* IF_RT_PHY_SUPPORT */
/*
 * rt_detach - newbus detach method; mirror of rt_attach.
 * Stops the interface, drains deferred work, frees rings, deletes the
 * miibus child (if any), then tears down taskqueue, mutex, interrupt and
 * bus resources.  Drains happen after callout_stop so no new tasks are
 * queued while we wait.
 */
646 rt_detach(device_t dev)
652 	sc = device_get_softc(dev);
655 	RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");
/* Mark interface down (presumably under the softc lock -- the lock/
 * unlock lines are elided from this listing). */
659 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
661 	callout_stop(&sc->periodic_ch);
662 	callout_stop(&sc->tx_watchdog_ch);
664 	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
665 	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
666 	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
668 	/* free Tx and Rx rings */
669 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
670 		rt_free_tx_ring(sc, &sc->tx_ring[i]);
671 	for (i = 0; i < sc->rx_ring_count; i++)
672 		rt_free_rx_ring(sc, &sc->rx_ring[i]);
676 #ifdef IF_RT_PHY_SUPPORT
677 	if (sc->rt_miibus != NULL)
678 		device_delete_child(dev, sc->rt_miibus);
684 	taskqueue_free(sc->taskqueue);
686 	mtx_destroy(&sc->lock);
688 	bus_generic_detach(dev);
689 	bus_teardown_intr(dev, sc->irq, sc->irqh);
690 	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
691 	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
/* rt_shutdown - newbus shutdown method: stop the device (rt_stop call elided). */
697 rt_shutdown(device_t dev)
701 	sc = device_get_softc(dev);
702 	RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
/* rt_suspend - newbus suspend method: quiesce the device (rt_stop call elided). */
709 rt_suspend(device_t dev)
713 	sc = device_get_softc(dev);
714 	RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
/* rt_resume - newbus resume method: re-init only if the interface was up. */
721 rt_resume(device_t dev)
726 	sc = device_get_softc(dev);
729 	RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");
731 	if (ifp->if_flags & IFF_UP)
738 * rt_init_locked - Run initialization process having locked mtx.
/*
 * Sequence: program GDMA forwarding (RT305x family only) -> halt DMA and
 * wait for idle -> reset ring indices -> point hardware at freshly reset
 * Tx/Rx rings -> re-enable DMA -> clear and re-enable interrupts ->
 * enable Tx/Rx -> mark the ifnet RUNNING and start the periodic callout.
 * Caller must hold sc->lock (asserted below).
 */
741 rt_init_locked(void *priv)
745 #ifdef IF_RT_PHY_SUPPORT
746 	struct mii_data *mii;
753 #ifdef IF_RT_PHY_SUPPORT
754 	mii = device_get_softc(sc->rt_miibus);
757 	RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");
759 	RT_SOFTC_ASSERT_LOCKED(sc);
/* Hard frame-engine reset intentionally disabled (see reset_freng). */
762 	//RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
763 	//rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);
765 	/* Fwd to CPU (uni|broad|multi)cast and Unknown */
766 	if(sc->rt_chipid == RT_CHIPID_RT3050 || sc->rt_chipid == RT_CHIPID_RT3052)
767 		RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
769 		    GDM_ICS_EN | /* Enable IP Csum */
770 		    GDM_TCS_EN | /* Enable TCP Csum */
771 		    GDM_UCS_EN | /* Enable UDP Csum */
772 		    GDM_STRPCRC | /* Strip CRC from packet */
773 		    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
774 		    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
775 		    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
776 		    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT /* Forward Other to CPU */
779 	/* disable DMA engine */
780 	RT_WRITE(sc, sc->pdma_glo_cfg, 0);
781 	RT_WRITE(sc, sc->pdma_rst_idx, 0xffffffff);
783 	/* wait while DMA engine is busy */
784 	for (ntries = 0; ntries < 100; ntries++) {
785 		tmp = RT_READ(sc, sc->pdma_glo_cfg);
786 		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
/* Timeout path: presumably bails out of init (return lines elided). */
792 		device_printf(sc->dev, "timeout waiting for DMA engine\n");
796 	/* reset Rx and Tx rings */
797 	tmp = FE_RST_DRX_IDX0 |
803 	RT_WRITE(sc, sc->pdma_rst_idx, tmp);
805 	/* XXX switch set mac address */
806 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
807 		rt_reset_tx_ring(sc, &sc->tx_ring[i]);
809 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
810 		/* update TX_BASE_PTRx */
811 		RT_WRITE(sc, sc->tx_base_ptr[i],
812 		    sc->tx_ring[i].desc_phys_addr);
813 		RT_WRITE(sc, sc->tx_max_cnt[i],
814 		    RT_SOFTC_TX_RING_DESC_COUNT);
815 		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
819 	for (i = 0; i < sc->rx_ring_count; i++)
820 		rt_reset_rx_ring(sc, &sc->rx_ring[i]);
822 	/* update RX_BASE_PTRx */
823 	for (i = 0; i < sc->rx_ring_count; i++) {
824 		RT_WRITE(sc, sc->rx_base_ptr[i],
825 		    sc->rx_ring[i].desc_phys_addr);
826 		RT_WRITE(sc, sc->rx_max_cnt[i],
827 		    RT_SOFTC_RX_RING_DATA_COUNT);
/* Rx CALC index starts at last slot so hardware owns the whole ring. */
828 		RT_WRITE(sc, sc->rx_calc_idx[i],
829 		    RT_SOFTC_RX_RING_DATA_COUNT - 1);
832 	/* write back DDONE, 16byte burst enable RX/TX DMA */
833 	tmp = FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN;
834 	if (sc->rt_chipid == RT_CHIPID_MT7620)
836 	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
838 	/* disable interrupts mitigation */
839 	RT_WRITE(sc, sc->delay_int_cfg, 0);
841 	/* clear pending interrupts */
842 	RT_WRITE(sc, sc->fe_int_status, 0xffffffff);
844 	/* enable interrupts */
845 	if (sc->rt_chipid == RT_CHIPID_RT5350 ||
846 	    sc->rt_chipid == RT_CHIPID_MT7620)
847 		tmp = RT5350_INT_TX_COHERENT |
848 		    RT5350_INT_RX_COHERENT |
849 		    RT5350_INT_TXQ3_DONE |
850 		    RT5350_INT_TXQ2_DONE |
851 		    RT5350_INT_TXQ1_DONE |
852 		    RT5350_INT_TXQ0_DONE |
853 		    RT5350_INT_RXQ1_DONE |
854 		    RT5350_INT_RXQ0_DONE;
/* RT305x mask construction (else branch) elided from this listing. */
873 	sc->intr_enable_mask = tmp;
875 	RT_WRITE(sc, sc->fe_int_enable, tmp);
877 	if (rt_txrx_enable(sc) != 0)
880 #ifdef IF_RT_PHY_SUPPORT
881 	if (mii) mii_mediachg(mii);
882 #endif /* IF_RT_PHY_SUPPORT */
884 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
885 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
887 	sc->periodic_round = 0;
/* Periodic housekeeping fires 10 times per second. */
889 	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
898 * rt_init - lock and initialize device.
912 * rt_stop_locked - stop TX/RX w/ lock
/*
 * Marks the interface down, cancels timers, blocks the taskqueue and
 * drains in-flight tasks (dropping the softc lock around the drains --
 * the unlock/lock lines are elided), then masks interrupts and resets
 * the packet scheduling engine.
 */
915 rt_stop_locked(void *priv)
923 	RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");
925 	RT_SOFTC_ASSERT_LOCKED(sc);
927 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
928 	callout_stop(&sc->periodic_ch);
929 	callout_stop(&sc->tx_watchdog_ch);
931 	taskqueue_block(sc->taskqueue);
934 	 * Sometimes rt_stop_locked is called from the ISR and we panic;
935 	 * fixed where found.
938 	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
939 	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
940 	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
944 	/* disable interrupts */
945 	RT_WRITE(sc, sc->fe_int_enable, 0);
947 	if(sc->rt_chipid == RT_CHIPID_RT5350 ||
948 	   sc->rt_chipid == RT_CHIPID_MT7620) {
/* RT305x family: reset PSE and re-program GDMA forwarding defaults. */
951 		RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
953 		RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
955 		    GDM_ICS_EN | /* Enable IP Csum */
956 		    GDM_TCS_EN | /* Enable TCP Csum */
957 		    GDM_UCS_EN | /* Enable UDP Csum */
958 		    GDM_STRPCRC | /* Strip CRC from packet */
959 		    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
960 		    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
961 		    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
962 		    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT /* Forward Other to CPU */
979 * rt_tx_data - transmit packet.
/*
 * Loads the mbuf chain for DMA (collapsing it on fragment overflow),
 * verifies descriptor availability, then packs the DMA segments two per
 * hardware Tx descriptor (sdp0/sdl0 and sdp1/sdl1), kicking the DMA
 * engine by advancing TX_CTX_IDX at the end.
 * Caller must hold the per-queue Tx ring lock (asserted below).
 * Returns 0 on success; error returns on the elided failure paths.
 */
982 rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
985 	struct rt_softc_tx_ring *ring;
986 	struct rt_softc_tx_data *data;
987 	struct rt_txdesc *desc;
989 	bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
990 	int error, ndmasegs, ndescs, i;
992 	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
993 	    ("%s: Tx data: invalid qid=%d\n",
994 	    device_get_nameunit(sc->dev), qid));
996 	RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);
999 	ring = &sc->tx_ring[qid];
1000 	desc = &ring->desc[ring->desc_cur];
1001 	data = &ring->data[ring->data_cur];
1003 	error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
1004 	    dma_seg, &ndmasegs, 0);
1006 	/* too many fragments, linearize */
1008 		RT_DPRINTF(sc, RT_DEBUG_TX,
1009 		    "could not load mbuf DMA map, trying to linearize "
1010 		    "mbuf: ndmasegs=%d, len=%d, error=%d\n",
1011 		    ndmasegs, m->m_pkthdr.len, error);
/* Collapse into at most 16 clusters, then retry the DMA load. */
1013 		m_d = m_collapse(m, M_NOWAIT, 16);
1021 		sc->tx_defrag_packets++;
1023 		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
1024 		    data->dma_map, m, dma_seg, &ndmasegs, 0);
1026 			device_printf(sc->dev, "could not load mbuf DMA map: "
1027 			    "ndmasegs=%d, len=%d, error=%d\n",
1028 			    ndmasegs, m->m_pkthdr.len, error);
1034 	if (m->m_pkthdr.len == 0)
1037 	/* determine how many Tx descs are required */
/* Each descriptor carries up to two DMA segments. */
1038 	ndescs = 1 + ndmasegs / 2;
1039 	if ((ring->desc_queued + ndescs) >
1040 	    (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
1041 		RT_DPRINTF(sc, RT_DEBUG_TX,
1042 		    "there are not enough Tx descs\n");
1044 		sc->no_tx_desc_avail++;
1046 		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
1053 	/* set up Tx descs */
1054 	for (i = 0; i < ndmasegs; i += 2) {
1056 		/* TODO: this needs to be refined as MT7620 for example has
1057 		 * a different word3 layout than RT305x and RT5350 (the last
1058 		 * one doesn't use word3 at all).
1061 		/* Set destination */
1062 		if (sc->rt_chipid != RT_CHIPID_MT7620)
1063 			desc->dst = (TXDSCR_DST_PORT_GDMA1);
/* Offload checksum generation to hardware when TXCSUM is enabled. */
1065 		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1066 			desc->dst |= (TXDSCR_IP_CSUM_GEN|TXDSCR_UDP_CSUM_GEN|
1067 			    TXDSCR_TCP_CSUM_GEN);
/* Segment pair: sdp/sdl0 is the even segment, sdp/sdl1 the odd one;
 * LASTSEG flags mark the final segment of the packet. */
1075 		desc->sdp0 = htole32(dma_seg[i].ds_addr);
1076 		desc->sdl0 = htole16(dma_seg[i].ds_len |
1077 		    ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 ));
1079 		if ((i+1) < ndmasegs) {
1080 			desc->sdp1 = htole32(dma_seg[i+1].ds_addr);
1081 			desc->sdl1 = htole16(dma_seg[i+1].ds_len |
1082 			    ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 ));
/* More segments follow: advance to the next descriptor in the ring. */
1088 		if ((i+2) < ndmasegs) {
1089 			ring->desc_queued++;
1090 			ring->desc_cur = (ring->desc_cur + 1) %
1091 			    RT_SOFTC_TX_RING_DESC_COUNT;
1093 			desc = &ring->desc[ring->desc_cur];
/* NOTE(review): dma_seg[1..4] may be uninitialized when ndmasegs < 5;
 * this is debug-only output but reads indeterminate stack values. */
1096 	RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
1097 	    "DMA ds_len=%d/%d/%d/%d/%d\n",
1098 	    m->m_pkthdr.len, ndmasegs,
1099 	    (int) dma_seg[0].ds_len,
1100 	    (int) dma_seg[1].ds_len,
1101 	    (int) dma_seg[2].ds_len,
1102 	    (int) dma_seg[3].ds_len,
1103 	    (int) dma_seg[4].ds_len);
/* Flush CPU writes before handing ownership to the DMA engine. */
1105 	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
1106 	    BUS_DMASYNC_PREWRITE);
1107 	bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1108 	    BUS_DMASYNC_PREWRITE);
1109 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1110 	    BUS_DMASYNC_PREWRITE);
1112 	ring->desc_queued++;
1113 	ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;
1115 	ring->data_queued++;
1116 	ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;
/* Doorbell: tell hardware new descriptors are ready on this queue. */
1119 	RT_WRITE(sc, sc->tx_ctx_idx[qid], ring->desc_cur);
1125 * rt_start - start Transmit/Receive
/*
 * ifnet if_start method: dequeues packets from if_snd (loop head elided)
 * and hands them to rt_tx_data() on queue 0.  On a full ring the packet
 * is dropped (or requeued in elided lines), OACTIVE is set and an output
 * error is counted.  Arms the Tx watchdog after queuing.
 */
1128 rt_start(struct ifnet *ifp)
1130 	struct rt_softc *sc;
1132 	int qid = 0 /* XXX must check QoS priority */;
1136 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1140 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1144 		m->m_pkthdr.rcvif = NULL;
1146 		RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);
1148 		if (sc->tx_ring[qid].data_queued >=
1149 		    RT_SOFTC_TX_RING_DATA_COUNT) {
1150 			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
1152 			RT_DPRINTF(sc, RT_DEBUG_TX,
1153 			    "if_start: Tx ring with qid=%d is full\n", qid);
1157 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1158 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1160 			sc->tx_data_queue_full[qid]++;
1165 		if (rt_tx_data(sc, m, qid) != 0) {
1166 			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
1168 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1173 		RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
/* Re-arm the Tx watchdog; rt_tx_eof presumably clears tx_timer. */
1174 		sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
1175 		callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
1180 * rt_update_promisc - set/clear promiscuous mode. Unused yet, because
1181 * filtering done by attached Ethernet switch.
/* Currently only logs the transition; no hardware is touched. */
1184 rt_update_promisc(struct ifnet *ifp)
1186 	struct rt_softc *sc;
1189 	printf("%s: %s promiscuous mode\n",
1190 	    device_get_nameunit(sc->dev),
1191 	    (ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
1195 * rt_ioctl - ioctl handler.
/*
 * Handles SIOCSIFFLAGS (up/down/promisc transitions, under the softc
 * lock), SIOC[GS]IFMEDIA (delegated to MII or the fixed ifmedia), and
 * forwards everything else to ether_ioctl().  Case labels and some
 * braces are elided from this listing.
 */
1198 rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1200 	struct rt_softc *sc;
1202 #ifdef IF_RT_PHY_SUPPORT
1203 	struct mii_data *mii;
1204 #endif /* IF_RT_PHY_SUPPORT */
1205 	int error, startall;
1208 	ifr = (struct ifreq *) data;
1216 		if (ifp->if_flags & IFF_UP) {
1217 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Only promisc-relevant flag changes require action while running. */
1218 				if ((ifp->if_flags ^ sc->if_flags) &
1220 					rt_update_promisc(ifp);
1226 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1229 		sc->if_flags = ifp->if_flags;
1230 		RT_SOFTC_UNLOCK(sc);
1234 #ifdef IF_RT_PHY_SUPPORT
1235 		mii = device_get_softc(sc->rt_miibus);
1236 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1238 		error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
1239 #endif /* IF_RT_PHY_SUPPORT */
1242 		error = ether_ioctl(ifp, cmd, data);
1249 * rt_periodic - Handler of PERIODIC interrupt
/* Callout body: defers the real work to the taskqueue (softirq-safe). */
1252 rt_periodic(void *arg)
1254 	struct rt_softc *sc;
1257 	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
1258 	taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
1262 * rt_tx_watchdog - Handler of TX Watchdog
/*
 * One-second callout: counts tx_timer down; on expiry logs a timeout,
 * bumps the error counters (reset intentionally disabled, see XXX) and
 * re-arms itself.
 */
1265 rt_tx_watchdog(void *arg)
1267 	struct rt_softc *sc;
1273 	if (sc->tx_timer == 0)
1276 	if (--sc->tx_timer == 0) {
1277 		device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
1280 		 * XXX: Commented out, because reset break input.
1285 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1286 		sc->tx_watchdog_timeouts++;
1288 	callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
/*
 * The following nine helpers are one-line handlers for individual frame
 * engine interrupt causes (dispatched from rt_intr()).  Each merely logs
 * the condition at RT_DEBUG_INTR level; no recovery action is taken.
 */
1292 * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt
1295 rt_cnt_ppe_af(struct rt_softc *sc)
1298 	RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
1302 * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt
1305 rt_cnt_gdm_af(struct rt_softc *sc)
1308 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1309 	    "GDMA 1 & 2 Counter Table Almost Full\n");
1313 * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt
1316 rt_pse_p2_fc(struct rt_softc *sc)
1319 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1320 	    "PSE port2 (GDMA 2) flow control asserted.\n");
1324 * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error
1328 rt_gdm_crc_drop(struct rt_softc *sc)
1331 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1332 	    "GDMA 1 & 2 discard a packet due to CRC error\n");
1336 * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt
1339 rt_pse_buf_drop(struct rt_softc *sc)
1342 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1343 	    "PSE discards a packet due to buffer sharing limitation\n");
1347 * rt_gdm_other_drop - Handler of discard on other reason interrupt
1350 rt_gdm_other_drop(struct rt_softc *sc)
1353 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1354 	    "GDMA 1 & 2 discard a packet due to other reason\n");
1358 * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt
1361 rt_pse_p1_fc(struct rt_softc *sc)
1364 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1365 	    "PSE port1 (GDMA 1) flow control asserted.\n");
1369 * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt
1372 rt_pse_p0_fc(struct rt_softc *sc)
1375 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1376 	    "PSE port0 (CDMA) flow control asserted.\n");
1380 * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt
1383 rt_pse_fq_empty(struct rt_softc *sc)
1386 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1387 	    "PSE free Q empty threshold reached & forced drop "
1388 	    "condition occurred.\n")
1392 * rt_intr - main ISR
/*
 * Filter interrupt handler for the RT305x family: reads FE_INT_STATUS,
 * acknowledges all pending causes by writing the value back, then fans
 * out to the per-cause helpers.  Rx/Tx completion work is deferred (the
 * helpers called on *_DONE bits enqueue taskqueue tasks).
 */
1397 	struct rt_softc *sc;
1404 	/* acknowledge interrupts */
1405 	status = RT_READ(sc, sc->fe_int_status);
1406 	RT_WRITE(sc, sc->fe_int_status, status);
1408 	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);
1410 	if (status == 0xffffffff ||	/* device likely went away */
1411 	    status == 0)		/* not for us */
1416 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1419 	if (status & CNT_PPE_AF)
1422 	if (status & CNT_GDM_AF)
1425 	if (status & PSE_P2_FC)
1428 	if (status & GDM_CRC_DROP)
1429 		rt_gdm_crc_drop(sc);
1431 	if (status & PSE_BUF_DROP)
1432 		rt_pse_buf_drop(sc);
1434 	if (status & GDM_OTHER_DROP)
1435 		rt_gdm_other_drop(sc);
1437 	if (status & PSE_P1_FC)
1440 	if (status & PSE_P0_FC)
1443 	if (status & PSE_FQ_EMPTY)
1444 		rt_pse_fq_empty(sc);
1446 	if (status & INT_TX_COHERENT)
1447 		rt_tx_coherent_intr(sc);
1449 	if (status & INT_RX_COHERENT)
1450 		rt_rx_coherent_intr(sc);
1452 	if (status & RX_DLY_INT)
1453 		rt_rx_delay_intr(sc);
1455 	if (status & TX_DLY_INT)
1456 		rt_tx_delay_intr(sc);
1458 	if (status & INT_RX_DONE)
1461 	if (status & INT_TXQ3_DONE)
1464 	if (status & INT_TXQ2_DONE)
1467 	if (status & INT_TXQ1_DONE)
1470 	if (status & INT_TXQ0_DONE)
1475 * rt_rt5350_intr - main ISR for Ralink 5350 SoC
/*
 * Same ack-then-dispatch structure as rt_intr(), but for the smaller
 * RT5350/MT7620 interrupt status layout (coherent, delay, and per-queue
 * Rx/Tx done bits only; no PSE/GDMA cause bits).
 */
1478 rt_rt5350_intr(void *arg)
1480 	struct rt_softc *sc;
1487 	/* acknowledge interrupts */
1488 	status = RT_READ(sc, sc->fe_int_status);
1489 	RT_WRITE(sc, sc->fe_int_status, status);
1491 	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);
1493 	if (status == 0xffffffff ||     /* device likely went away */
1494 	    status == 0)                /* not for us */
1499 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1502 	if (status & RT5350_INT_TX_COHERENT)
1503 		rt_tx_coherent_intr(sc);
1504 	if (status & RT5350_INT_RX_COHERENT)
1505 		rt_rx_coherent_intr(sc);
1506 	if (status & RT5350_RX_DLY_INT)
1507 		rt_rx_delay_intr(sc);
1508 	if (status & RT5350_TX_DLY_INT)
1509 		rt_tx_delay_intr(sc);
1510 	if (status & RT5350_INT_RXQ1_DONE)
1512 	if (status & RT5350_INT_RXQ0_DONE)
1514 	if (status & RT5350_INT_TXQ3_DONE)
1516 	if (status & RT5350_INT_TXQ2_DONE)
1518 	if (status & RT5350_INT_TXQ1_DONE)
1520 	if (status & RT5350_INT_TXQ0_DONE)
/*
 * Tx coherency-error recovery: stop the TX side of the DMA engine
 * (clear FE_TX_WB_DDONE and FE_TX_DMA_EN in pdma_glo_cfg), reset every
 * TX ring, and reprogram each ring's base/count/ctx-index registers.
 * NOTE(review): the code that re-enables the DMA engine afterwards is
 * on elided listing lines.
 */
1525 rt_tx_coherent_intr(struct rt_softc *sc)
1530 RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");
1532 sc->tx_coherent_interrupts++;
1534 /* restart DMA engine */
1535 tmp = RT_READ(sc, sc->pdma_glo_cfg);
1536 tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
1537 RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
1539 for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
1540 rt_reset_tx_ring(sc, &sc->tx_ring[i]);
1542 for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
1543 RT_WRITE(sc, sc->tx_base_ptr[i],
1544 sc->tx_ring[i].desc_phys_addr);
1545 RT_WRITE(sc, sc->tx_max_cnt[i],
1546 RT_SOFTC_TX_RING_DESC_COUNT);
1547 RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
1554 * rt_rx_coherent_intr
/*
 * Rx coherency-error recovery: mirror of rt_tx_coherent_intr() for the
 * RX side — disable FE_RX_DMA_EN, reset all RX rings, and reprogram
 * base/max-count/calc-index registers for each ring.
 */
1557 rt_rx_coherent_intr(struct rt_softc *sc)
1562 RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");
1564 sc->rx_coherent_interrupts++;
1566 /* restart DMA engine */
1567 tmp = RT_READ(sc, sc->pdma_glo_cfg);
1568 tmp &= ~(FE_RX_DMA_EN);
1569 RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
1572 for (i = 0; i < sc->rx_ring_count; i++)
1573 rt_reset_rx_ring(sc, &sc->rx_ring[i]);
1575 for (i = 0; i < sc->rx_ring_count; i++) {
1576 RT_WRITE(sc, sc->rx_base_ptr[i],
1577 sc->rx_ring[i].desc_phys_addr);
1578 RT_WRITE(sc, sc->rx_max_cnt[i],
1579 RT_SOFTC_RX_RING_DATA_COUNT);
1580 RT_WRITE(sc, sc->rx_calc_idx[i],
1581 RT_SOFTC_RX_RING_DATA_COUNT - 1);
1588 * rt_rx_intr - a packet received
/*
 * RX-done interrupt for queue qid: defers the actual receive work to
 * rx_done_task.  If this queue's RX-done interrupt isn't already masked,
 * mask it and enqueue the task; always record the queue's pending bit.
 * NOTE(review): the matching RT_SOFTC_LOCK for the UNLOCK below is on an
 * elided listing line.
 */
1591 rt_rx_intr(struct rt_softc *sc, int qid)
1593 KASSERT(qid >= 0 && qid < sc->rx_ring_count,
1594 ("%s: Rx interrupt: invalid qid=%d\n",
1595 device_get_nameunit(sc->dev), qid));
1597 RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
1598 sc->rx_interrupts[qid]++;
1601 if (!(sc->intr_disable_mask & (sc->int_rx_done_mask << qid))) {
1602 rt_intr_disable(sc, (sc->int_rx_done_mask << qid));
1603 taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
1606 sc->intr_pending_mask |= (sc->int_rx_done_mask << qid);
1607 RT_SOFTC_UNLOCK(sc);
/* Counts RX delay interrupts; no further processing here. */
1611 rt_rx_delay_intr(struct rt_softc *sc)
1614 RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
1615 sc->rx_delay_interrupts++;
/* Counts TX delay interrupts; no further processing here. */
1619 rt_tx_delay_intr(struct rt_softc *sc)
1622 RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
1623 sc->tx_delay_interrupts++;
1627 * rt_tx_intr - Transmission of packet done
/*
 * TX-done interrupt for queue qid: mirror of rt_rx_intr() — mask the
 * queue's TX-done interrupt if not already masked, enqueue tx_done_task,
 * and record the queue's pending bit.
 * NOTE(review): the matching RT_SOFTC_LOCK for the UNLOCK below is on an
 * elided listing line.
 */
1630 rt_tx_intr(struct rt_softc *sc, int qid)
1633 KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
1634 ("%s: Tx interrupt: invalid qid=%d\n",
1635 device_get_nameunit(sc->dev), qid));
1637 RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);
1639 sc->tx_interrupts[qid]++;
1642 if (!(sc->intr_disable_mask & (sc->int_tx_done_mask << qid))) {
1643 rt_intr_disable(sc, (sc->int_tx_done_mask << qid));
1644 taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
1647 sc->intr_pending_mask |= (sc->int_tx_done_mask << qid);
1648 RT_SOFTC_UNLOCK(sc);
1652 * rt_rx_done_task - run RX task
/*
 * Taskqueue handler for deferred receive processing: clears the RX
 * pending bit, drains up to rx_process_limit frames from ring 0 via
 * rt_rx_eof(), then either reschedules itself (more work pending or
 * limit hit) or re-enables the RX-done interrupt.
 */
1655 rt_rx_done_task(void *context, int pending)
1657 struct rt_softc *sc;
1664 RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");
1666 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1669 sc->intr_pending_mask &= ~sc->int_rx_done_mask;
1671 again = rt_rx_eof(sc, &sc->rx_ring[0], sc->rx_process_limit);
1675 if ((sc->intr_pending_mask & sc->int_rx_done_mask) || again) {
1676 RT_DPRINTF(sc, RT_DEBUG_RX,
1677 "Rx done task: scheduling again\n");
1678 taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
1680 rt_intr_enable(sc, sc->int_rx_done_mask);
1683 RT_SOFTC_UNLOCK(sc);
1687 * rt_tx_done_task - check for pending TX task in all queues
/*
 * Taskqueue handler for deferred transmit completion: walks TX queues
 * from highest to lowest, reclaiming completed frames (rt_tx_eof) for
 * each queue whose pending bit is set, then clears IFF_DRV_OACTIVE,
 * re-enables the chip-appropriate TXQ*-done interrupts that are masked
 * but not pending, reschedules itself if still pending, and finally
 * restarts transmission if the send queue is non-empty.
 * NOTE(review): the legacy (non-RT5350/MT7620) intr_mask assignment and
 * the start call after the IFQ_IS_EMPTY test are on elided listing lines.
 */
1690 rt_tx_done_task(void *context, int pending)
1692 struct rt_softc *sc;
1700 RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");
1702 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1705 for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
1706 if (sc->intr_pending_mask & (sc->int_tx_done_mask << i)) {
1707 sc->intr_pending_mask &= ~(sc->int_tx_done_mask << i);
1708 rt_tx_eof(sc, &sc->tx_ring[i]);
1714 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1716 if(sc->rt_chipid == RT_CHIPID_RT5350 ||
1717 sc->rt_chipid == RT_CHIPID_MT7620)
1719 RT5350_INT_TXQ3_DONE |
1720 RT5350_INT_TXQ2_DONE |
1721 RT5350_INT_TXQ1_DONE |
1722 RT5350_INT_TXQ0_DONE);
1732 rt_intr_enable(sc, ~sc->intr_pending_mask &
1733 (sc->intr_disable_mask & intr_mask));
1735 if (sc->intr_pending_mask & intr_mask) {
1736 RT_DPRINTF(sc, RT_DEBUG_TX,
1737 "Tx done task: scheduling again\n");
1738 taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
1741 RT_SOFTC_UNLOCK(sc);
1743 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1748 * rt_periodic_task - run periodic task
/*
 * Periodic housekeeping (rescheduled every hz/10 via the periodic
 * callout): bumps the round counter, refreshes driver statistics, and
 * every 10th round pulls the raw hardware counters as well.
 */
1751 rt_periodic_task(void *context, int pending)
1753 struct rt_softc *sc;
1759 RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
1760 sc->periodic_round);
1762 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1766 sc->periodic_round++;
1767 rt_update_stats(sc);
1769 if ((sc->periodic_round % 10) == 0) {
1770 rt_update_raw_counters(sc);
1774 RT_SOFTC_UNLOCK(sc);
1775 callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
1779 * rt_rx_eof - check for frames that done by DMA engine and pass it into
1780 * network subsystem.
/*
 * Drains up to `limit` completed RX descriptors from `ring`: for each
 * descriptor the hardware has finished (DDONE set, current index behind
 * the hardware DRX index), a fresh jumbo-page mbuf is allocated and
 * DMA-mapped to replace the filled one, the filled mbuf is handed to
 * if_input() with checksum-offload results translated to mbuf flags,
 * and the descriptor is recycled.  Returns nonzero iff the limit was
 * exhausted, i.e. more frames may still be waiting (caller reschedules).
 */
1783 rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit)
1786 /* struct rt_softc_rx_ring *ring; */
1787 struct rt_rxdesc *desc;
1788 struct rt_softc_rx_data *data;
1789 struct mbuf *m, *mnew;
1790 bus_dma_segment_t segs[1];
1791 bus_dmamap_t dma_map;
1792 uint32_t index, desc_flags;
1793 int error, nsegs, len, nframes;
1796 /* ring = &sc->rx_ring[0]; */
1800 while (limit != 0) {
1801 index = RT_READ(sc, sc->rx_drx_idx[0]);
1802 if (ring->cur == index)
1805 desc = &ring->desc[ring->cur];
1806 data = &ring->data[ring->cur];
1808 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1809 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1812 if ( sc->debug & RT_DEBUG_RX ) {
/*
 * NOTE(review): casting a pointer to u_int for %#08x truncates on
 * 64-bit platforms; %p with the plain pointer would be correct.
 */
1813 printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
1814 hexdump(desc, 16, 0, 0);
1815 printf("-----------------------------------\n");
1819 /* XXX Sometime device don`t set DDONE bit */
1821 if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
1822 RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
1827 len = le16toh(desc->sdl0) & 0x3fff;
1828 RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);
1832 mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1835 sc->rx_mbuf_alloc_errors++;
1836 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1840 mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;
1842 error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
1843 ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
1845 RT_DPRINTF(sc, RT_DEBUG_RX,
1846 "could not load Rx mbuf DMA map: "
1847 "error=%d, nsegs=%d\n",
1852 sc->rx_mbuf_dmamap_errors++;
1853 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1858 KASSERT(nsegs == 1, ("%s: too many DMA segments",
1859 device_get_nameunit(sc->dev)));
1861 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1862 BUS_DMASYNC_POSTREAD);
1863 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
/* Swap the just-loaded spare map into this slot; old map becomes the spare. */
1865 dma_map = data->dma_map;
1866 data->dma_map = ring->spare_dma_map;
1867 ring->spare_dma_map = dma_map;
1869 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1870 BUS_DMASYNC_PREREAD);
1873 desc_flags = desc->src;
1876 /* Add 2 for proper align of RX IP header */
1877 desc->sdp0 = htole32(segs[0].ds_addr+2);
/*
 * NOTE(review): sdl0 is treated as 16-bit elsewhere in this function
 * (htole16/le16toh above and below) but written with htole32 here —
 * confirm the descriptor field width against the rt_rxdesc layout.
 */
1878 desc->sdl0 = htole32(segs[0].ds_len-2);
1883 RT_DPRINTF(sc, RT_DEBUG_RX,
1884 "Rx frame: rxdesc flags=0x%08x\n", desc_flags);
1886 m->m_pkthdr.rcvif = ifp;
1887 /* Add 2 to fix data align, after sdp0 = addr + 2 */
1889 m->m_pkthdr.len = m->m_len = len;
1891 /* check for crc errors */
1892 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1893 /*check for valid checksum*/
1894 if (desc_flags & (RXDSXR_SRC_IP_CSUM_FAIL|
1895 RXDSXR_SRC_L4_CSUM_FAIL)) {
1896 RT_DPRINTF(sc, RT_DEBUG_RX,
1897 "rxdesc: crc error\n");
1899 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1901 if (!(ifp->if_flags & IFF_PROMISC)) {
1906 if ((desc_flags & RXDSXR_SRC_IP_CSUM_FAIL) != 0) {
1907 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1908 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1909 m->m_pkthdr.csum_data = 0xffff;
1911 m->m_flags &= ~M_HASFCS;
1914 (*ifp->if_input)(ifp, m);
1916 desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
1918 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1919 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1921 ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;
1927 RT_WRITE(sc, sc->rx_calc_idx[0],
1928 RT_SOFTC_RX_RING_DATA_COUNT - 1);
1930 RT_WRITE(sc, sc->rx_calc_idx[0],
1933 RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);
1935 sc->rx_packets += nframes;
/* Nonzero return == limit exhausted: tell the caller more frames may remain. */
1937 return (limit == 0);
1941 * rt_tx_eof - check for successful transmitted frames and mark their
1942 * descriptor as free.
/*
 * Reclaims TX descriptors up to the hardware's DTX index: for each
 * completed descriptor, if it closes a frame (LASTSEG in sdl0 or sdl1)
 * the frame's data map is unloaded, the output-packet counter bumped,
 * and the data ring advanced; in all cases DDONE is cleared and the
 * descriptor ring advanced.  Ring bookkeeping is done under the
 * per-ring lock.
 */
1945 rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
1948 struct rt_txdesc *desc;
1949 struct rt_softc_tx_data *data;
1951 int ndescs, nframes;
1959 index = RT_READ(sc, sc->tx_dtx_idx[ring->qid]);
1960 if (ring->desc_next == index)
1965 desc = &ring->desc[ring->desc_next];
1967 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1968 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1970 if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
1971 desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
1974 data = &ring->data[ring->data_next];
1976 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1977 BUS_DMASYNC_POSTWRITE);
1978 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
1984 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1986 RT_SOFTC_TX_RING_LOCK(ring);
1987 ring->data_queued--;
1988 ring->data_next = (ring->data_next + 1) %
1989 RT_SOFTC_TX_RING_DATA_COUNT;
1990 RT_SOFTC_TX_RING_UNLOCK(ring);
1993 desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);
1995 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1996 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1998 RT_SOFTC_TX_RING_LOCK(ring);
1999 ring->desc_queued--;
2000 ring->desc_next = (ring->desc_next + 1) %
2001 RT_SOFTC_TX_RING_DESC_COUNT;
2002 RT_SOFTC_TX_RING_UNLOCK(ring);
2005 RT_DPRINTF(sc, RT_DEBUG_TX,
2006 "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
2011 * rt_update_stats - query statistics counters and update related variables.
/* Currently a stub: only emits a debug message (see XXX below). */
2014 rt_update_stats(struct rt_softc *sc)
2019 RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistic: \n");
2020 /* XXX do update stats here */
2024 * rt_watchdog - reinit device on watchdog event.
/*
 * Watchdog: on legacy chips (not RT5350/MT7620) samples the PSE queue
 * status and, for each of port0/port1 input queues that is non-empty,
 * bumps a diagnostic counter and polls up to 10 times for the queue to
 * drain.  Per the XXX below, no reset is actually performed here.
 * NOTE(review): the first read targets CDMA_OQ_STA while the debug
 * message and the retry loops reference PSE_IQ_STA — confirm whether the
 * initial register is intentional.
 */
2027 rt_watchdog(struct rt_softc *sc)
2033 if(sc->rt_chipid != RT_CHIPID_RT5350 &&
2034 sc->rt_chipid != RT_CHIPID_MT7620) {
2035 tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);
2037 RT_DPRINTF(sc, RT_DEBUG_WATCHDOG,
2038 "watchdog: PSE_IQ_STA=0x%08x\n", tmp);
2040 /* XXX: do not reset */
2042 if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
2043 sc->tx_queue_not_empty[0]++;
2045 for (ntries = 0; ntries < 10; ntries++) {
2046 tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2047 if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
2054 if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
2055 sc->tx_queue_not_empty[1]++;
2057 for (ntries = 0; ntries < 10; ntries++) {
2058 tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2059 if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
2069 * rt_update_raw_counters - update counters.
/*
 * Accumulates the GDMA0 hardware TX/RX counter registers into the
 * driver's software totals.  NOTE(review): the hardware counters read
 * here appear to be accumulated with += — whether they are
 * clear-on-read is not visible in this listing; confirm to rule out
 * double counting.
 */
2072 rt_update_raw_counters(struct rt_softc *sc)
2075 sc->tx_bytes += RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
2076 sc->tx_packets += RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
2077 sc->tx_skip += RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
2078 sc->tx_collision+= RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);
2080 sc->rx_bytes += RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
2081 sc->rx_packets += RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
2082 sc->rx_crc_err += RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
2083 sc->rx_short_err+= RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
2084 sc->rx_long_err += RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
2085 sc->rx_phy_err += RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
2086 sc->rx_fifo_overflows+= RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
/*
 * Unmasks intr_mask: clears the bits from the software disable mask and
 * writes the effective mask (enable & ~disable) to the FE enable register.
 */
2090 rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
2094 sc->intr_disable_mask &= ~intr_mask;
2095 tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2096 RT_WRITE(sc, sc->fe_int_enable, tmp);
/*
 * Masks intr_mask: sets the bits in the software disable mask and writes
 * the effective mask (enable & ~disable) to the FE enable register.
 */
2100 rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
2104 sc->intr_disable_mask |= intr_mask;
2105 tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2106 RT_WRITE(sc, sc->fe_int_enable, tmp);
2110 * rt_txrx_enable
/*
 * Enables TX/RX DMA: polls pdma_glo_cfg up to 200 times for the DMA
 * engine to go idle, reports a timeout otherwise, then sets
 * FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN.  tmp retains the last
 * polled register value when the enable bits are OR-ed in.
 */
2113 rt_txrx_enable(struct rt_softc *sc)
2121 /* enable Tx/Rx DMA engine */
2122 for (ntries = 0; ntries < 200; ntries++) {
2123 tmp = RT_READ(sc, sc->pdma_glo_cfg);
2124 if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
2130 if (ntries == 200) {
2131 device_printf(sc->dev, "timeout waiting for DMA engine\n");
2137 tmp |= FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
2138 RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
2140 /* XXX set Rx filter */
2145 * rt_alloc_rx_ring - allocate RX DMA ring buffer
/*
 * Allocates one RX ring: a coherent descriptor array (tag + memory +
 * load), a data tag for MJUMPAGESIZE buffers, one DMA map plus one
 * preloaded jumbo mbuf per slot, and a spare map used by rt_rx_eof()
 * for buffer swapping.  On any failure the partially built ring is torn
 * down with rt_free_rx_ring().
 */
2148 rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid)
2150 struct rt_rxdesc *desc;
2151 struct rt_softc_rx_data *data;
2152 bus_dma_segment_t segs[1];
2153 int i, nsegs, error;
2155 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2156 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2157 RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
2158 RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2159 0, NULL, NULL, &ring->desc_dma_tag);
2161 device_printf(sc->dev,
2162 "could not create Rx desc DMA tag\n");
2166 error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2167 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2169 device_printf(sc->dev,
2170 "could not allocate Rx desc DMA memory\n");
2174 error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2176 RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2177 rt_dma_map_addr, &ring->desc_phys_addr, 0);
2179 device_printf(sc->dev, "could not load Rx desc DMA map\n");
2183 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2184 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2185 MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
2186 &ring->data_dma_tag);
2188 device_printf(sc->dev,
2189 "could not create Rx data DMA tag\n");
2193 for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2194 desc = &ring->desc[i];
2195 data = &ring->data[i];
2197 error = bus_dmamap_create(ring->data_dma_tag, 0,
2200 device_printf(sc->dev, "could not create Rx data DMA "
2205 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
2207 if (data->m == NULL) {
2208 device_printf(sc->dev, "could not allocate Rx mbuf\n");
2213 data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;
2215 error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
2216 data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
2218 device_printf(sc->dev,
2219 "could not load Rx mbuf DMA map\n");
2223 KASSERT(nsegs == 1, ("%s: too many DMA segments",
2224 device_get_nameunit(sc->dev)));
2226 /* Add 2 for proper align of RX IP header */
2227 desc->sdp0 = htole32(segs[0].ds_addr+2);
2228 desc->sdl0 = htole32(segs[0].ds_len-2);
2231 error = bus_dmamap_create(ring->data_dma_tag, 0,
2232 &ring->spare_dma_map);
2234 device_printf(sc->dev,
2235 "could not create Rx spare DMA map\n");
2239 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2240 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2245 rt_free_rx_ring(sc, ring);
2250 * rt_reset_rx_ring - reset RX ring buffer
/*
 * Returns every RX descriptor to the hardware by clearing its DDONE bit
 * and flushing the descriptor memory for device access.
 */
2253 rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2255 struct rt_rxdesc *desc;
2258 for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2259 desc = &ring->desc[i];
2260 desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
2263 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2264 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2269 * rt_free_rx_ring - free memory used by RX ring buffer
/*
 * Releases everything rt_alloc_rx_ring() created, tolerating a
 * partially initialized ring (every resource is NULL-checked):
 * descriptor memory/map/tag, per-slot mbufs and maps, the spare map,
 * and the data tag.
 */
2272 rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2274 struct rt_softc_rx_data *data;
2277 if (ring->desc != NULL) {
2278 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2279 BUS_DMASYNC_POSTWRITE);
2280 bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2281 bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2282 ring->desc_dma_map);
2285 if (ring->desc_dma_tag != NULL)
2286 bus_dma_tag_destroy(ring->desc_dma_tag);
2288 for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2289 data = &ring->data[i];
2291 if (data->m != NULL) {
2292 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2293 BUS_DMASYNC_POSTREAD);
2294 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2298 if (data->dma_map != NULL)
2299 bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2302 if (ring->spare_dma_map != NULL)
2303 bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);
2305 if (ring->data_dma_tag != NULL)
2306 bus_dma_tag_destroy(ring->data_dma_tag);
2310 * rt_alloc_tx_ring - allocate TX ring buffer
/*
 * Allocates one TX ring: the per-ring mutex, a coherent descriptor
 * array (tag + memory + load), a coherent "seg0" staging area of
 * RT_TX_DATA_SEG0_SIZE bytes per slot, a scatter-capable data tag
 * (up to RT_SOFTC_MAX_SCATTER segments), and one DMA map per data
 * slot.  On any failure the partially built ring is torn down with
 * rt_free_tx_ring().
 */
2313 rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
2315 struct rt_softc_tx_data *data;
2318 mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
2320 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2321 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2322 RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
2323 RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
2324 0, NULL, NULL, &ring->desc_dma_tag);
2326 device_printf(sc->dev,
2327 "could not create Tx desc DMA tag\n");
2331 error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2332 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2334 device_printf(sc->dev,
2335 "could not allocate Tx desc DMA memory\n");
2339 error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2340 ring->desc, (RT_SOFTC_TX_RING_DESC_COUNT *
2341 sizeof(struct rt_txdesc)), rt_dma_map_addr,
2342 &ring->desc_phys_addr, 0);
2344 device_printf(sc->dev, "could not load Tx desc DMA map\n");
2348 ring->desc_queued = 0;
2350 ring->desc_next = 0;
2352 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2353 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2354 RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
2355 RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2356 0, NULL, NULL, &ring->seg0_dma_tag);
2358 device_printf(sc->dev,
2359 "could not create Tx seg0 DMA tag\n");
2363 error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
2364 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
2366 device_printf(sc->dev,
2367 "could not allocate Tx seg0 DMA memory\n");
2371 error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
2373 RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2374 rt_dma_map_addr, &ring->seg0_phys_addr, 0);
2376 device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
2380 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2381 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2382 MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
2383 &ring->data_dma_tag);
2385 device_printf(sc->dev,
2386 "could not create Tx data DMA tag\n");
2390 for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2391 data = &ring->data[i];
2393 error = bus_dmamap_create(ring->data_dma_tag, 0,
2396 device_printf(sc->dev, "could not create Tx data DMA "
2402 ring->data_queued = 0;
2404 ring->data_next = 0;
2410 rt_free_tx_ring(sc, ring);
2415 * rt_reset_tx_ring - reset TX ring buffer to empty state
/*
 * Clears all descriptor and data bookkeeping for the ring: zeroes the
 * queue counters and next-indices, flushes descriptor and seg0 memory,
 * and unloads/frees any mbuf still attached to a data slot (the free
 * is on an elided listing line).
 */
2418 rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2420 struct rt_softc_tx_data *data;
2421 struct rt_txdesc *desc;
2424 for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
2425 desc = &ring->desc[i];
2431 ring->desc_queued = 0;
2433 ring->desc_next = 0;
2435 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2436 BUS_DMASYNC_PREWRITE);
2438 bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2439 BUS_DMASYNC_PREWRITE);
2441 for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2442 data = &ring->data[i];
2444 if (data->m != NULL) {
2445 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2446 BUS_DMASYNC_POSTWRITE);
2447 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2453 ring->data_queued = 0;
2455 ring->data_next = 0;
2459 * rt_free_tx_ring - free TX ring buffer
/*
 * Releases everything rt_alloc_tx_ring() created, tolerating a
 * partially initialized ring: descriptor and seg0 memory/maps/tags,
 * per-slot mbufs and maps, the data tag, and the per-ring mutex.
 */
2462 rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2464 struct rt_softc_tx_data *data;
2467 if (ring->desc != NULL) {
2468 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2469 BUS_DMASYNC_POSTWRITE);
2470 bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2471 bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2472 ring->desc_dma_map);
2475 if (ring->desc_dma_tag != NULL)
2476 bus_dma_tag_destroy(ring->desc_dma_tag);
2478 if (ring->seg0 != NULL) {
2479 bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2480 BUS_DMASYNC_POSTWRITE);
2481 bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
2482 bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
2483 ring->seg0_dma_map);
2486 if (ring->seg0_dma_tag != NULL)
2487 bus_dma_tag_destroy(ring->seg0_dma_tag);
2489 for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2490 data = &ring->data[i];
2492 if (data->m != NULL) {
2493 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2494 BUS_DMASYNC_POSTWRITE);
2495 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2499 if (data->dma_map != NULL)
2500 bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2503 if (ring->data_dma_tag != NULL)
2504 bus_dma_tag_destroy(ring->data_dma_tag);
2506 mtx_destroy(&ring->lock);
2510 * rt_dma_map_addr - get address of busdma segment
/*
 * busdma load callback: asserts a single segment and stores its bus
 * address into the bus_addr_t the caller passed as arg.
 */
2513 rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2518 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2520 *(bus_addr_t *) arg = segs[0].ds_addr;
2524 * rt_sysctl_attach - attach sysctl nodes for NIC counters.
/*
 * Creates a "stats" sysctl node under the device's tree and attaches
 * one read-only leaf per driver counter (interrupt counts, per-queue
 * TX bookkeeping, error counters, raw TX/RX totals).  All leaves point
 * directly at softc fields, so they track live values.
 */
2527 rt_sysctl_attach(struct rt_softc *sc)
2529 struct sysctl_ctx_list *ctx;
2530 struct sysctl_oid *tree;
2531 struct sysctl_oid *stats;
2533 ctx = device_get_sysctl_ctx(sc->dev);
2534 tree = device_get_sysctl_tree(sc->dev);
2536 /* statistic counters */
2537 stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2538 "stats", CTLFLAG_RD, 0, "statistic");
2540 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2541 "interrupts", CTLFLAG_RD, &sc->interrupts,
2544 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2545 "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
2546 "Tx coherent interrupts");
2548 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2549 "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
2550 "Rx coherent interrupts");
2552 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2553 "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts[0],
2556 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2557 "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts,
2558 "Rx delay interrupts");
2560 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2561 "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3],
2562 "Tx AC3 interrupts");
2564 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2565 "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2],
2566 "Tx AC2 interrupts");
2568 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2569 "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1],
2570 "Tx AC1 interrupts");
2572 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2573 "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0],
2574 "Tx AC0 interrupts");
2576 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2577 "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
2578 "Tx delay interrupts");
2580 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2581 "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
2582 0, "Tx AC3 descriptors queued");
2584 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2585 "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
2586 0, "Tx AC3 data queued");
2588 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2589 "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
2590 0, "Tx AC2 descriptors queued");
2592 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2593 "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
2594 0, "Tx AC2 data queued");
2596 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2597 "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
2598 0, "Tx AC1 descriptors queued");
2600 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2601 "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
2602 0, "Tx AC1 data queued");
2604 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2605 "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
2606 0, "Tx AC0 descriptors queued");
2608 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2609 "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
2610 0, "Tx AC0 data queued");
2612 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2613 "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
2614 "Tx AC3 data queue full");
2616 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2617 "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
2618 "Tx AC2 data queue full");
2620 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2621 "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
2622 "Tx AC1 data queue full");
2624 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2625 "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
2626 "Tx AC0 data queue full");
2628 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2629 "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
2630 "Tx watchdog timeouts");
2632 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2633 "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets,
2634 "Tx defragmented packets");
2636 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2637 "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail,
2638 "no Tx descriptors available");
2640 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2641 "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
2642 "Rx mbuf allocation errors");
2644 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2645 "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
2646 "Rx mbuf DMA mapping errors");
2648 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2649 "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
2650 "Tx queue 0 not empty");
2652 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2653 "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
2654 "Tx queue 1 not empty");
2656 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2657 "rx_packets", CTLFLAG_RD, &sc->rx_packets,
2660 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2661 "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err,
2664 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2665 "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err,
2668 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2669 "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets,
2670 "Rx duplicate packets");
2672 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2673 "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows,
2674 "Rx FIFO overflows");
2676 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2677 "rx_bytes", CTLFLAG_RD, &sc->rx_bytes,
2680 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2681 "rx_long_err", CTLFLAG_RD, &sc->rx_long_err,
2682 "Rx too long frame errors");
2684 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2685 "rx_short_err", CTLFLAG_RD, &sc->rx_short_err,
2686 "Rx too short frame errors");
2688 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2689 "tx_bytes", CTLFLAG_RD, &sc->tx_bytes,
2692 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2693 "tx_packets", CTLFLAG_RD, &sc->tx_packets,
2696 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2697 "tx_skip", CTLFLAG_RD, &sc->tx_skip,
2698 "Tx skip count for GDMA ports");
2700 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2701 "tx_collision", CTLFLAG_RD, &sc->tx_collision,
2702 "Tx collision count for GDMA ports");
2705 #ifdef IF_RT_PHY_SUPPORT
2707 rt_miibus_readreg(device_t dev, int phy, int reg)
2709 struct rt_softc *sc = device_get_softc(dev);
2712 * PSEUDO_PHYAD is a special value for indicate switch attached.
2713 * No one PHY use PSEUDO_PHYAD (0x1e) address.
2716 /* Fake PHY ID for bfeswitch attach */
2719 return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
2721 return (0x40); /* As result of faking */
2722 case MII_PHYIDR2: /* PHY will detect as */
2723 return (0x6250); /* bfeswitch */
2727 /* Wait prev command done if any */
2728 while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2729 RT_WRITE(sc, MDIO_ACCESS,
2731 ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) ||
2732 ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK));
2733 while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2735 return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
2739 rt_miibus_writereg(device_t dev, int phy, int reg, int val)
2741 struct rt_softc *sc = device_get_softc(dev);
2743 /* Wait prev command done if any */
2744 while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2745 RT_WRITE(sc, MDIO_ACCESS,
2746 MDIO_CMD_ONGO || MDIO_CMD_WR ||
2747 ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) ||
2748 ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) ||
2749 (val & MDIO_PHY_DATA_MASK));
2750 while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
/*
 * miibus link-state change callback: inspects the negotiated media when
 * the link is active and valid.  Per the XXX below, per-speed handling
 * is not implemented yet.
 */
2756 rt_miibus_statchg(device_t dev)
2758 struct rt_softc *sc = device_get_softc(dev);
2759 struct mii_data *mii;
2761 mii = device_get_softc(sc->rt_miibus);
2763 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2764 (IFM_ACTIVE | IFM_AVALID)) {
2765 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2768 /* XXX check link here */
2776 #endif /* IF_RT_PHY_SUPPORT */
/*
 * newbus glue: device method table (with optional miibus methods when
 * PHY support is compiled in), driver declaration, and module
 * registrations for both nexus and simplebus attachments, depending on
 * ether and miibus.
 */
2778 static device_method_t rt_dev_methods[] =
2780 DEVMETHOD(device_probe, rt_probe),
2781 DEVMETHOD(device_attach, rt_attach),
2782 DEVMETHOD(device_detach, rt_detach),
2783 DEVMETHOD(device_shutdown, rt_shutdown),
2784 DEVMETHOD(device_suspend, rt_suspend),
2785 DEVMETHOD(device_resume, rt_resume),
2787 #ifdef IF_RT_PHY_SUPPORT
2789 DEVMETHOD(miibus_readreg, rt_miibus_readreg),
2790 DEVMETHOD(miibus_writereg, rt_miibus_writereg),
2791 DEVMETHOD(miibus_statchg, rt_miibus_statchg),
2797 static driver_t rt_driver =
2801 sizeof(struct rt_softc)
2804 static devclass_t rt_dev_class;
2806 DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
2808 DRIVER_MODULE(rt, simplebus, rt_driver, rt_dev_class, 0, 0);
2811 MODULE_DEPEND(rt, ether, 1, 1, 1);
2812 MODULE_DEPEND(rt, miibus, 1, 1, 1);