2 * Copyright (c) 2015-2016, Stanislav Galabov
3 * Copyright (c) 2014, Aleksandr A. Mityaev
4 * Copyright (c) 2011, Aleksandr Rybalko
6 * by Alexander Egorenkov <egorenar@gmail.com>
7 * and by Damien Bergamini <damien.bergamini@free.fr>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice unmodified, this list of conditions, and the following
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
40 #include <net/if_var.h>
41 #include <net/if_arp.h>
42 #include <net/ethernet.h>
43 #include <net/if_dl.h>
44 #include <net/if_media.h>
45 #include <net/if_types.h>
46 #include <net/if_vlan_var.h>
50 #include <machine/bus.h>
51 #include <machine/cache.h>
52 #include <machine/cpufunc.h>
53 #include <machine/resource.h>
54 #include <vm/vm_param.h>
57 #include <machine/pmap.h>
61 #include "opt_platform.h"
62 #include "opt_rt305x.h"
65 #include <dev/ofw/openfirm.h>
66 #include <dev/ofw/ofw_bus.h>
67 #include <dev/ofw/ofw_bus_subr.h>
70 #include <dev/mii/mii.h>
71 #include <dev/mii/miivar.h>
74 #include <dev/mdio/mdio.h>
75 #include <dev/etherswitch/miiproxy.h>
80 #include <mips/rt305x/rt305x_sysctlvar.h>
81 #include <mips/rt305x/rt305xreg.h>
84 #ifdef IF_RT_PHY_SUPPORT
85 #include "miibus_if.h"
91 #define RT_MAX_AGG_SIZE 3840
93 #define RT_TX_DATA_SEG0_SIZE MJUMPAGESIZE
95 #define RT_MS(_v, _f) (((_v) & _f) >> _f##_S)
96 #define RT_SM(_v, _f) (((_v) << _f##_S) & _f)
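/*
 * Illustration of the field macros with a hypothetical field (not one
 * defined by this driver): given
 *	#define FOO_FIELD	0x0000ff00
 *	#define FOO_FIELD_S	8
 * RT_MS(reg, FOO_FIELD) extracts bits 15:8 of reg, while
 * RT_SM(val, FOO_FIELD) shifts val into bits 15:8 and masks off the rest.
 */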
98 #define RT_TX_WATCHDOG_TIMEOUT 5
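/*
 * Seconds until the Tx watchdog fires: rt_start() arms sc->tx_timer with
 * this value and rt_tx_watchdog() counts it down from a 1 Hz callout;
 * reaching zero is treated as a Tx watchdog timeout.
 */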
100 #define RT_CHIPID_RT2880 0x2880
101 #define RT_CHIPID_RT3050 0x3050
102 #define RT_CHIPID_RT5350 0x5350
103 #define RT_CHIPID_MT7620 0x7620
104 #define RT_CHIPID_MT7621 0x7621
107 /* more specific and new models should go first */
108 static const struct ofw_compat_data rt_compat_data[] = {
109 { "ralink,rt2880-eth", RT_CHIPID_RT2880 },
110 { "ralink,rt3050-eth", RT_CHIPID_RT3050 },
111 { "ralink,rt3352-eth", RT_CHIPID_RT3050 },
112 { "ralink,rt3883-eth", RT_CHIPID_RT3050 },
113 { "ralink,rt5350-eth", RT_CHIPID_RT5350 },
114 { "ralink,mt7620a-eth", RT_CHIPID_MT7620 },
115 { "mediatek,mt7620-eth", RT_CHIPID_MT7620 },
116 { "ralink,mt7621-eth", RT_CHIPID_MT7621 },
117 { "mediatek,mt7621-eth", RT_CHIPID_MT7621 },
123 * Static function prototypes
125 static int rt_probe(device_t dev);
126 static int rt_attach(device_t dev);
127 static int rt_detach(device_t dev);
128 static int rt_shutdown(device_t dev);
129 static int rt_suspend(device_t dev);
130 static int rt_resume(device_t dev);
131 static void rt_init_locked(void *priv);
132 static void rt_init(void *priv);
133 static void rt_stop_locked(void *priv);
134 static void rt_stop(void *priv);
135 static void rt_start(struct ifnet *ifp);
136 static int rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
137 static void rt_periodic(void *arg);
138 static void rt_tx_watchdog(void *arg);
139 static void rt_intr(void *arg);
140 static void rt_rt5350_intr(void *arg);
141 static void rt_tx_coherent_intr(struct rt_softc *sc);
142 static void rt_rx_coherent_intr(struct rt_softc *sc);
143 static void rt_rx_delay_intr(struct rt_softc *sc);
144 static void rt_tx_delay_intr(struct rt_softc *sc);
145 static void rt_rx_intr(struct rt_softc *sc, int qid);
146 static void rt_tx_intr(struct rt_softc *sc, int qid);
147 static void rt_rx_done_task(void *context, int pending);
148 static void rt_tx_done_task(void *context, int pending);
149 static void rt_periodic_task(void *context, int pending);
150 static int rt_rx_eof(struct rt_softc *sc,
151 struct rt_softc_rx_ring *ring, int limit);
152 static void rt_tx_eof(struct rt_softc *sc,
153 struct rt_softc_tx_ring *ring);
154 static void rt_update_stats(struct rt_softc *sc);
155 static void rt_watchdog(struct rt_softc *sc);
156 static void rt_update_raw_counters(struct rt_softc *sc);
157 static void rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
158 static void rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
159 static int rt_txrx_enable(struct rt_softc *sc);
160 static int rt_alloc_rx_ring(struct rt_softc *sc,
161 struct rt_softc_rx_ring *ring, int qid);
162 static void rt_reset_rx_ring(struct rt_softc *sc,
163 struct rt_softc_rx_ring *ring);
164 static void rt_free_rx_ring(struct rt_softc *sc,
165 struct rt_softc_rx_ring *ring);
166 static int rt_alloc_tx_ring(struct rt_softc *sc,
167 struct rt_softc_tx_ring *ring, int qid);
168 static void rt_reset_tx_ring(struct rt_softc *sc,
169 struct rt_softc_tx_ring *ring);
170 static void rt_free_tx_ring(struct rt_softc *sc,
171 struct rt_softc_tx_ring *ring);
172 static void rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
173 int nseg, int error);
174 static void rt_sysctl_attach(struct rt_softc *sc);
175 #ifdef IF_RT_PHY_SUPPORT
176 void rt_miibus_statchg(device_t);
178 #if defined(IF_RT_PHY_SUPPORT) || defined(RT_MDIO)
179 static int rt_miibus_readreg(device_t, int, int);
180 static int rt_miibus_writereg(device_t, int, int, int);
182 static int rt_ifmedia_upd(struct ifnet *);
183 static void rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);
185 static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
187 static int rt_debug = 0;
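/*
 * Being a CTLFLAG_RWTUN sysctl, the debug level can be changed at runtime
 * (e.g. "sysctl hw.rt.debug=1") or preset as a tunable from loader.conf.
 */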
188 SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0,
193 rt_probe(device_t dev)
195 struct rt_softc *sc = device_get_softc(dev);
198 const struct ofw_compat_data * cd;
200 cd = ofw_bus_search_compatible(dev, rt_compat_data);
201 if (cd->ocd_data == 0)
204 sc->rt_chipid = (unsigned int)(cd->ocd_data);
207 sc->rt_chipid = RT_CHIPID_MT7620;
208 #elif defined(MT7621)
209 sc->rt_chipid = RT_CHIPID_MT7621;
210 #elif defined(RT5350)
211 sc->rt_chipid = RT_CHIPID_RT5350;
213 sc->rt_chipid = RT_CHIPID_RT3050;
216 snprintf(buf, sizeof(buf), "Ralink %cT%x onChip Ethernet driver",
217 sc->rt_chipid >= 0x7600 ? 'M' : 'R', sc->rt_chipid);
218 device_set_desc_copy(dev, buf);
219 return (BUS_PROBE_GENERIC);
223 * macaddr_atoi - translate a MAC address string into a uint8_t array
226 macaddr_atoi(const char *str, uint8_t *mac)
229 unsigned int amac[ETHER_ADDR_LEN]; /* Aligned version */
231 count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
232 &amac[0], &amac[1], &amac[2],
233 &amac[3], &amac[4], &amac[5]);
234 if (count < ETHER_ADDR_LEN) {
235 memset(mac, 0, ETHER_ADDR_LEN);
239 /* Copy aligned to result */
240 for (i = 0; i < ETHER_ADDR_LEN; i++)
241 mac[i] = (amac[i] & 0xff);
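/*
 * Usage sketch (the address below is only an example): the "%x%*c"
 * pattern accepts any single-character separator, so both of these parse:
 *
 *	uint8_t mac[ETHER_ADDR_LEN];
 *	macaddr_atoi("00:0c:43:30:50:88", mac);
 *	macaddr_atoi("00-0c-43-30-50-88", mac);
 */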
246 #ifdef USE_GENERATED_MAC_ADDRESS
248 * generate_mac(uint8_t *mac)
249 * MAC address generator for cases when the real device MAC address is
250 * unknown or not yet accessible.
251 * Uses the 'b','s','d' signature and 3 octets from a CRC32 over the kenv.
252 * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
254 * The output is a MAC address that does not change between reboots as
255 * long as the hints and bootloader info are unchanged.
258 generate_mac(uint8_t *mac)
262 uint32_t crc = 0xffffffff;
264 /* Generate CRC32 on kenv */
265 for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
266 crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
273 mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
274 mac[4] = (crc >> 8) & 0xff;
280 * ether_request_mac - try to find a usable MAC address.
283 ether_request_mac(device_t dev, uint8_t *mac)
288 * "ethaddr" is passed via envp on RedBoot platforms
289 * "kmac" is passed via argv on RouterBOOT platforms
291 #if defined(RT305X_UBOOT) || defined(__REDBOOT__) || defined(__ROUTERBOOT__)
292 if ((var = kern_getenv("ethaddr")) != NULL ||
293 (var = kern_getenv("kmac")) != NULL ) {
295 if (!macaddr_atoi(var, mac)) {
296 printf("%s: use %s macaddr from KENV\n",
297 device_get_nameunit(dev), var);
307 * hint.[dev].[unit].macaddr
309 if (!resource_string_value(device_get_name(dev),
310 device_get_unit(dev), "macaddr", (const char **)&var)) {
312 if (!macaddr_atoi(var, mac)) {
313 printf("%s: use %s macaddr from hints\n",
314 device_get_nameunit(dev), var);
319 #ifdef USE_GENERATED_MAC_ADDRESS
322 device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
323 "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
333 device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
343 reset_freng(struct rt_softc *sc)
345 /* XXX hard reset kills everything so skip it ... */
350 rt_attach(device_t dev)
356 sc = device_get_softc(dev);
359 mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
360 MTX_DEF | MTX_RECURSE);
363 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
364 RF_ACTIVE | RF_SHAREABLE);
365 if (sc->mem == NULL) {
366 device_printf(dev, "could not allocate memory resource\n");
371 sc->bst = rman_get_bustag(sc->mem);
372 sc->bsh = rman_get_bushandle(sc->mem);
375 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
377 if (sc->irq == NULL) {
379 "could not allocate interrupt resource\n");
385 sc->debug = rt_debug;
387 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
388 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
389 "debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
396 if (sc->rt_chipid == RT_CHIPID_MT7620) {
397 sc->csum_fail_ip = MT7620_RXD_SRC_IP_CSUM_FAIL;
398 sc->csum_fail_l4 = MT7620_RXD_SRC_L4_CSUM_FAIL;
399 } else if (sc->rt_chipid == RT_CHIPID_MT7621) {
400 sc->csum_fail_ip = MT7621_RXD_SRC_IP_CSUM_FAIL;
401 sc->csum_fail_l4 = MT7621_RXD_SRC_L4_CSUM_FAIL;
403 sc->csum_fail_ip = RT305X_RXD_SRC_IP_CSUM_FAIL;
404 sc->csum_fail_l4 = RT305X_RXD_SRC_L4_CSUM_FAIL;
407 /* Fill in the SoC-specific register map */
408 switch(sc->rt_chipid) {
409 case RT_CHIPID_MT7620:
410 case RT_CHIPID_MT7621:
411 sc->gdma1_base = MT7620_GDMA1_BASE;
413 case RT_CHIPID_RT5350:
414 device_printf(dev, "%cT%x Ethernet MAC (rev 0x%08x)\n",
415 sc->rt_chipid >= 0x7600 ? 'M' : 'R',
416 sc->rt_chipid, sc->mac_rev);
417 /* RT5350: No GDMA, PSE, CDMA, PPE */
418 RT_WRITE(sc, GE_PORT_BASE + 0x0C00, /* UDPCS, TCPCS, IPCS=1 */
419 RT_READ(sc, GE_PORT_BASE + 0x0C00) | (0x7 << 16));
420 sc->delay_int_cfg = RT5350_PDMA_BASE + RT5350_DELAY_INT_CFG;
421 sc->fe_int_status = RT5350_FE_INT_STATUS;
422 sc->fe_int_enable = RT5350_FE_INT_ENABLE;
423 sc->pdma_glo_cfg = RT5350_PDMA_BASE + RT5350_PDMA_GLO_CFG;
424 sc->pdma_rst_idx = RT5350_PDMA_BASE + RT5350_PDMA_RST_IDX;
425 for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
426 sc->tx_base_ptr[i] = RT5350_PDMA_BASE + RT5350_TX_BASE_PTR(i);
427 sc->tx_max_cnt[i] = RT5350_PDMA_BASE + RT5350_TX_MAX_CNT(i);
428 sc->tx_ctx_idx[i] = RT5350_PDMA_BASE + RT5350_TX_CTX_IDX(i);
429 sc->tx_dtx_idx[i] = RT5350_PDMA_BASE + RT5350_TX_DTX_IDX(i);
432 sc->rx_base_ptr[0] = RT5350_PDMA_BASE + RT5350_RX_BASE_PTR0;
433 sc->rx_max_cnt[0] = RT5350_PDMA_BASE + RT5350_RX_MAX_CNT0;
434 sc->rx_calc_idx[0] = RT5350_PDMA_BASE + RT5350_RX_CALC_IDX0;
435 sc->rx_drx_idx[0] = RT5350_PDMA_BASE + RT5350_RX_DRX_IDX0;
436 sc->rx_base_ptr[1] = RT5350_PDMA_BASE + RT5350_RX_BASE_PTR1;
437 sc->rx_max_cnt[1] = RT5350_PDMA_BASE + RT5350_RX_MAX_CNT1;
438 sc->rx_calc_idx[1] = RT5350_PDMA_BASE + RT5350_RX_CALC_IDX1;
439 sc->rx_drx_idx[1] = RT5350_PDMA_BASE + RT5350_RX_DRX_IDX1;
440 sc->int_rx_done_mask = RT5350_INT_RXQ0_DONE;
441 sc->int_tx_done_mask = RT5350_INT_TXQ0_DONE;
444 device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
446 sc->gdma1_base = GDMA1_BASE;
447 sc->delay_int_cfg = PDMA_BASE + DELAY_INT_CFG;
448 sc->fe_int_status = GE_PORT_BASE + FE_INT_STATUS;
449 sc->fe_int_enable = GE_PORT_BASE + FE_INT_ENABLE;
450 sc->pdma_glo_cfg = PDMA_BASE + PDMA_GLO_CFG;
451 sc->pdma_rst_idx = PDMA_BASE + PDMA_RST_IDX;
452 for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
453 sc->tx_base_ptr[i] = PDMA_BASE + TX_BASE_PTR(i);
454 sc->tx_max_cnt[i] = PDMA_BASE + TX_MAX_CNT(i);
455 sc->tx_ctx_idx[i] = PDMA_BASE + TX_CTX_IDX(i);
456 sc->tx_dtx_idx[i] = PDMA_BASE + TX_DTX_IDX(i);
459 sc->rx_base_ptr[0] = PDMA_BASE + RX_BASE_PTR0;
460 sc->rx_max_cnt[0] = PDMA_BASE + RX_MAX_CNT0;
461 sc->rx_calc_idx[0] = PDMA_BASE + RX_CALC_IDX0;
462 sc->rx_drx_idx[0] = PDMA_BASE + RX_DRX_IDX0;
463 sc->int_rx_done_mask = INT_RX_DONE;
464 sc->int_tx_done_mask = INT_TXQ0_DONE;
467 if (sc->gdma1_base != 0)
468 RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
470 GDM_ICS_EN | /* Enable IP Csum */
471 GDM_TCS_EN | /* Enable TCP Csum */
472 GDM_UCS_EN | /* Enable UDP Csum */
473 GDM_STRPCRC | /* Strip CRC from packet */
474 GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
475 GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
476 GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
477 GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT /* fwd Other to CPU */
480 if (sc->rt_chipid == RT_CHIPID_RT2880)
481 RT_WRITE(sc, MDIO_CFG, MDIO_2880_100T_INIT);
483 /* allocate Tx and Rx rings */
484 for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
485 error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
487 device_printf(dev, "could not allocate Tx ring #%d\n",
493 sc->tx_ring_mgtqid = 5;
494 for (i = 0; i < sc->rx_ring_count; i++) {
495 error = rt_alloc_rx_ring(sc, &sc->rx_ring[i], i);
497 device_printf(dev, "could not allocate Rx ring\n");
502 callout_init(&sc->periodic_ch, 0);
503 callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);
505 ifp = sc->ifp = if_alloc(IFT_ETHER);
507 device_printf(dev, "could not if_alloc()\n");
513 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
514 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
515 ifp->if_init = rt_init;
516 ifp->if_ioctl = rt_ioctl;
517 ifp->if_start = rt_start;
518 #define RT_TX_QLEN 256
520 IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
521 ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
522 IFQ_SET_READY(&ifp->if_snd);
524 #ifdef IF_RT_PHY_SUPPORT
525 error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
526 rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
528 device_printf(dev, "attaching PHYs failed\n");
533 ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
534 ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
536 ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);
538 #endif /* IF_RT_PHY_SUPPORT */
540 ether_request_mac(dev, sc->mac_addr);
541 ether_ifattach(ifp, sc->mac_addr);
544 * Tell the upper layer(s) we support long frames.
546 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
547 ifp->if_capabilities |= IFCAP_VLAN_MTU;
548 ifp->if_capenable |= IFCAP_VLAN_MTU;
549 ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
550 ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;
552 /* init task queue */
553 TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
554 TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
555 TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);
557 sc->rx_process_limit = 100;
559 sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
560 taskqueue_thread_enqueue, &sc->taskqueue);
562 taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
563 device_get_nameunit(sc->dev));
565 rt_sysctl_attach(sc);
567 /* set up interrupt */
568 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
569 NULL, (sc->rt_chipid == RT_CHIPID_RT5350 ||
570 sc->rt_chipid == RT_CHIPID_MT7620 ||
571 sc->rt_chipid == RT_CHIPID_MT7621) ? rt_rt5350_intr : rt_intr,
574 printf("%s: could not set up interrupt\n",
575 device_get_nameunit(dev));
579 device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
585 /* free Tx and Rx rings */
586 for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
587 rt_free_tx_ring(sc, &sc->tx_ring[i]);
589 for (i = 0; i < sc->rx_ring_count; i++)
590 rt_free_rx_ring(sc, &sc->rx_ring[i]);
592 mtx_destroy(&sc->lock);
595 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
599 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
609 rt_ifmedia_upd(struct ifnet *ifp)
612 #ifdef IF_RT_PHY_SUPPORT
613 struct mii_data *mii;
614 struct mii_softc *miisc;
620 mii = device_get_softc(sc->rt_miibus);
621 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
623 error = mii_mediachg(mii);
628 #else /* !IF_RT_PHY_SUPPORT */
631 struct ifmedia_entry *ife;
634 ifm = &sc->rt_ifmedia;
637 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
640 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
641 device_printf(sc->dev,
642 "AUTO is not supported for multiphy MAC");
650 #endif /* IF_RT_PHY_SUPPORT */
654 * Report current media status.
657 rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
659 #ifdef IF_RT_PHY_SUPPORT
661 struct mii_data *mii;
666 mii = device_get_softc(sc->rt_miibus);
668 ifmr->ifm_active = mii->mii_media_active;
669 ifmr->ifm_status = mii->mii_media_status;
670 ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
671 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
673 #else /* !IF_RT_PHY_SUPPORT */
675 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
676 ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
677 #endif /* IF_RT_PHY_SUPPORT */
681 rt_detach(device_t dev)
687 sc = device_get_softc(dev);
690 RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");
694 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
696 callout_stop(&sc->periodic_ch);
697 callout_stop(&sc->tx_watchdog_ch);
699 taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
700 taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
701 taskqueue_drain(sc->taskqueue, &sc->periodic_task);
703 /* free Tx and Rx rings */
704 for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
705 rt_free_tx_ring(sc, &sc->tx_ring[i]);
706 for (i = 0; i < sc->rx_ring_count; i++)
707 rt_free_rx_ring(sc, &sc->rx_ring[i]);
711 #ifdef IF_RT_PHY_SUPPORT
712 if (sc->rt_miibus != NULL)
713 device_delete_child(dev, sc->rt_miibus);
719 taskqueue_free(sc->taskqueue);
721 mtx_destroy(&sc->lock);
723 bus_generic_detach(dev);
724 bus_teardown_intr(dev, sc->irq, sc->irqh);
725 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
726 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
732 rt_shutdown(device_t dev)
736 sc = device_get_softc(dev);
737 RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
744 rt_suspend(device_t dev)
748 sc = device_get_softc(dev);
749 RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
756 rt_resume(device_t dev)
761 sc = device_get_softc(dev);
764 RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");
766 if (ifp->if_flags & IFF_UP)
773 * rt_init_locked - run the initialization process with the mutex held.
776 rt_init_locked(void *priv)
780 #ifdef IF_RT_PHY_SUPPORT
781 struct mii_data *mii;
788 #ifdef IF_RT_PHY_SUPPORT
789 mii = device_get_softc(sc->rt_miibus);
792 RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");
794 RT_SOFTC_ASSERT_LOCKED(sc);
797 //RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
798 //rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);
800 /* Fwd to CPU (uni|broad|multi)cast and Unknown */
801 if (sc->gdma1_base != 0)
802 RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
804 GDM_ICS_EN | /* Enable IP Csum */
805 GDM_TCS_EN | /* Enable TCP Csum */
806 GDM_UCS_EN | /* Enable UDP Csum */
807 GDM_STRPCRC | /* Strip CRC from packet */
808 GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
809 GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
810 GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
811 GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT /* fwd Other to CPU */
814 /* disable DMA engine */
815 RT_WRITE(sc, sc->pdma_glo_cfg, 0);
816 RT_WRITE(sc, sc->pdma_rst_idx, 0xffffffff);
818 /* wait while DMA engine is busy */
819 for (ntries = 0; ntries < 100; ntries++) {
820 tmp = RT_READ(sc, sc->pdma_glo_cfg);
821 if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
827 device_printf(sc->dev, "timeout waiting for DMA engine\n");
831 /* reset Rx and Tx rings */
832 tmp = FE_RST_DRX_IDX0 |
838 RT_WRITE(sc, sc->pdma_rst_idx, tmp);
840 /* XXX switch set mac address */
841 for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
842 rt_reset_tx_ring(sc, &sc->tx_ring[i]);
844 for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
845 /* update TX_BASE_PTRx */
846 RT_WRITE(sc, sc->tx_base_ptr[i],
847 sc->tx_ring[i].desc_phys_addr);
848 RT_WRITE(sc, sc->tx_max_cnt[i],
849 RT_SOFTC_TX_RING_DESC_COUNT);
850 RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
854 for (i = 0; i < sc->rx_ring_count; i++)
855 rt_reset_rx_ring(sc, &sc->rx_ring[i]);
857 /* update RX_BASE_PTRx */
858 for (i = 0; i < sc->rx_ring_count; i++) {
859 RT_WRITE(sc, sc->rx_base_ptr[i],
860 sc->rx_ring[i].desc_phys_addr);
861 RT_WRITE(sc, sc->rx_max_cnt[i],
862 RT_SOFTC_RX_RING_DATA_COUNT);
863 RT_WRITE(sc, sc->rx_calc_idx[i],
864 RT_SOFTC_RX_RING_DATA_COUNT - 1);
867 /* write back DDONE, 16-byte burst; enable RX/TX DMA */
868 tmp = FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN;
869 if (sc->rt_chipid == RT_CHIPID_MT7620 ||
870 sc->rt_chipid == RT_CHIPID_MT7621)
872 RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
874 /* disable interrupt mitigation */
875 RT_WRITE(sc, sc->delay_int_cfg, 0);
877 /* clear pending interrupts */
878 RT_WRITE(sc, sc->fe_int_status, 0xffffffff);
880 /* enable interrupts */
881 if (sc->rt_chipid == RT_CHIPID_RT5350 ||
882 sc->rt_chipid == RT_CHIPID_MT7620 ||
883 sc->rt_chipid == RT_CHIPID_MT7621)
884 tmp = RT5350_INT_TX_COHERENT |
885 RT5350_INT_RX_COHERENT |
886 RT5350_INT_TXQ3_DONE |
887 RT5350_INT_TXQ2_DONE |
888 RT5350_INT_TXQ1_DONE |
889 RT5350_INT_TXQ0_DONE |
890 RT5350_INT_RXQ1_DONE |
891 RT5350_INT_RXQ0_DONE;
910 sc->intr_enable_mask = tmp;
912 RT_WRITE(sc, sc->fe_int_enable, tmp);
914 if (rt_txrx_enable(sc) != 0)
917 #ifdef IF_RT_PHY_SUPPORT
918 if (mii) mii_mediachg(mii);
919 #endif /* IF_RT_PHY_SUPPORT */
921 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
922 ifp->if_drv_flags |= IFF_DRV_RUNNING;
924 sc->periodic_round = 0;
926 callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
935 * rt_init - lock and initialize device.
949 * rt_stop_locked - stop TX/RX w/ lock
952 rt_stop_locked(void *priv)
960 RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");
962 RT_SOFTC_ASSERT_LOCKED(sc);
964 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
965 callout_stop(&sc->periodic_ch);
966 callout_stop(&sc->tx_watchdog_ch);
968 taskqueue_block(sc->taskqueue);
971 * XXX: Sometimes rt_stop_locked is called from the ISR and we panic
972 * here; fix this once the offending call path is found.
975 taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
976 taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
977 taskqueue_drain(sc->taskqueue, &sc->periodic_task);
981 /* disable interrupts */
982 RT_WRITE(sc, sc->fe_int_enable, 0);
984 if (sc->rt_chipid != RT_CHIPID_RT5350 &&
985 sc->rt_chipid != RT_CHIPID_MT7620 &&
986 sc->rt_chipid != RT_CHIPID_MT7621) {
988 RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
991 if (sc->gdma1_base != 0)
992 RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
994 GDM_ICS_EN | /* Enable IP Csum */
995 GDM_TCS_EN | /* Enable TCP Csum */
996 GDM_UCS_EN | /* Enable UDP Csum */
997 GDM_STRPCRC | /* Strip CRC from packet */
998 GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
999 GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
1000 GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
1001 GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT /* fwd Other to CPU */
1008 struct rt_softc *sc;
1013 RT_SOFTC_UNLOCK(sc);
1017 * rt_tx_data - transmit packet.
1020 rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
1023 struct rt_softc_tx_ring *ring;
1024 struct rt_softc_tx_data *data;
1025 struct rt_txdesc *desc;
1027 bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
1028 int error, ndmasegs, ndescs, i;
1030 KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
1031 ("%s: Tx data: invalid qid=%d\n",
1032 device_get_nameunit(sc->dev), qid));
1034 RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);
1037 ring = &sc->tx_ring[qid];
1038 desc = &ring->desc[ring->desc_cur];
1039 data = &ring->data[ring->data_cur];
1041 error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
1042 dma_seg, &ndmasegs, 0);
1044 /* too many fragments, linearize */
1046 RT_DPRINTF(sc, RT_DEBUG_TX,
1047 "could not load mbuf DMA map, trying to linearize "
1048 "mbuf: ndmasegs=%d, len=%d, error=%d\n",
1049 ndmasegs, m->m_pkthdr.len, error);
1051 m_d = m_collapse(m, M_NOWAIT, 16);
1059 sc->tx_defrag_packets++;
1061 error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
1062 data->dma_map, m, dma_seg, &ndmasegs, 0);
1064 device_printf(sc->dev, "could not load mbuf DMA map: "
1065 "ndmasegs=%d, len=%d, error=%d\n",
1066 ndmasegs, m->m_pkthdr.len, error);
1072 if (m->m_pkthdr.len == 0)
1075 /* determine how many Tx descs are required */
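	/*
	 * Each Tx descriptor carries up to two DMA segments (sdp0/sdl0 and
	 * sdp1/sdl1), so an N-segment mbuf chain needs about N/2
	 * descriptors; "1 + ndmasegs / 2" is a safe upper bound.
	 */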
1076 ndescs = 1 + ndmasegs / 2;
1077 if ((ring->desc_queued + ndescs) >
1078 (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
1079 RT_DPRINTF(sc, RT_DEBUG_TX,
1080 "there are not enough Tx descs\n");
1082 sc->no_tx_desc_avail++;
1084 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
1091 /* set up Tx descs */
1092 for (i = 0; i < ndmasegs; i += 2) {
1094 /* TODO: this needs to be refined as MT7620 for example has
1095 * a different word3 layout than RT305x and RT5350 (the last
1096 * one doesn't use word3 at all). And so does MT7621...
1099 if (sc->rt_chipid != RT_CHIPID_MT7621) {
1100 /* Set destination */
1101 if (sc->rt_chipid != RT_CHIPID_MT7620)
1102 desc->dst = (TXDSCR_DST_PORT_GDMA1);
1104 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1105 desc->dst |= (TXDSCR_IP_CSUM_GEN |
1106 TXDSCR_UDP_CSUM_GEN | TXDSCR_TCP_CSUM_GEN);
1120 desc->sdp0 = htole32(dma_seg[i].ds_addr);
1121 desc->sdl0 = htole16(dma_seg[i].ds_len |
1122 (((i + 1) == ndmasegs) ? RT_TXDESC_SDL0_LASTSEG : 0));
1124 if ((i + 1) < ndmasegs) {
1125 desc->sdp1 = htole32(dma_seg[i + 1].ds_addr);
1126 desc->sdl1 = htole16(dma_seg[i + 1].ds_len |
1127 (((i + 2) == ndmasegs) ? RT_TXDESC_SDL1_LASTSEG : 0));
1133 if ((i+2) < ndmasegs) {
1134 ring->desc_queued++;
1135 ring->desc_cur = (ring->desc_cur + 1) %
1136 RT_SOFTC_TX_RING_DESC_COUNT;
1138 desc = &ring->desc[ring->desc_cur];
1141 RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
1142 "DMA ds_len=%d/%d/%d/%d/%d\n",
1143 m->m_pkthdr.len, ndmasegs,
1144 (int) dma_seg[0].ds_len,
1145 (int) dma_seg[1].ds_len,
1146 (int) dma_seg[2].ds_len,
1147 (int) dma_seg[3].ds_len,
1148 (int) dma_seg[4].ds_len);
1150 bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
1151 BUS_DMASYNC_PREWRITE);
1152 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1153 BUS_DMASYNC_PREWRITE);
1154 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1155 BUS_DMASYNC_PREWRITE);
1157 ring->desc_queued++;
1158 ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;
1160 ring->data_queued++;
1161 ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;
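	/*
	 * Writing the CPU Tx index register below hands the newly queued
	 * descriptors over to the DMA engine (a doorbell-style kick).
	 */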
1164 RT_WRITE(sc, sc->tx_ctx_idx[qid], ring->desc_cur);
1170 * rt_start - if_start handler: dequeue and transmit pending packets
1173 rt_start(struct ifnet *ifp)
1175 struct rt_softc *sc;
1177 int qid = 0 /* XXX must check QoS priority */;
1181 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1185 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1189 m->m_pkthdr.rcvif = NULL;
1191 RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);
1193 if (sc->tx_ring[qid].data_queued >=
1194 RT_SOFTC_TX_RING_DATA_COUNT) {
1195 RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
1197 RT_DPRINTF(sc, RT_DEBUG_TX,
1198 "if_start: Tx ring with qid=%d is full\n", qid);
1202 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1203 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1205 sc->tx_data_queue_full[qid]++;
1210 if (rt_tx_data(sc, m, qid) != 0) {
1211 RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
1213 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1218 RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
1219 sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
1220 callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
1225 * rt_update_promisc - set/clear promiscuous mode. Not used yet, because
1226 * filtering is done by the attached Ethernet switch.
1229 rt_update_promisc(struct ifnet *ifp)
1231 struct rt_softc *sc;
1234 printf("%s: %s promiscuous mode\n",
1235 device_get_nameunit(sc->dev),
1236 (ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
1240 * rt_ioctl - ioctl handler.
1243 rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1245 struct rt_softc *sc;
1247 #ifdef IF_RT_PHY_SUPPORT
1248 struct mii_data *mii;
1249 #endif /* IF_RT_PHY_SUPPORT */
1250 int error, startall;
1253 ifr = (struct ifreq *) data;
1261 if (ifp->if_flags & IFF_UP) {
1262 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1263 if ((ifp->if_flags ^ sc->if_flags) &
1265 rt_update_promisc(ifp);
1271 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1274 sc->if_flags = ifp->if_flags;
1275 RT_SOFTC_UNLOCK(sc);
1279 #ifdef IF_RT_PHY_SUPPORT
1280 mii = device_get_softc(sc->rt_miibus);
1281 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1283 error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
1284 #endif /* IF_RT_PHY_SUPPORT */
1287 error = ether_ioctl(ifp, cmd, data);
1294 * rt_periodic - Handler of PERIODIC interrupt
1297 rt_periodic(void *arg)
1299 struct rt_softc *sc;
1302 RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
1303 taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
1307 * rt_tx_watchdog - Handler of TX Watchdog
1310 rt_tx_watchdog(void *arg)
1312 struct rt_softc *sc;
1318 if (sc->tx_timer == 0)
1321 if (--sc->tx_timer == 0) {
1322 device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
1325 * XXX: Commented out, because the reset breaks input.
1330 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1331 sc->tx_watchdog_timeouts++;
1333 callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
1337 * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt
1340 rt_cnt_ppe_af(struct rt_softc *sc)
1343 RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
1347 * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt
1350 rt_cnt_gdm_af(struct rt_softc *sc)
1353 RT_DPRINTF(sc, RT_DEBUG_INTR,
1354 "GDMA 1 & 2 Counter Table Almost Full\n");
1358 * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt
1361 rt_pse_p2_fc(struct rt_softc *sc)
1364 RT_DPRINTF(sc, RT_DEBUG_INTR,
1365 "PSE port2 (GDMA 2) flow control asserted.\n");
1369 * rt_gdm_crc_drop - Handler of GDMA 1/2 packet discard due to CRC error
1373 rt_gdm_crc_drop(struct rt_softc *sc)
1376 RT_DPRINTF(sc, RT_DEBUG_INTR,
1377 "GDMA 1 & 2 discard a packet due to CRC error\n");
1381 * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt
1384 rt_pse_buf_drop(struct rt_softc *sc)
1387 RT_DPRINTF(sc, RT_DEBUG_INTR,
1388 "PSE discards a packet due to buffer sharing limitation\n");
1392 * rt_gdm_other_drop - Handler of GDMA 1/2 packet discard for other reasons
1395 rt_gdm_other_drop(struct rt_softc *sc)
1398 RT_DPRINTF(sc, RT_DEBUG_INTR,
1399 "GDMA 1 & 2 discard a packet due to other reason\n");
1403 * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt
1406 rt_pse_p1_fc(struct rt_softc *sc)
1409 RT_DPRINTF(sc, RT_DEBUG_INTR,
1410 "PSE port1 (GDMA 1) flow control asserted.\n");
1414 * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt
1417 rt_pse_p0_fc(struct rt_softc *sc)
1420 RT_DPRINTF(sc, RT_DEBUG_INTR,
1421 "PSE port0 (CDMA) flow control asserted.\n");
1425 * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt
1428 rt_pse_fq_empty(struct rt_softc *sc)
1431 RT_DPRINTF(sc, RT_DEBUG_INTR,
1432 "PSE free Q empty threshold reached & forced drop "
1433 "condition occurred.\n");
1437 * rt_intr - main ISR
1442 struct rt_softc *sc;
1449 /* acknowledge interrupts */
1450 status = RT_READ(sc, sc->fe_int_status);
1451 RT_WRITE(sc, sc->fe_int_status, status);
1453 RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);
1455 if (status == 0xffffffff || /* device likely went away */
1456 status == 0) /* not for us */
1461 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1464 if (status & CNT_PPE_AF)
1467 if (status & CNT_GDM_AF)
1470 if (status & PSE_P2_FC)
1473 if (status & GDM_CRC_DROP)
1474 rt_gdm_crc_drop(sc);
1476 if (status & PSE_BUF_DROP)
1477 rt_pse_buf_drop(sc);
1479 if (status & GDM_OTHER_DROP)
1480 rt_gdm_other_drop(sc);
1482 if (status & PSE_P1_FC)
1485 if (status & PSE_P0_FC)
1488 if (status & PSE_FQ_EMPTY)
1489 rt_pse_fq_empty(sc);
1491 if (status & INT_TX_COHERENT)
1492 rt_tx_coherent_intr(sc);
1494 if (status & INT_RX_COHERENT)
1495 rt_rx_coherent_intr(sc);
1497 if (status & RX_DLY_INT)
1498 rt_rx_delay_intr(sc);
1500 if (status & TX_DLY_INT)
1501 rt_tx_delay_intr(sc);
1503 if (status & INT_RX_DONE)
1506 if (status & INT_TXQ3_DONE)
1509 if (status & INT_TXQ2_DONE)
1512 if (status & INT_TXQ1_DONE)
1515 if (status & INT_TXQ0_DONE)
1520 * rt_rt5350_intr - main ISR for Ralink 5350 SoC
1523 rt_rt5350_intr(void *arg)
1525 struct rt_softc *sc;
1532 /* acknowledge interrupts */
1533 status = RT_READ(sc, sc->fe_int_status);
1534 RT_WRITE(sc, sc->fe_int_status, status);
1536 RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);
1538 if (status == 0xffffffff || /* device likely went away */
1539 status == 0) /* not for us */
1544 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1547 if (status & RT5350_INT_TX_COHERENT)
1548 rt_tx_coherent_intr(sc);
1549 if (status & RT5350_INT_RX_COHERENT)
1550 rt_rx_coherent_intr(sc);
1551 if (status & RT5350_RX_DLY_INT)
1552 rt_rx_delay_intr(sc);
1553 if (status & RT5350_TX_DLY_INT)
1554 rt_tx_delay_intr(sc);
1555 if (status & RT5350_INT_RXQ1_DONE)
1557 if (status & RT5350_INT_RXQ0_DONE)
1559 if (status & RT5350_INT_TXQ3_DONE)
1561 if (status & RT5350_INT_TXQ2_DONE)
1563 if (status & RT5350_INT_TXQ1_DONE)
1565 if (status & RT5350_INT_TXQ0_DONE)
1570 rt_tx_coherent_intr(struct rt_softc *sc)
1575 RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");
1577 sc->tx_coherent_interrupts++;
1579 /* restart DMA engine */
1580 tmp = RT_READ(sc, sc->pdma_glo_cfg);
1581 tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
1582 RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
1584 for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
1585 rt_reset_tx_ring(sc, &sc->tx_ring[i]);
1587 for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
1588 RT_WRITE(sc, sc->tx_base_ptr[i],
1589 sc->tx_ring[i].desc_phys_addr);
1590 RT_WRITE(sc, sc->tx_max_cnt[i],
1591 RT_SOFTC_TX_RING_DESC_COUNT);
1592 RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
1599 * rt_rx_coherent_intr
1602 rt_rx_coherent_intr(struct rt_softc *sc)
1607 RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");
1609 sc->rx_coherent_interrupts++;
1611 /* restart DMA engine */
1612 tmp = RT_READ(sc, sc->pdma_glo_cfg);
1613 tmp &= ~(FE_RX_DMA_EN);
1614 RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
1617 for (i = 0; i < sc->rx_ring_count; i++)
1618 rt_reset_rx_ring(sc, &sc->rx_ring[i]);
1620 for (i = 0; i < sc->rx_ring_count; i++) {
1621 RT_WRITE(sc, sc->rx_base_ptr[i],
1622 sc->rx_ring[i].desc_phys_addr);
1623 RT_WRITE(sc, sc->rx_max_cnt[i],
1624 RT_SOFTC_RX_RING_DATA_COUNT);
1625 RT_WRITE(sc, sc->rx_calc_idx[i],
1626 RT_SOFTC_RX_RING_DATA_COUNT - 1);
1633 * rt_rx_intr - a packet received
1636 rt_rx_intr(struct rt_softc *sc, int qid)
1638 KASSERT(qid >= 0 && qid < sc->rx_ring_count,
1639 ("%s: Rx interrupt: invalid qid=%d\n",
1640 device_get_nameunit(sc->dev), qid));
1642 RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
1643 sc->rx_interrupts[qid]++;
1646 if (!(sc->intr_disable_mask & (sc->int_rx_done_mask << qid))) {
1647 rt_intr_disable(sc, (sc->int_rx_done_mask << qid));
1648 taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
1651 sc->intr_pending_mask |= (sc->int_rx_done_mask << qid);
1652 RT_SOFTC_UNLOCK(sc);
1656 rt_rx_delay_intr(struct rt_softc *sc)
1659 RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
1660 sc->rx_delay_interrupts++;
1664 rt_tx_delay_intr(struct rt_softc *sc)
1667 RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
1668 sc->tx_delay_interrupts++;
1672 * rt_tx_intr - transmission of a packet is done
1675 rt_tx_intr(struct rt_softc *sc, int qid)
1678 KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
1679 ("%s: Tx interrupt: invalid qid=%d\n",
1680 device_get_nameunit(sc->dev), qid));
1682 RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);
1684 sc->tx_interrupts[qid]++;
1687 if (!(sc->intr_disable_mask & (sc->int_tx_done_mask << qid))) {
1688 rt_intr_disable(sc, (sc->int_tx_done_mask << qid));
1689 taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
1692 sc->intr_pending_mask |= (sc->int_tx_done_mask << qid);
1693 RT_SOFTC_UNLOCK(sc);
1697 * rt_rx_done_task - run RX task
1700 rt_rx_done_task(void *context, int pending)
1702 struct rt_softc *sc;
1709 RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");
1711 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1714 sc->intr_pending_mask &= ~sc->int_rx_done_mask;
1716 again = rt_rx_eof(sc, &sc->rx_ring[0], sc->rx_process_limit);
1720 if ((sc->intr_pending_mask & sc->int_rx_done_mask) || again) {
1721 RT_DPRINTF(sc, RT_DEBUG_RX,
1722 "Rx done task: scheduling again\n");
1723 taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
1725 rt_intr_enable(sc, sc->int_rx_done_mask);
1728 RT_SOFTC_UNLOCK(sc);
1732 * rt_tx_done_task - check for pending TX task in all queues
1735 rt_tx_done_task(void *context, int pending)
1737 struct rt_softc *sc;
1745 RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");
1747 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1750 for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
1751 if (sc->intr_pending_mask & (sc->int_tx_done_mask << i)) {
1752 sc->intr_pending_mask &= ~(sc->int_tx_done_mask << i);
1753 rt_tx_eof(sc, &sc->tx_ring[i]);
1759 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1761 if (sc->rt_chipid == RT_CHIPID_RT5350 ||
1762 sc->rt_chipid == RT_CHIPID_MT7620 ||
1763 sc->rt_chipid == RT_CHIPID_MT7621)
1765 RT5350_INT_TXQ3_DONE |
1766 RT5350_INT_TXQ2_DONE |
1767 RT5350_INT_TXQ1_DONE |
1768 RT5350_INT_TXQ0_DONE);
1778 rt_intr_enable(sc, ~sc->intr_pending_mask &
1779 (sc->intr_disable_mask & intr_mask));
1781 if (sc->intr_pending_mask & intr_mask) {
1782 RT_DPRINTF(sc, RT_DEBUG_TX,
1783 "Tx done task: scheduling again\n");
1784 taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
1787 RT_SOFTC_UNLOCK(sc);
1789 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1794 * rt_periodic_task - run periodic task
1797 rt_periodic_task(void *context, int pending)
1799 struct rt_softc *sc;
1805 RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
1806 sc->periodic_round);
1808 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1812 sc->periodic_round++;
1813 rt_update_stats(sc);
1815 if ((sc->periodic_round % 10) == 0) {
1816 rt_update_raw_counters(sc);
1820 RT_SOFTC_UNLOCK(sc);
1821 callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
1825 * rt_rx_eof - check for frames completed by the DMA engine and pass them
1826 * to the network subsystem.
1829 rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit)
1832 /* struct rt_softc_rx_ring *ring; */
1833 struct rt_rxdesc *desc;
1834 struct rt_softc_rx_data *data;
1835 struct mbuf *m, *mnew;
1836 bus_dma_segment_t segs[1];
1837 bus_dmamap_t dma_map;
1838 uint32_t index, desc_flags;
1839 int error, nsegs, len, nframes;
1842 /* ring = &sc->rx_ring[0]; */
1846 while (limit != 0) {
1847 index = RT_READ(sc, sc->rx_drx_idx[0]);
1848 if (ring->cur == index)
1851 desc = &ring->desc[ring->cur];
1852 data = &ring->data[ring->cur];
1854 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1855 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1858 if (sc->debug & RT_DEBUG_RX) {
1859 printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
1860 hexdump(desc, 16, 0, 0);
1861 printf("-----------------------------------\n");
1865 /* XXX Sometimes the device doesn't set the DDONE bit */
1867 if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
1868 RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
1873 len = le16toh(desc->sdl0) & 0x3fff;
1874 RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);
1878 mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1881 sc->rx_mbuf_alloc_errors++;
1882 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1886 mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;
1888 error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
1889 ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
1891 RT_DPRINTF(sc, RT_DEBUG_RX,
1892 "could not load Rx mbuf DMA map: "
1893 "error=%d, nsegs=%d\n",
1898 sc->rx_mbuf_dmamap_errors++;
1899 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1904 KASSERT(nsegs == 1, ("%s: too many DMA segments",
1905 device_get_nameunit(sc->dev)));
1907 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1908 BUS_DMASYNC_POSTREAD);
1909 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
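		/*
		 * Swap the just-unloaded map with the ring's spare map, so
		 * the new mbuf (already loaded into spare_dma_map above)
		 * takes over this slot.
		 */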
1911 dma_map = data->dma_map;
1912 data->dma_map = ring->spare_dma_map;
1913 ring->spare_dma_map = dma_map;
1915 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1916 BUS_DMASYNC_PREREAD);
1919 desc_flags = desc->word3;
1922 /* Add 2 for proper align of RX IP header */
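		/*
		 * (The 2-byte pad makes the 14-byte Ethernet header end on a
		 * 32-bit boundary, leaving the IP header naturally aligned.)
		 */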
1923 desc->sdp0 = htole32(segs[0].ds_addr + 2);
1924 desc->sdl0 = htole32(segs[0].ds_len - 2);
1927 RT_DPRINTF(sc, RT_DEBUG_RX,
1928 "Rx frame: rxdesc flags=0x%08x\n", desc_flags);
1930 m->m_pkthdr.rcvif = ifp;
1931 /* Skip the 2-byte alignment pad (sdp0 was set to addr + 2) */
1933 m->m_pkthdr.len = m->m_len = len;
1935 /* check for checksum errors */
1936 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1937 /* check for valid checksum */
1938 if (desc_flags & (sc->csum_fail_ip|sc->csum_fail_l4)) {
1939 RT_DPRINTF(sc, RT_DEBUG_RX,
1940 "rxdesc: checksum error\n");
1942 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1944 if (!(ifp->if_flags & IFF_PROMISC)) {
1949 if ((desc_flags & sc->csum_fail_ip) == 0) {
1950 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1951 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1952 m->m_pkthdr.csum_data = 0xffff;
1954 m->m_flags &= ~M_HASFCS;
1957 (*ifp->if_input)(ifp, m);
1959 desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
1961 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1962 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1964 ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;
1970 RT_WRITE(sc, sc->rx_calc_idx[0],
1971 RT_SOFTC_RX_RING_DATA_COUNT - 1);
1973 RT_WRITE(sc, sc->rx_calc_idx[0],
1976 RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);
1978 sc->rx_packets += nframes;
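	/*
	 * A non-zero return means the processing limit was exhausted and
	 * more frames may be pending; the caller then reschedules the Rx
	 * task.
	 */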
1980 return (limit == 0);
1984 * rt_tx_eof - check for successfully transmitted frames and mark their
1985 * descriptors as free.
1988 rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
1991 struct rt_txdesc *desc;
1992 struct rt_softc_tx_data *data;
1994 int ndescs, nframes;
2002 index = RT_READ(sc, sc->tx_dtx_idx[ring->qid]);
2003 if (ring->desc_next == index)
2008 desc = &ring->desc[ring->desc_next];
2010 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2011 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2013 if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
2014 desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
2017 data = &ring->data[ring->data_next];
2019 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2020 BUS_DMASYNC_POSTWRITE);
2021 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2027 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2029 RT_SOFTC_TX_RING_LOCK(ring);
2030 ring->data_queued--;
2031 ring->data_next = (ring->data_next + 1) %
2032 RT_SOFTC_TX_RING_DATA_COUNT;
2033 RT_SOFTC_TX_RING_UNLOCK(ring);
2036 desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);
2038 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2039 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2041 RT_SOFTC_TX_RING_LOCK(ring);
2042 ring->desc_queued--;
2043 ring->desc_next = (ring->desc_next + 1) %
2044 RT_SOFTC_TX_RING_DESC_COUNT;
2045 RT_SOFTC_TX_RING_UNLOCK(ring);
2048 RT_DPRINTF(sc, RT_DEBUG_TX,
2049 "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
2054 * rt_update_stats - query statistics counters and update related variables.
2057 rt_update_stats(struct rt_softc *sc)
2062 RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistics\n");
2063 /* XXX do update stats here */
2067 * rt_watchdog - reinit device on watchdog event.
2070 rt_watchdog(struct rt_softc *sc)
2076 if(sc->rt_chipid != RT_CHIPID_RT5350 &&
2077 sc->rt_chipid != RT_CHIPID_MT7620 &&
2078 sc->rt_chipid != RT_CHIPID_MT7621) {
2079 tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);
2081 RT_DPRINTF(sc, RT_DEBUG_WATCHDOG,
2082 "watchdog: PSE_IQ_STA=0x%08x\n", tmp);
2084 /* XXX: do not reset */
2086 if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
2087 sc->tx_queue_not_empty[0]++;
2089 for (ntries = 0; ntries < 10; ntries++) {
2090 tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2091 if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
2098 if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
2099 sc->tx_queue_not_empty[1]++;
2101 for (ntries = 0; ntries < 10; ntries++) {
2102 tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2103 if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
2113 * rt_update_raw_counters - update counters.
2116 rt_update_raw_counters(struct rt_softc *sc)
2119 sc->tx_bytes += RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
2120 sc->tx_packets += RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
2121 sc->tx_skip += RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
2122 sc->tx_collision += RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);
2124 sc->rx_bytes += RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
2125 sc->rx_packets += RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
2126 sc->rx_crc_err += RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
2127 sc->rx_short_err += RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
2128 sc->rx_long_err += RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
2129 sc->rx_phy_err += RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
2130 sc->rx_fifo_overflows += RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
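/*
 * Interrupt mask bookkeeping: the value written to the hardware enable
 * register is always intr_enable_mask with the currently disabled bits
 * (intr_disable_mask) cleared, as the two helpers below maintain.
 */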
2134 rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
2138 sc->intr_disable_mask &= ~intr_mask;
2139 tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2140 RT_WRITE(sc, sc->fe_int_enable, tmp);
2144 rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
2148 sc->intr_disable_mask |= intr_mask;
2149 tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2150 RT_WRITE(sc, sc->fe_int_enable, tmp);
2154 * rt_txrx_enable - enable TX/RX DMA
2157 rt_txrx_enable(struct rt_softc *sc)
2165 /* enable Tx/Rx DMA engine */
2166 for (ntries = 0; ntries < 200; ntries++) {
2167 tmp = RT_READ(sc, sc->pdma_glo_cfg);
2168 if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
2174 if (ntries == 200) {
2175 device_printf(sc->dev, "timeout waiting for DMA engine\n");
2181 tmp |= FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
2182 RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
2184 /* XXX set Rx filter */
2189 * rt_alloc_rx_ring - allocate RX DMA ring buffer
2192 rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid)
2194 struct rt_rxdesc *desc;
2195 struct rt_softc_rx_data *data;
2196 bus_dma_segment_t segs[1];
2197 int i, nsegs, error;
2199 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2200 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2201 RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
2202 RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2203 0, NULL, NULL, &ring->desc_dma_tag);
2205 device_printf(sc->dev,
2206 "could not create Rx desc DMA tag\n");
2210 error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2211 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2213 device_printf(sc->dev,
2214 "could not allocate Rx desc DMA memory\n");
2218 error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2220 RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2221 rt_dma_map_addr, &ring->desc_phys_addr, 0);
2223 device_printf(sc->dev, "could not load Rx desc DMA map\n");
2227 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2228 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2229 MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
2230 &ring->data_dma_tag);
2232 device_printf(sc->dev,
2233 "could not create Rx data DMA tag\n");
2237 for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2238 desc = &ring->desc[i];
2239 data = &ring->data[i];
2241 error = bus_dmamap_create(ring->data_dma_tag, 0,
2244 device_printf(sc->dev, "could not create Rx data DMA "
2249 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
2251 if (data->m == NULL) {
2252 device_printf(sc->dev, "could not allocate Rx mbuf\n");
2257 data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;
2259 error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
2260 data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
2262 device_printf(sc->dev,
2263 "could not load Rx mbuf DMA map\n");
2267 KASSERT(nsegs == 1, ("%s: too many DMA segments",
2268 device_get_nameunit(sc->dev)));
2270 /* Add 2 for proper align of RX IP header */
2271 desc->sdp0 = htole32(segs[0].ds_addr + 2);
2272 desc->sdl0 = htole32(segs[0].ds_len - 2);
2275 error = bus_dmamap_create(ring->data_dma_tag, 0,
2276 &ring->spare_dma_map);
2278 device_printf(sc->dev,
2279 "could not create Rx spare DMA map\n");
2283 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2284 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2289 rt_free_rx_ring(sc, ring);
2294 * rt_reset_rx_ring - reset RX ring buffer
2297 rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2299 struct rt_rxdesc *desc;
2302 for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2303 desc = &ring->desc[i];
2304 desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
2307 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2308 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2313 * rt_free_rx_ring - free memory used by RX ring buffer
2316 rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2318 struct rt_softc_rx_data *data;
2321 if (ring->desc != NULL) {
2322 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2323 BUS_DMASYNC_POSTWRITE);
2324 bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2325 bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2326 ring->desc_dma_map);
2329 if (ring->desc_dma_tag != NULL)
2330 bus_dma_tag_destroy(ring->desc_dma_tag);
2332 for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2333 data = &ring->data[i];
2335 if (data->m != NULL) {
2336 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2337 BUS_DMASYNC_POSTREAD);
2338 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2342 if (data->dma_map != NULL)
2343 bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2346 if (ring->spare_dma_map != NULL)
2347 bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);
2349 if (ring->data_dma_tag != NULL)
2350 bus_dma_tag_destroy(ring->data_dma_tag);
2354 * rt_alloc_tx_ring - allocate TX ring buffer
2357 rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
2359 struct rt_softc_tx_data *data;
2362 mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
2364 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2365 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2366 RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
2367 RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
2368 0, NULL, NULL, &ring->desc_dma_tag);
2370 device_printf(sc->dev,
2371 "could not create Tx desc DMA tag\n");
2375 error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2376 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2378 device_printf(sc->dev,
2379 "could not allocate Tx desc DMA memory\n");
2383 error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2384 ring->desc, (RT_SOFTC_TX_RING_DESC_COUNT *
2385 sizeof(struct rt_txdesc)), rt_dma_map_addr,
2386 &ring->desc_phys_addr, 0);
2388 device_printf(sc->dev, "could not load Tx desc DMA map\n");
2392 ring->desc_queued = 0;
2394 ring->desc_next = 0;
2396 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2397 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2398 RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
2399 RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2400 0, NULL, NULL, &ring->seg0_dma_tag);
2402 device_printf(sc->dev,
2403 "could not create Tx seg0 DMA tag\n");
2407 error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
2408 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
2410 device_printf(sc->dev,
2411 "could not allocate Tx seg0 DMA memory\n");
2415 error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
2417 RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2418 rt_dma_map_addr, &ring->seg0_phys_addr, 0);
2420 device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
2424 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2425 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2426 MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
2427 &ring->data_dma_tag);
2429 device_printf(sc->dev,
2430 "could not create Tx data DMA tag\n");
2434 for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2435 data = &ring->data[i];
2437 error = bus_dmamap_create(ring->data_dma_tag, 0,
2440 device_printf(sc->dev, "could not create Tx data DMA "
2446 ring->data_queued = 0;
2448 ring->data_next = 0;
2454 rt_free_tx_ring(sc, ring);
2459 * rt_reset_tx_ring - reset TX ring buffer to empty state
2462 rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2464 struct rt_softc_tx_data *data;
2465 struct rt_txdesc *desc;
2468 for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
2469 desc = &ring->desc[i];
2475 ring->desc_queued = 0;
2477 ring->desc_next = 0;
2479 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2480 BUS_DMASYNC_PREWRITE);
2482 bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2483 BUS_DMASYNC_PREWRITE);
2485 for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2486 data = &ring->data[i];
2488 if (data->m != NULL) {
2489 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2490 BUS_DMASYNC_POSTWRITE);
2491 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2497 ring->data_queued = 0;
2499 ring->data_next = 0;
2503 * rt_free_tx_ring - free TX ring buffer
2506 rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2508 struct rt_softc_tx_data *data;
2511 if (ring->desc != NULL) {
2512 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2513 BUS_DMASYNC_POSTWRITE);
2514 bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2515 bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2516 ring->desc_dma_map);
2519 if (ring->desc_dma_tag != NULL)
2520 bus_dma_tag_destroy(ring->desc_dma_tag);
2522 if (ring->seg0 != NULL) {
2523 bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2524 BUS_DMASYNC_POSTWRITE);
2525 bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
2526 bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
2527 ring->seg0_dma_map);
2530 if (ring->seg0_dma_tag != NULL)
2531 bus_dma_tag_destroy(ring->seg0_dma_tag);
2533 for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2534 data = &ring->data[i];
2536 if (data->m != NULL) {
2537 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2538 BUS_DMASYNC_POSTWRITE);
2539 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2543 if (data->dma_map != NULL)
2544 bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2547 if (ring->data_dma_tag != NULL)
2548 bus_dma_tag_destroy(ring->data_dma_tag);
2550 mtx_destroy(&ring->lock);
2554 * rt_dma_map_addr - get address of busdma segment
2557 rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2562 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2564 *(bus_addr_t *) arg = segs[0].ds_addr;
2568 * rt_sysctl_attach - attach sysctl nodes for NIC counters.
2571 rt_sysctl_attach(struct rt_softc *sc)
2573 struct sysctl_ctx_list *ctx;
2574 struct sysctl_oid *tree;
2575 struct sysctl_oid *stats;
2577 ctx = device_get_sysctl_ctx(sc->dev);
2578 tree = device_get_sysctl_tree(sc->dev);
2580 /* statistics counters */
2581 stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2582 "stats", CTLFLAG_RD, 0, "statistic");
	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "interrupts", CTLFLAG_RD, &sc->interrupts,
	    "all interrupts");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
	    "Tx coherent interrupts");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
	    "Rx coherent interrupts");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts[0],
	    "Rx interrupts");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts,
	    "Rx delay interrupts");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3],
	    "Tx AC3 interrupts");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2],
	    "Tx AC2 interrupts");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1],
	    "Tx AC1 interrupts");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0],
	    "Tx AC0 interrupts");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
	    "Tx delay interrupts");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
	    0, "Tx AC3 descriptors queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
	    0, "Tx AC3 data queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
	    0, "Tx AC2 descriptors queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
	    0, "Tx AC2 data queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
	    0, "Tx AC1 descriptors queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
	    0, "Tx AC1 data queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
	    0, "Tx AC0 descriptors queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
	    0, "Tx AC0 data queued");
	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
	    "Tx AC3 data queue full");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
	    "Tx AC2 data queue full");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
	    "Tx AC1 data queue full");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
	    "Tx AC0 data queue full");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
	    "Tx watchdog timeouts");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets,
	    "Tx defragmented packets");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail,
	    "no Tx descriptors available");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
	    "Rx mbuf allocation errors");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
	    "Rx mbuf DMA mapping errors");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
	    "Tx queue 0 not empty");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
	    "Tx queue 1 not empty");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_packets", CTLFLAG_RD, &sc->rx_packets,
	    "Rx packets");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err,
	    "Rx CRC errors");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err,
	    "Rx PHY errors");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets,
	    "Rx duplicate packets");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows,
	    "Rx FIFO overflows");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_bytes", CTLFLAG_RD, &sc->rx_bytes,
	    "Rx bytes");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_long_err", CTLFLAG_RD, &sc->rx_long_err,
	    "Rx too long frame errors");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_short_err", CTLFLAG_RD, &sc->rx_short_err,
	    "Rx too short frame errors");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_bytes", CTLFLAG_RD, &sc->tx_bytes,
	    "Tx bytes");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_packets", CTLFLAG_RD, &sc->tx_packets,
	    "Tx packets");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_skip", CTLFLAG_RD, &sc->tx_skip,
	    "Tx skip count for GDMA ports");

	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_collision", CTLFLAG_RD, &sc->tx_collision,
	    "Tx collision count for GDMA ports");
}
#if defined(IF_RT_PHY_SUPPORT) || defined(RT_MDIO)
/* This code only works on the RT2880 and similar chips. */
/* TODO: add support for the RT3052 and later chips; does anybody need it? */
static int
rt_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rt_softc *sc = device_get_softc(dev);
	int dat;

	/*
	 * PSEUDO_PHYAD is a special value that indicates an attached
	 * switch; no real PHY uses the PSEUDO_PHYAD (0x1e) address.
	 */
	if (phy == 0x1e) {
		/* Fake PHY ID so a bfeswitch can attach */
		switch (reg) {
		case MII_BMSR:
			return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
		case MII_PHYIDR1:
			return (0x40);		/* As a result of faking */
		case MII_PHYIDR2:		/* PHY will be detected as */
			return (0x6250);	/* bfeswitch */
		}
	}

	/* Wait for any previous command to complete */
	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO)
		;
	dat = ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK);
	RT_WRITE(sc, MDIO_ACCESS, dat);
	RT_WRITE(sc, MDIO_ACCESS, dat | MDIO_CMD_ONGO);
	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO)
		;

	return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
}
static int
rt_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct rt_softc *sc = device_get_softc(dev);
	int dat;

	/* Wait for any previous command to complete */
	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO)
		;
	dat = MDIO_CMD_WR |
	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) |
	    (val & MDIO_PHY_DATA_MASK);
	RT_WRITE(sc, MDIO_ACCESS, dat);
	RT_WRITE(sc, MDIO_ACCESS, dat | MDIO_CMD_ONGO);
	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO)
		;

	return (0);
}
#endif /* IF_RT_PHY_SUPPORT || RT_MDIO */
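/*
 * Both MDIO accessors above use the same handshake on the MDIO_ACCESS
 * register: compose the address (and, for writes, data) word, write
 * it once with MDIO_CMD_ONGO clear, write it again with MDIO_CMD_ONGO
 * set to start the transaction, then poll until the hardware clears
 * MDIO_CMD_ONGO. A read then extracts the result by masking
 * MDIO_ACCESS with MDIO_PHY_DATA_MASK.
 */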
#ifdef IF_RT_PHY_SUPPORT
static void
rt_miibus_statchg(device_t dev)
{
	struct rt_softc *sc = device_get_softc(dev);
	struct mii_data *mii;

	mii = device_get_softc(sc->rt_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			/* XXX check link here */
			break;
		default:
			break;
		}
	}
}
#endif /* IF_RT_PHY_SUPPORT */
static device_method_t rt_dev_methods[] =
{
	DEVMETHOD(device_probe, rt_probe),
	DEVMETHOD(device_attach, rt_attach),
	DEVMETHOD(device_detach, rt_detach),
	DEVMETHOD(device_shutdown, rt_shutdown),
	DEVMETHOD(device_suspend, rt_suspend),
	DEVMETHOD(device_resume, rt_resume),

#ifdef IF_RT_PHY_SUPPORT
	/* MII interface */
	DEVMETHOD(miibus_readreg, rt_miibus_readreg),
	DEVMETHOD(miibus_writereg, rt_miibus_writereg),
	DEVMETHOD(miibus_statchg, rt_miibus_statchg),
#endif

	DEVMETHOD_END
};

static driver_t rt_driver =
{
	"rt",
	rt_dev_methods,
	sizeof(struct rt_softc)
};

static devclass_t rt_dev_class;

DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
#ifdef FDT
DRIVER_MODULE(rt, simplebus, rt_driver, rt_dev_class, 0, 0);
#endif

MODULE_DEPEND(rt, ether, 1, 1, 1);
MODULE_DEPEND(rt, miibus, 1, 1, 1);

#ifdef RT_MDIO
MODULE_DEPEND(rt, mdio, 1, 1, 1);
static int rtmdio_probe(device_t);
static int rtmdio_attach(device_t);
static int rtmdio_detach(device_t);

static struct mtx miibus_mtx;

MTX_SYSINIT(miibus_mtx, &miibus_mtx, "rt mii lock", MTX_DEF);

/*
 * Declare an additional, separate driver for accessing the MDIO bus.
 */
static device_method_t rtmdio_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, rtmdio_probe),
	DEVMETHOD(device_attach, rtmdio_attach),
	DEVMETHOD(device_detach, rtmdio_detach),

	/* Bus interface */
	DEVMETHOD(bus_add_child, device_add_child_ordered),

	/* MDIO access */
	DEVMETHOD(mdio_readreg, rt_miibus_readreg),
	DEVMETHOD(mdio_writereg, rt_miibus_writereg),

	DEVMETHOD_END
};

DEFINE_CLASS_0(rtmdio, rtmdio_driver, rtmdio_methods,
    sizeof(struct rt_softc));
static devclass_t rtmdio_devclass;

DRIVER_MODULE(miiproxy, rt, miiproxy_driver, miiproxy_devclass, 0, 0);
DRIVER_MODULE(rtmdio, simplebus, rtmdio_driver, rtmdio_devclass, 0, 0);
DRIVER_MODULE(mdio, rtmdio, mdio_driver, mdio_devclass, 0, 0);
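/*
 * The DRIVER_MODULE() lines above wire up the split MDIO design:
 * rtmdio hangs off simplebus and carries the mdio(4) bus, while
 * miiproxy attaches under rt itself and forwards the ethernet
 * driver's MII requests to PHYs living on the separate rtmdio
 * instance.
 */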
static int
rtmdio_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "ralink,rt2880-mdio"))
		return (ENXIO);

	device_set_desc(dev, "RT built-in ethernet interface, MDIO controller");
	return (0);
}
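/*
 * A matching FDT node looks roughly like this (node name illustrative;
 * only the status and compatible strings are checked above):
 *
 *	mdio {
 *		compatible = "ralink,rt2880-mdio";
 *		status = "okay";
 *	};
 */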
static int
rtmdio_attach(device_t dev)
{
	struct rt_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->mem_rid = 0;
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->mem_rid, RF_ACTIVE | RF_SHAREABLE);
	if (sc->mem == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->bst = rman_get_bustag(sc->mem);
	sc->bsh = rman_get_bushandle(sc->mem);

	bus_generic_probe(dev);
	bus_enumerate_hinted_children(dev);
	error = bus_generic_attach(dev);

fail:
	return (error);
}

static int
rtmdio_detach(device_t dev)