2 * Copyright (c) 2009, Oleksandr Tymoshenko
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
32 * AR71XX gigabit ethernet driver
34 #ifdef HAVE_KERNEL_OPTION_HEADERS
35 #include "opt_device_polling.h"
38 #include <sys/param.h>
39 #include <sys/endian.h>
40 #include <sys/systm.h>
41 #include <sys/sockio.h>
43 #include <sys/malloc.h>
44 #include <sys/kernel.h>
45 #include <sys/module.h>
46 #include <sys/socket.h>
47 #include <sys/taskqueue.h>
48 #include <sys/sysctl.h>
51 #include <net/if_arp.h>
52 #include <net/ethernet.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 #include <net/if_types.h>
59 #include <machine/bus.h>
60 #include <machine/cache.h>
61 #include <machine/resource.h>
62 #include <vm/vm_param.h>
65 #include <machine/pmap.h>
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
72 #include <dev/pci/pcireg.h>
73 #include <dev/pci/pcivar.h>
/* Module dependencies: arge needs the ether and miibus kernel support. */
75 MODULE_DEPEND(arge, ether, 1, 1, 1);
76 MODULE_DEPEND(arge, miibus, 1, 1, 1);
78 #include "miibus_if.h"
80 #include <mips/atheros/ar71xxreg.h>
81 #include <mips/atheros/if_argevar.h>
82 #include <mips/atheros/ar71xx_cpudef.h>
/*
 * Debug printf: maps to printf() in the debug build and to a no-op
 * otherwise.  NOTE(review): the #if/#else guard around these two
 * definitions is not visible in this excerpt — confirm against the
 * full file.
 */
86 #define dprintf printf
88 #define dprintf(x, arg...)
/*
 * Forward declarations for the driver's newbus entry points, MII
 * accessors, interrupt/DMA helpers and ring management routines.
 */
91 static int arge_attach(device_t);
92 static int arge_detach(device_t);
93 static void arge_flush_ddr(struct arge_softc *);
94 static int arge_ifmedia_upd(struct ifnet *);
95 static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
96 static int arge_ioctl(struct ifnet *, u_long, caddr_t);
97 static void arge_init(void *);
98 static void arge_init_locked(struct arge_softc *);
99 static void arge_link_task(void *, int);
100 static void arge_set_pll(struct arge_softc *, int, int);
101 static int arge_miibus_readreg(device_t, int, int);
102 static void arge_miibus_statchg(device_t);
103 static int arge_miibus_writereg(device_t, int, int, int);
104 static int arge_probe(device_t);
105 static void arge_reset_dma(struct arge_softc *);
106 static int arge_resume(device_t);
107 static int arge_rx_ring_init(struct arge_softc *);
108 static int arge_tx_ring_init(struct arge_softc *);
/* arge_poll is only built when DEVICE_POLLING is configured. */
109 #ifdef DEVICE_POLLING
110 static int arge_poll(struct ifnet *, enum poll_cmd, int);
112 static int arge_shutdown(device_t);
113 static void arge_start(struct ifnet *);
114 static void arge_start_locked(struct ifnet *);
115 static void arge_stop(struct arge_softc *);
116 static int arge_suspend(device_t);
118 static int arge_rx_locked(struct arge_softc *);
119 static void arge_tx_locked(struct arge_softc *);
120 static void arge_intr(void *);
121 static int arge_intr_filter(void *);
122 static void arge_tick(void *);
125 * ifmedia callbacks for multiPHY MAC
127 void arge_multiphy_mediastatus(struct ifnet *, struct ifmediareq *);
128 int arge_multiphy_mediachange(struct ifnet *);
/* busdma helpers: tag/map setup, ring buffer allocation and fixup. */
130 static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
131 static int arge_dma_alloc(struct arge_softc *);
132 static void arge_dma_free(struct arge_softc *);
133 static int arge_newbuf(struct arge_softc *, int);
134 static __inline void arge_fixup_rx(struct mbuf *);
/*
 * Newbus method table: maps generic device, bus and MII interface
 * operations onto this driver's implementations.
 */
136 static device_method_t arge_methods[] = {
137 /* Device interface */
138 DEVMETHOD(device_probe, arge_probe),
139 DEVMETHOD(device_attach, arge_attach),
140 DEVMETHOD(device_detach, arge_detach),
141 DEVMETHOD(device_suspend, arge_suspend),
142 DEVMETHOD(device_resume, arge_resume),
143 DEVMETHOD(device_shutdown, arge_shutdown),
/* Bus interface — generic implementations suffice. */
146 DEVMETHOD(bus_print_child, bus_generic_print_child),
147 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* MII interface — register access and link-state change callback. */
150 DEVMETHOD(miibus_readreg, arge_miibus_readreg),
151 DEVMETHOD(miibus_writereg, arge_miibus_writereg),
152 DEVMETHOD(miibus_statchg, arge_miibus_statchg),
/*
 * Driver declaration and registration: arge attaches to the nexus bus,
 * and miibus attaches beneath arge for PHY management.
 */
157 static driver_t arge_driver = {
160 sizeof(struct arge_softc)
163 static devclass_t arge_devclass;
165 DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0);
166 DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0);
169 * RedBoot passes MAC address to entry point as environment
170 * variable. platform_start parses it and stores in this variable
/* Base MAC address parsed from the RedBoot environment (see comment above). */
172 extern uint32_t ar711_base_mac[ETHER_ADDR_LEN];
/* Serializes access to the shared MII management registers. */
174 static struct mtx miibus_mtx;
176 MTX_SYSINIT(miibus_mtx, &miibus_mtx, "arge mii lock", MTX_DEF);
/*
 * Flush the DDR FIFO for this MAC's gigabit engine, selecting the
 * GE0 or GE1 flush routine by MAC unit number.
 */
183 arge_flush_ddr(struct arge_softc *sc)
185 if (sc->arge_mac_unit == 0)
186 ar71xx_device_flush_ddr_ge0();
188 ar71xx_device_flush_ddr_ge1();
/*
 * Device probe: set the human-readable device description.  The
 * return statement is not visible in this excerpt.
 */
192 arge_probe(device_t dev)
195 device_set_desc(dev, "Atheros AR71xx built-in ethernet interface");
/*
 * Register per-device sysctl knobs under the device's sysctl tree:
 * a debug-flags word and counters of aligned/unaligned TX packets
 * (the latter are incremented in arge_encap()).
 */
200 arge_attach_sysctl(device_t dev)
202 struct arge_softc *sc = device_get_softc(dev);
203 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
204 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
206 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
207 "debug", CTLFLAG_RW, &sc->arge_debug, 0,
208 "arge interface debugging flags");
210 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
211 "tx_pkts_aligned", CTLFLAG_RW, &sc->stats.tx_pkts_aligned, 0,
212 "number of TX aligned packets");
214 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
215 "tx_pkts_unaligned", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned, 0,
216 "number of TX unaligned packets");
/*
 * Device attach: resolve PHY mask and media/duplex hints, allocate
 * bus resources and DMA memory, reset and program the MAC block,
 * derive the station address, attach the ethernet interface and hook
 * the interrupt.  Only MAC units 0 and 1 are supported.
 */
220 arge_attach(device_t dev)
222 uint8_t eaddr[ETHER_ADDR_LEN];
224 struct arge_softc *sc;
225 int error = 0, rid, phymask;
227 int is_base_mac_empty, i, phys_total;
230 sc = device_get_softc(dev);
232 sc->arge_mac_unit = device_get_unit(dev);
234 KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)),
235 ("if_arge: Only MAC0 and MAC1 supported"));
/* Determine which of the PHYs this unit should drive ("phymask" hint). */
238 * Get which PHY of 5 available we should use for this unit
240 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
241 "phymask", &phymask) != 0) {
243 * Use port 4 (WAN) for GE0. For any other port use
244 * its PHY the same as its unit number
246 if (sc->arge_mac_unit == 0)
249 /* Use all phys up to 4 */
250 phymask = (1 << 4) - 1;
252 device_printf(dev, "No PHY specified, using mask %d\n", phymask);
/* Media/duplex hints: "media" selects 1000baseT vs 100baseTX, "fduplex"
 * selects full duplex; defaults apply when the hints are absent. */
256 * Get default media & duplex mode, by default its Base100T
259 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
260 "media", &hint) != 0)
264 sc->arge_media_type = IFM_1000_T;
266 sc->arge_media_type = IFM_100_TX;
268 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
269 "fduplex", &hint) != 0)
273 sc->arge_duplex_mode = IFM_FDX;
275 sc->arge_duplex_mode = 0;
277 sc->arge_phymask = phymask;
/* Softc lock, stats callout and link-change task share arge_mtx. */
279 mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
281 callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0);
282 TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc);
284 /* Map control/status registers. */
286 sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
287 &sc->arge_rid, RF_ACTIVE);
289 if (sc->arge_res == NULL) {
290 device_printf(dev, "couldn't map memory\n");
295 /* Allocate interrupts */
297 sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
298 RF_SHAREABLE | RF_ACTIVE);
300 if (sc->arge_irq == NULL) {
301 device_printf(dev, "couldn't map interrupt\n");
306 /* Allocate ifnet structure. */
307 ifp = sc->arge_ifp = if_alloc(IFT_ETHER);
310 device_printf(dev, "couldn't allocate ifnet structure\n");
316 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
317 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
318 ifp->if_ioctl = arge_ioctl;
319 ifp->if_start = arge_start;
320 ifp->if_init = arge_init;
321 sc->arge_if_flags = ifp->if_flags;
323 /* XXX: add real size */
324 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
325 ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
326 IFQ_SET_READY(&ifp->if_snd);
328 ifp->if_capenable = ifp->if_capabilities;
329 #ifdef DEVICE_POLLING
330 ifp->if_capabilities |= IFCAP_POLLING;
/* Station address comes from the RedBoot-provided base MAC; if that is
 * all zeroes, fall back to a randomly generated address. */
333 is_base_mac_empty = 1;
334 for (i = 0; i < ETHER_ADDR_LEN; i++) {
335 eaddr[i] = ar711_base_mac[i] & 0xff;
337 is_base_mac_empty = 0;
340 if (is_base_mac_empty) {
342 * No MAC address configured. Generate the random one.
346 "Generating random ethernet address.\n");
352 eaddr[3] = (rnd >> 24) & 0xff;
353 eaddr[4] = (rnd >> 16) & 0xff;
354 eaddr[5] = (rnd >> 8) & 0xff;
/* Give MAC1 a distinct address by offsetting the last octet. */
357 if (sc->arge_mac_unit != 0)
358 eaddr[5] += sc->arge_mac_unit;
360 if (arge_dma_alloc(sc) != 0) {
365 /* Initialize the MAC block */
367 /* Step 1. Soft-reset MAC */
368 ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET);
371 /* Step 2. Punt the MAC core from the central reset register */
372 ar71xx_device_stop(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);
374 ar71xx_device_start(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);
376 /* Step 3. Reconfigure MAC block */
377 ARGE_WRITE(sc, AR71XX_MAC_CFG1,
378 MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE |
379 MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE);
381 reg = ARGE_READ(sc, AR71XX_MAC_CFG2);
382 reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD ;
383 ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg);
385 ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536);
/* Reset the MII management interface, then program its clock divider. */
388 ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET);
390 ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_CLOCK_DIV_28);
394 * Set all Ethernet address registers to the same initial values
395 * set all four addresses to 66-88-aa-cc-dd-ee
397 ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1,
398 (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]);
399 ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (eaddr[0] << 8) | eaddr[1]);
/* FIFO configuration and default RX filter match/mask values. */
401 ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0,
402 FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT);
403 ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000);
404 ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff);
406 ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH,
407 FIFO_RX_FILTMATCH_DEFAULT);
409 ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
410 FIFO_RX_FILTMASK_DEFAULT);
413 * Check if we have single-PHY MAC or multi-PHY
416 for (i = 0; i < ARGE_NPHY; i++)
417 if (phymask & (1 << i))
420 if (phys_total == 0) {
/* Single PHY: probe it via miibus; multiple PHYs: use a fixed
 * ifmedia with the hinted media/duplex and program the PLL now. */
425 if (phys_total == 1) {
427 if (mii_phy_probe(dev, &sc->arge_miibus,
428 arge_ifmedia_upd, arge_ifmedia_sts)) {
429 device_printf(dev, "MII without any phy!\n");
435 ifmedia_init(&sc->arge_ifmedia, 0,
436 arge_multiphy_mediachange,
437 arge_multiphy_mediastatus);
438 ifmedia_add(&sc->arge_ifmedia,
439 IFM_ETHER | sc->arge_media_type | sc->arge_duplex_mode,
441 ifmedia_set(&sc->arge_ifmedia,
442 IFM_ETHER | sc->arge_media_type | sc->arge_duplex_mode);
443 arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
446 /* Call MI attach routine. */
447 ether_ifattach(ifp, eaddr);
449 /* Hook interrupt last to avoid having to lock softc */
450 error = bus_setup_intr(dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE,
451 arge_intr_filter, arge_intr, sc, &sc->arge_intrhand);
454 device_printf(dev, "couldn't set up irq\n");
459 /* setup sysctl variables */
460 arge_attach_sysctl(dev);
/*
 * Device detach: undo everything arge_attach() did — deregister
 * polling, drain the link task, detach children (miibus), tear down
 * the interrupt, release bus resources and destroy the mutex.
 */
470 arge_detach(device_t dev)
472 struct arge_softc *sc = device_get_softc(dev);
473 struct ifnet *ifp = sc->arge_ifp;
475 KASSERT(mtx_initialized(&sc->arge_mtx), ("arge mutex not initialized"));
477 /* These should only be active if attach succeeded */
478 if (device_is_attached(dev)) {
481 #ifdef DEVICE_POLLING
482 if (ifp->if_capenable & IFCAP_POLLING)
483 ether_poll_deregister(ifp);
/* Make sure no queued link-change task runs after we are gone. */
488 taskqueue_drain(taskqueue_swi, &sc->arge_link_task);
493 device_delete_child(dev, sc->arge_miibus);
495 bus_generic_detach(dev);
497 if (sc->arge_intrhand)
498 bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand);
501 bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid,
509 mtx_destroy(&sc->arge_mtx);
/* Suspend is not implemented — deliberately panics if invoked. */
516 arge_suspend(device_t dev)
519 panic("%s", __func__);
/* Resume is not implemented — deliberately panics if invoked. */
524 arge_resume(device_t dev)
527 panic("%s", __func__);
/*
 * System shutdown hook.  Fetches the softc; the actual stop logic
 * (presumably locking and calling arge_stop()) is not visible in
 * this excerpt.
 */
532 arge_shutdown(device_t dev)
534 struct arge_softc *sc;
536 sc = device_get_softc(dev);
/*
 * MII read: issue a read command for (phy, reg) through the MAC's
 * MII management registers, busy-wait (bounded by ARGE_MII_TIMEOUT)
 * for the indicator to clear, then fetch the 16-bit result.  Access
 * is serialized by miibus_mtx since both MACs share the MII block.
 * PHYs not in this unit's phymask are ignored.
 */
546 arge_miibus_readreg(device_t dev, int phy, int reg)
548 struct arge_softc * sc = device_get_softc(dev);
550 uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT)
551 | (reg & MAC_MII_REG_MASK);
553 if ((sc->arge_phymask & (1 << phy)) == 0)
556 mtx_lock(&miibus_mtx);
557 ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
558 ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
559 ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ);
561 i = ARGE_MII_TIMEOUT;
562 while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) &
563 MAC_MII_INDICATOR_BUSY) && (i--))
/* Timed out waiting for the MII unit to go idle. */
567 mtx_unlock(&miibus_mtx);
568 dprintf("%s timedout\n", __func__);
569 /* XXX: return an errno instead? */
573 result = ARGE_MII_READ(AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK;
574 ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
575 mtx_unlock(&miibus_mtx);
577 dprintf("%s: phy=%d, reg=%02x, value[%08x]=%04x\n", __func__,
578 phy, reg, addr, result);
/*
 * MII write: program (phy, reg) with data through the shared MII
 * management block and busy-wait (bounded by ARGE_MII_TIMEOUT) until
 * the write completes.  Serialized by miibus_mtx; PHYs outside this
 * unit's phymask are ignored.
 */
584 arge_miibus_writereg(device_t dev, int phy, int reg, int data)
586 struct arge_softc * sc = device_get_softc(dev);
589 (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK);
592 if ((sc->arge_phymask & (1 << phy)) == 0)
595 dprintf("%s: phy=%d, reg=%02x, value=%04x\n", __func__,
598 mtx_lock(&miibus_mtx);
599 ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
600 ARGE_MII_WRITE(AR71XX_MAC_MII_CONTROL, data);
602 i = ARGE_MII_TIMEOUT;
603 while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) &
604 MAC_MII_INDICATOR_BUSY) && (i--))
/* Timed out waiting for the MII unit to go idle. */
607 mtx_unlock(&miibus_mtx);
610 dprintf("%s timedout\n", __func__);
611 /* XXX: return an errno instead? */
/*
 * MII status-change callback: defer the actual link handling to
 * arge_link_task() on the software-interrupt taskqueue, since this
 * callback may run in a context where PLL reprogramming is unsafe.
 */
619 arge_miibus_statchg(device_t dev)
621 struct arge_softc *sc;
623 sc = device_get_softc(dev);
624 taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task);
/*
 * Deferred link-change handler: when the interface is running and the
 * MII reports an active medium, record link-up and reprogram the PLL
 * for the negotiated media/duplex; otherwise record link-down.
 */
628 arge_link_task(void *arg, int pending)
630 struct arge_softc *sc;
631 struct mii_data *mii;
633 uint32_t media, duplex;
635 sc = (struct arge_softc *)arg;
638 mii = device_get_softc(sc->arge_miibus);
640 if (mii == NULL || ifp == NULL ||
641 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
646 if (mii->mii_media_status & IFM_ACTIVE) {
648 media = IFM_SUBTYPE(mii->mii_media_active);
650 if (media != IFM_NONE) {
651 sc->arge_link_status = 1;
652 duplex = mii->mii_media_active & IFM_GMASK;
653 arge_set_pll(sc, media, duplex);
656 sc->arge_link_status = 0;
/*
 * Program the MAC interface mode (10/100 vs 1000), duplex bit,
 * IFCONTROL speed bit and RX filter byte-mode for the given media,
 * then set the SoC PLL for this MAC's speed.  The switch over the
 * media subtypes is only partially visible in this excerpt.
 */
662 arge_set_pll(struct arge_softc *sc, int media, int duplex)
664 uint32_t cfg, ifcontrol, rx_filtmask;
667 cfg = ARGE_READ(sc, AR71XX_MAC_CFG2);
/* Clear the mode/duplex bits; the correct ones are set below. */
668 cfg &= ~(MAC_CFG2_IFACE_MODE_1000
669 | MAC_CFG2_IFACE_MODE_10_100
670 | MAC_CFG2_FULL_DUPLEX);
672 if (duplex == IFM_FDX)
673 cfg |= MAC_CFG2_FULL_DUPLEX;
675 ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL);
676 ifcontrol &= ~MAC_IFCONTROL_SPEED;
678 ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK);
679 rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE;
/* Per-media configuration (enclosing switch cases not all visible). */
683 cfg |= MAC_CFG2_IFACE_MODE_10_100;
687 cfg |= MAC_CFG2_IFACE_MODE_10_100;
688 ifcontrol |= MAC_IFCONTROL_SPEED;
693 cfg |= MAC_CFG2_IFACE_MODE_1000;
694 rx_filtmask |= FIFO_RX_MASK_BYTE_MODE;
699 device_printf(sc->arge_dev,
700 "Unknown media %d\n", media);
703 ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD,
706 ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg);
707 ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol);
708 ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
711 /* set PLL registers */
712 if (sc->arge_mac_unit == 0)
713 ar71xx_device_set_pll_ge0(if_speed);
715 ar71xx_device_set_pll_ge1(if_speed);
/*
 * Quiesce the DMA engine: disable RX/TX, clear the descriptor base
 * registers, then acknowledge any pending packet, bus-error and
 * overflow/underrun status bits so a fresh start sees a clean state.
 */
720 arge_reset_dma(struct arge_softc *sc)
722 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0);
723 ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0);
725 ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0);
726 ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0);
728 /* Clear all possible RX interrupts */
729 while(ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD)
730 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);
733 * Clear all possible TX interrupts
735 while(ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT)
736 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);
/* Acknowledge error conditions as well. */
741 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS,
742 DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW);
743 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS,
744 DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN);
/* Body of arge_init(): if_init entry point — takes the softc lock
 * (locking lines and the function header are not visible in this
 * excerpt) and runs the locked initialization. */
752 struct arge_softc *sc = xsc;
755 arge_init_locked(sc);
/*
 * Locked (re)initialization: set up RX and TX rings, reset link state
 * (forced up for multi-PHY configurations), mark the interface
 * running, start the stats callout, point the DMA engine at the
 * rings, enable RX and unmask interrupts.  Caller holds arge_mtx.
 */
760 arge_init_locked(struct arge_softc *sc)
762 struct ifnet *ifp = sc->arge_ifp;
763 struct mii_data *mii;
765 ARGE_LOCK_ASSERT(sc);
769 /* Init circular RX list. */
770 if (arge_rx_ring_init(sc) != 0) {
771 device_printf(sc->arge_dev,
772 "initialization failed: no memory for rx buffers\n");
777 /* Init tx descriptors. */
778 arge_tx_ring_init(sc);
/* With a miibus-managed PHY the link comes up via arge_link_task();
 * multi-PHY interfaces are assumed to always have link. */
783 if (sc->arge_miibus) {
784 sc->arge_link_status = 0;
785 mii = device_get_softc(sc->arge_miibus);
790 * Sun always shines over multiPHY interface
792 sc->arge_link_status = 1;
795 ifp->if_drv_flags |= IFF_DRV_RUNNING;
796 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
799 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
/* Hand the ring base addresses to the DMA engine. */
801 ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0));
802 ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0));
804 /* Start listening */
805 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
807 /* Enable interrupts */
808 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
812 * Return whether the mbuf chain is correctly aligned
813 * for the arge TX engine.
815 * The TX engine requires each fragment to be aligned to a
816 * 4 byte boundary and the size of each fragment except
817 * the last to be a multiple of 4 bytes.
820 arge_mbuf_chain_is_tx_aligned(struct mbuf *m0)
/* Walk the chain; any misaligned data pointer or (non-final) fragment
 * whose length is not a multiple of 4 fails the check. */
824 for (m = m0; m != NULL; m = m->m_next) {
825 if((mtod(m, intptr_t) & 3) != 0)
827 if ((m->m_next != NULL) && ((m->m_len & 0x03) != 0))
834 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
835 * pointers to the fragment pointers.
838 arge_encap(struct arge_softc *sc, struct mbuf **m_head)
840 struct arge_txdesc *txd;
841 struct arge_desc *desc, *prev_desc;
842 bus_dma_segment_t txsegs[ARGE_MAXFRAGS];
843 int error, i, nsegs, prod, prev_prod;
846 ARGE_LOCK_ASSERT(sc);
849 * Fix mbuf chain, all fragments should be 4 bytes aligned and
/* Defragment chains the TX engine cannot accept; count both cases
 * for the tx_pkts_aligned/unaligned sysctls. */
853 if (! arge_mbuf_chain_is_tx_aligned(m)) {
854 sc->stats.tx_pkts_unaligned++;
855 m = m_defrag(*m_head, M_DONTWAIT);
862 sc->stats.tx_pkts_aligned++;
864 prod = sc->arge_cdata.arge_tx_prod;
865 txd = &sc->arge_cdata.arge_txdesc[prod];
866 error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag,
867 txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
/* EFBIG: too many segments (handling not fully visible here). */
869 if (error == EFBIG) {
871 } else if (error != 0)
880 /* Check number of available descriptors. */
881 if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 1)) {
882 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
887 bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
888 BUS_DMASYNC_PREWRITE);
891 * Make a list of descriptors for this packet. DMA controller will
892 * walk through it while arge_link is not zero.
895 desc = prev_desc = NULL;
896 for (i = 0; i < nsegs; i++) {
897 desc = &sc->arge_rdata.arge_tx_ring[prod];
898 desc->packet_ctrl = ARGE_DMASIZE(txsegs[i].ds_len);
/* Hardware requires 4-byte-aligned buffer addresses. */
900 if (txsegs[i].ds_addr & 3)
901 panic("TX packet address unaligned\n");
903 desc->packet_addr = txsegs[i].ds_addr;
905 /* link with previous descriptor */
907 prev_desc->packet_ctrl |= ARGE_DESC_MORE;
909 sc->arge_cdata.arge_tx_cnt++;
911 ARGE_INC(prod, ARGE_TX_RING_COUNT);
914 /* Update producer index. */
915 sc->arge_cdata.arge_tx_prod = prod;
917 /* Sync descriptors. */
918 bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
919 sc->arge_cdata.arge_tx_ring_map,
920 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
922 /* Start transmitting */
923 ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN);
/* if_start entry point: wrapper that calls the locked variant
 * (the lock/unlock lines are not visible in this excerpt). */
928 arge_start(struct ifnet *ifp)
930 struct arge_softc *sc;
935 arge_start_locked(ifp);
/*
 * Locked transmit start: while the interface is running, not
 * flow-blocked and the link is up, dequeue packets and hand them to
 * arge_encap() as long as ring descriptors remain; on encap failure,
 * requeue the packet and set OACTIVE.  Caller holds arge_mtx.
 */
940 arge_start_locked(struct ifnet *ifp)
942 struct arge_softc *sc;
948 ARGE_LOCK_ASSERT(sc);
950 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
951 IFF_DRV_RUNNING || sc->arge_link_status == 0 )
/* Keep two descriptors in reserve so the ring never fills completely. */
956 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
957 sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) {
958 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
964 * Pack the data into the transmit ring.
966 if (arge_encap(sc, &m_head)) {
969 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
970 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
976 * If there's a BPF listener, bounce a copy of this frame
979 ETHER_BPF_MTAP(ifp, m_head);
/*
 * Stop the interface: clear RUNNING/OACTIVE, cancel the stats
 * callout and mask all DMA interrupts.  Caller holds arge_mtx.
 */
984 arge_stop(struct arge_softc *sc)
988 ARGE_LOCK_ASSERT(sc);
991 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
993 callout_stop(&sc->arge_stat_callout);
995 /* mask out interrupts */
996 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
/*
 * ioctl handler: flag changes (bring the interface up/down), media
 * ioctls (routed to the MII or the fixed multi-PHY ifmedia), polling
 * capability toggling, and the default ether_ioctl() fallthrough.
 * The switch statement and some case labels are not visible in this
 * excerpt.
 */
1003 arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1005 struct arge_softc *sc = ifp->if_softc;
1006 struct ifreq *ifr = (struct ifreq *) data;
1007 struct mii_data *mii;
1009 #ifdef DEVICE_POLLING
/* SIOCSIFFLAGS: start, restart or stop depending on IFF_UP/RUNNING. */
1016 if ((ifp->if_flags & IFF_UP) != 0) {
1017 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1018 if (((ifp->if_flags ^ sc->arge_if_flags)
1019 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
1020 /* XXX: handle promisc & multi flags */
1024 if (!sc->arge_detach)
1025 arge_init_locked(sc);
1027 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1028 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1031 sc->arge_if_flags = ifp->if_flags;
1037 /* XXX: implement SIOCDELMULTI */
/* Media ioctls: use the MII when present, else the fixed ifmedia. */
1042 if (sc->arge_miibus) {
1043 mii = device_get_softc(sc->arge_miibus);
1044 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1047 error = ifmedia_ioctl(ifp, ifr, &sc->arge_ifmedia, command);
1050 /* XXX: Check other capabilities */
/* SIOCSIFCAP: switching polling on masks interrupts, and vice versa. */
1051 #ifdef DEVICE_POLLING
1052 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1053 if (mask & IFCAP_POLLING) {
1054 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1055 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
1056 error = ether_poll_register(arge_poll, ifp);
1060 ifp->if_capenable |= IFCAP_POLLING;
1063 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
1064 error = ether_poll_deregister(ifp);
1066 ifp->if_capenable &= ~IFCAP_POLLING;
1074 error = ether_ioctl(ifp, command, data);
1082 * Set media options.
1085 arge_ifmedia_upd(struct ifnet *ifp)
1087 struct arge_softc *sc;
1088 struct mii_data *mii;
1089 struct mii_softc *miisc;
1094 mii = device_get_softc(sc->arge_miibus);
/* Reset every attached PHY before renegotiating the media. */
1095 if (mii->mii_instance) {
1096 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1097 mii_phy_reset(miisc);
1099 error = mii_mediachg(mii);
1106 * Report current media status.
1109 arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1111 struct arge_softc *sc = ifp->if_softc;
1112 struct mii_data *mii;
1114 mii = device_get_softc(sc->arge_miibus);
/* Copy the MII's view of the active media and link status. */
1118 ifmr->ifm_active = mii->mii_media_active;
1119 ifmr->ifm_status = mii->mii_media_status;
/* Callback argument for arge_dmamap_cb(): receives the bus address
 * of a loaded DMA segment. */
1122 struct arge_dmamap_arg {
1123 bus_addr_t arge_busaddr;
/*
 * bus_dmamap_load() callback: stash the first segment's bus address
 * into the caller's arge_dmamap_arg.
 */
1127 arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1129 struct arge_dmamap_arg *ctx;
1134 ctx->arge_busaddr = segs[0].ds_addr;
/*
 * Create all busdma tags (parent, TX/RX descriptor rings, TX/RX
 * buffers), allocate and load the ring memory, and create per-buffer
 * DMA maps plus a spare RX map.  Returns 0 on success; error paths
 * (not all visible in this excerpt) report via device_printf().
 */
1138 arge_dma_alloc(struct arge_softc *sc)
1140 struct arge_dmamap_arg ctx;
1141 struct arge_txdesc *txd;
1142 struct arge_rxdesc *rxd;
1145 /* Create parent DMA tag. */
1146 error = bus_dma_tag_create(
1147 bus_get_dma_tag(sc->arge_dev), /* parent */
1148 1, 0, /* alignment, boundary */
1149 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1150 BUS_SPACE_MAXADDR, /* highaddr */
1151 NULL, NULL, /* filter, filterarg */
1152 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1154 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1156 NULL, NULL, /* lockfunc, lockarg */
1157 &sc->arge_cdata.arge_parent_tag);
1159 device_printf(sc->arge_dev, "failed to create parent DMA tag\n");
1162 /* Create tag for Tx ring. */
1163 error = bus_dma_tag_create(
1164 sc->arge_cdata.arge_parent_tag, /* parent */
1165 ARGE_RING_ALIGN, 0, /* alignment, boundary */
1166 BUS_SPACE_MAXADDR, /* lowaddr */
1167 BUS_SPACE_MAXADDR, /* highaddr */
1168 NULL, NULL, /* filter, filterarg */
1169 ARGE_TX_DMA_SIZE, /* maxsize */
1171 ARGE_TX_DMA_SIZE, /* maxsegsize */
1173 NULL, NULL, /* lockfunc, lockarg */
1174 &sc->arge_cdata.arge_tx_ring_tag);
1176 device_printf(sc->arge_dev, "failed to create Tx ring DMA tag\n");
1180 /* Create tag for Rx ring. */
1181 error = bus_dma_tag_create(
1182 sc->arge_cdata.arge_parent_tag, /* parent */
1183 ARGE_RING_ALIGN, 0, /* alignment, boundary */
1184 BUS_SPACE_MAXADDR, /* lowaddr */
1185 BUS_SPACE_MAXADDR, /* highaddr */
1186 NULL, NULL, /* filter, filterarg */
1187 ARGE_RX_DMA_SIZE, /* maxsize */
1189 ARGE_RX_DMA_SIZE, /* maxsegsize */
1191 NULL, NULL, /* lockfunc, lockarg */
1192 &sc->arge_cdata.arge_rx_ring_tag);
1194 device_printf(sc->arge_dev, "failed to create Rx ring DMA tag\n");
1198 /* Create tag for Tx buffers. */
1199 error = bus_dma_tag_create(
1200 sc->arge_cdata.arge_parent_tag, /* parent */
1201 sizeof(uint32_t), 0, /* alignment, boundary */
1202 BUS_SPACE_MAXADDR, /* lowaddr */
1203 BUS_SPACE_MAXADDR, /* highaddr */
1204 NULL, NULL, /* filter, filterarg */
1205 MCLBYTES * ARGE_MAXFRAGS, /* maxsize */
1206 ARGE_MAXFRAGS, /* nsegments */
1207 MCLBYTES, /* maxsegsize */
1209 NULL, NULL, /* lockfunc, lockarg */
1210 &sc->arge_cdata.arge_tx_tag);
1212 device_printf(sc->arge_dev, "failed to create Tx DMA tag\n");
1216 /* Create tag for Rx buffers. */
1217 error = bus_dma_tag_create(
1218 sc->arge_cdata.arge_parent_tag, /* parent */
1219 ARGE_RX_ALIGN, 0, /* alignment, boundary */
1220 BUS_SPACE_MAXADDR, /* lowaddr */
1221 BUS_SPACE_MAXADDR, /* highaddr */
1222 NULL, NULL, /* filter, filterarg */
1223 MCLBYTES, /* maxsize */
1224 ARGE_MAXFRAGS, /* nsegments */
1225 MCLBYTES, /* maxsegsize */
1227 NULL, NULL, /* lockfunc, lockarg */
1228 &sc->arge_cdata.arge_rx_tag);
1230 device_printf(sc->arge_dev, "failed to create Rx DMA tag\n");
1234 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1235 error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag,
1236 (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK |
1237 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_tx_ring_map);
1239 device_printf(sc->arge_dev,
1240 "failed to allocate DMA'able memory for Tx ring\n");
1244 ctx.arge_busaddr = 0;
1245 error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag,
1246 sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring,
1247 ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
1248 if (error != 0 || ctx.arge_busaddr == 0) {
1249 device_printf(sc->arge_dev,
1250 "failed to load DMA'able memory for Tx ring\n");
/* Physical (bus) address of the TX ring, handed to the DMA engine. */
1253 sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr;
1255 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1256 error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag,
1257 (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK |
1258 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_rx_ring_map);
1260 device_printf(sc->arge_dev,
1261 "failed to allocate DMA'able memory for Rx ring\n");
1265 ctx.arge_busaddr = 0;
1266 error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag,
1267 sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring,
1268 ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
1269 if (error != 0 || ctx.arge_busaddr == 0) {
1270 device_printf(sc->arge_dev,
1271 "failed to load DMA'able memory for Rx ring\n");
/* Physical (bus) address of the RX ring, handed to the DMA engine. */
1274 sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr;
1276 /* Create DMA maps for Tx buffers. */
1277 for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1278 txd = &sc->arge_cdata.arge_txdesc[i];
1280 txd->tx_dmamap = NULL;
1281 error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0,
1284 device_printf(sc->arge_dev,
1285 "failed to create Tx dmamap\n");
1289 /* Create DMA maps for Rx buffers. */
1290 if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
1291 &sc->arge_cdata.arge_rx_sparemap)) != 0) {
1292 device_printf(sc->arge_dev,
1293 "failed to create spare Rx dmamap\n");
1296 for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1297 rxd = &sc->arge_cdata.arge_rxdesc[i];
1299 rxd->rx_dmamap = NULL;
1300 error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
1303 device_printf(sc->arge_dev,
1304 "failed to create Rx dmamap\n");
/*
 * Tear down everything arge_dma_alloc() built, in reverse order:
 * unload/free/destroy the TX and RX ring resources, destroy all
 * per-buffer maps (including the spare RX map), then destroy the
 * buffer tags and finally the parent tag.  Every pointer is NULLed
 * so the routine is safe against partial allocations.
 */
1314 arge_dma_free(struct arge_softc *sc)
1316 struct arge_txdesc *txd;
1317 struct arge_rxdesc *rxd;
/* TX descriptor ring: unload map, free memory, destroy tag. */
1321 if (sc->arge_cdata.arge_tx_ring_tag) {
1322 if (sc->arge_cdata.arge_tx_ring_map)
1323 bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag,
1324 sc->arge_cdata.arge_tx_ring_map);
1325 if (sc->arge_cdata.arge_tx_ring_map &&
1326 sc->arge_rdata.arge_tx_ring)
1327 bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag,
1328 sc->arge_rdata.arge_tx_ring,
1329 sc->arge_cdata.arge_tx_ring_map);
1330 sc->arge_rdata.arge_tx_ring = NULL;
1331 sc->arge_cdata.arge_tx_ring_map = NULL;
1332 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag);
1333 sc->arge_cdata.arge_tx_ring_tag = NULL;
/* RX descriptor ring: same sequence as the TX ring above. */
1336 if (sc->arge_cdata.arge_rx_ring_tag) {
1337 if (sc->arge_cdata.arge_rx_ring_map)
1338 bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag,
1339 sc->arge_cdata.arge_rx_ring_map);
1340 if (sc->arge_cdata.arge_rx_ring_map &&
1341 sc->arge_rdata.arge_rx_ring)
1342 bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag,
1343 sc->arge_rdata.arge_rx_ring,
1344 sc->arge_cdata.arge_rx_ring_map);
1345 sc->arge_rdata.arge_rx_ring = NULL;
1346 sc->arge_cdata.arge_rx_ring_map = NULL;
1347 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag);
1348 sc->arge_cdata.arge_rx_ring_tag = NULL;
/* Per-buffer TX maps and tag. */
1351 if (sc->arge_cdata.arge_tx_tag) {
1352 for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1353 txd = &sc->arge_cdata.arge_txdesc[i];
1354 if (txd->tx_dmamap) {
1355 bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag,
1357 txd->tx_dmamap = NULL;
1360 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag);
1361 sc->arge_cdata.arge_tx_tag = NULL;
/* Per-buffer RX maps, the spare map, and the RX tag. */
1364 if (sc->arge_cdata.arge_rx_tag) {
1365 for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1366 rxd = &sc->arge_cdata.arge_rxdesc[i];
1367 if (rxd->rx_dmamap) {
1368 bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1370 rxd->rx_dmamap = NULL;
1373 if (sc->arge_cdata.arge_rx_sparemap) {
1374 bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1375 sc->arge_cdata.arge_rx_sparemap);
1376 sc->arge_cdata.arge_rx_sparemap = 0;
1378 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag);
1379 sc->arge_cdata.arge_rx_tag = NULL;
/* Finally the parent tag. */
1382 if (sc->arge_cdata.arge_parent_tag) {
1383 bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag);
1384 sc->arge_cdata.arge_parent_tag = NULL;
1389 * Initialize the transmit descriptors.
1392 arge_tx_ring_init(struct arge_softc *sc)
1394 struct arge_ring_data *rd;
1395 struct arge_txdesc *txd;
/* Reset software ring state: producer/consumer indices and counters. */
1399 sc->arge_cdata.arge_tx_prod = 0;
1400 sc->arge_cdata.arge_tx_cons = 0;
1401 sc->arge_cdata.arge_tx_cnt = 0;
1402 sc->arge_cdata.arge_tx_pkts = 0;
1404 rd = &sc->arge_rdata;
1405 bzero(rd->arge_tx_ring, sizeof(rd->arge_tx_ring));
/* Chain descriptors into a circular list: the last links back to
 * the first; all start out empty. */
1406 for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1407 if (i == ARGE_TX_RING_COUNT - 1)
1408 addr = ARGE_TX_RING_ADDR(sc, 0);
1410 addr = ARGE_TX_RING_ADDR(sc, i + 1);
1411 rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY;
1412 rd->arge_tx_ring[i].next_desc = addr;
1413 txd = &sc->arge_cdata.arge_txdesc[i];
1417 bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1418 sc->arge_cdata.arge_tx_ring_map,
1419 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1425 * Initialize the RX descriptors and allocate mbufs for them. Note that
1426 * we arrange the descriptors in a closed ring, so that the last descriptor
1427 * points back to the first.
1430 arge_rx_ring_init(struct arge_softc *sc)
1432 struct arge_ring_data *rd;
1433 struct arge_rxdesc *rxd;
1437 sc->arge_cdata.arge_rx_cons = 0;
1439 rd = &sc->arge_rdata;
1440 bzero(rd->arge_rx_ring, sizeof(rd->arge_rx_ring));
1441 for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1442 rxd = &sc->arge_cdata.arge_rxdesc[i];
1444 rxd->desc = &rd->arge_rx_ring[i];
1445 if (i == ARGE_RX_RING_COUNT - 1)
1446 addr = ARGE_RX_RING_ADDR(sc, 0);
1448 addr = ARGE_RX_RING_ADDR(sc, i + 1);
1449 rd->arge_rx_ring[i].next_desc = addr;
/* Attach an mbuf cluster to each descriptor; bail out on failure. */
1450 if (arge_newbuf(sc, i) != 0) {
1455 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1456 sc->arge_cdata.arge_rx_ring_map,
1457 BUS_DMASYNC_PREWRITE);
1463 * Initialize an RX descriptor and attach an MBUF cluster.
/*
 * Allocate an mbuf cluster, DMA-load it via the spare map, and attach
 * it to RX descriptor slot 'idx'. On success the slot's old map becomes
 * the new spare. Returns non-zero on allocation/load failure (caller
 * keeps the old buffer in that case).
 */
1466 arge_newbuf(struct arge_softc *sc, int idx)
1468 struct arge_desc *desc;
1469 struct arge_rxdesc *rxd;
1471 bus_dma_segment_t segs[1];
1475 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1478 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Reserve 8 bytes at the head; presumably for controller alignment
 * requirements -- NOTE(review): confirm against hardware docs. */
1479 m_adj(m, sizeof(uint64_t));
/* Load into the spare map first so the current buffer survives a
 * failed load. */
1481 if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag,
1482 sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1486 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1488 rxd = &sc->arge_cdata.arge_rxdesc[idx];
1489 if (rxd->rx_m != NULL) {
1490 bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap);
/* Swap the loaded spare map into the slot; old map becomes spare. */
1492 map = rxd->rx_dmamap;
1493 rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap;
1494 sc->arge_cdata.arge_rx_sparemap = map;
/* The DMA engine requires 4-byte-aligned buffer addresses. */
1497 if (segs[0].ds_addr & 3)
1498 panic("RX packet address unaligned");
1499 desc->packet_addr = segs[0].ds_addr;
1500 desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len);
1502 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1503 sc->arge_cdata.arge_rx_ring_map,
1504 BUS_DMASYNC_PREWRITE);
/*
 * Shift a received frame's payload by ETHER_ALIGN bytes (16-bit copy
 * loop plus a trailing odd byte) and pull m_data back, so the IP
 * header ends up 4-byte aligned for the upper layers.
 */
1509 static __inline void
1510 arge_fixup_rx(struct mbuf *m)
1513 uint16_t *src, *dst;
1515 src = mtod(m, uint16_t *);
1518 for (i = 0; i < m->m_len / sizeof(uint16_t); i++) {
/* Copy the leftover byte when the length is odd. */
1522 if (m->m_len % sizeof(uint16_t))
1523 *(uint8_t *)dst = *(uint8_t *)src;
/* Data was moved down ETHER_ALIGN bytes; adjust the data pointer. */
1525 m->m_data -= ETHER_ALIGN;
1528 #ifdef DEVICE_POLLING
/*
 * polling(4) entry point: service the RX ring when the interface is
 * running and report the number of packets received.
 */
1530 arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1532 struct arge_softc *sc = ifp->if_softc;
1535 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1538 rx_npkts = arge_rx_locked(sc);
1544 #endif /* DEVICE_POLLING */
/*
 * Reclaim completed TX descriptors: walk from consumer to producer,
 * ack each sent packet to the hardware, unload and free the DMA
 * state, and advance the consumer index. Caller must hold the
 * softc lock (asserted below).
 */
1548 arge_tx_locked(struct arge_softc *sc)
1550 struct arge_txdesc *txd;
1551 struct arge_desc *cur_tx;
1556 ARGE_LOCK_ASSERT(sc);
1558 cons = sc->arge_cdata.arge_tx_cons;
1559 prod = sc->arge_cdata.arge_tx_prod;
/* Pull the descriptor ring up to date before reading it. */
1563 bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1564 sc->arge_cdata.arge_tx_ring_map,
1565 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1569 * Go through our tx list and free mbufs for those
1570 * frames that have been transmitted.
1572 for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) {
1573 cur_tx = &sc->arge_rdata.arge_tx_ring[cons];
1574 ctrl = cur_tx->packet_ctrl;
1575 /* Check if descriptor has "finished" flag */
1576 if ((ctrl & ARGE_DESC_EMPTY) == 0)
/* Ack the completion to the DMA engine. */
1579 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);
/* One fewer descriptor in flight; ring has room again. */
1581 sc->arge_cdata.arge_tx_cnt--;
1582 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1584 txd = &sc->arge_cdata.arge_txdesc[cons];
1588 bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
1589 BUS_DMASYNC_POSTWRITE);
1590 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
1592 /* Free only if it's first descriptor in list */
1597 /* reset descriptor */
1598 cur_tx->packet_addr = 0;
1601 sc->arge_cdata.arge_tx_cons = cons;
/* Push the cleared descriptors back out to the device. */
1603 bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1604 sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE);
/*
 * Drain the RX ring: for each filled descriptor, ack the hardware,
 * hand the mbuf to the network stack, then refill the consumed slots
 * with fresh buffers. Caller must hold the softc lock.
 */
1609 arge_rx_locked(struct arge_softc *sc)
1611 struct arge_rxdesc *rxd;
1612 struct ifnet *ifp = sc->arge_ifp;
1613 int cons, prog, packet_len, i;
1614 struct arge_desc *cur_rx;
1618 ARGE_LOCK_ASSERT(sc);
1620 cons = sc->arge_cdata.arge_rx_cons;
/* Make the device's descriptor writes visible before reading them. */
1622 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1623 sc->arge_cdata.arge_rx_ring_map,
1624 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1626 for (prog = 0; prog < ARGE_RX_RING_COUNT;
1627 ARGE_INC(cons, ARGE_RX_RING_COUNT)) {
1628 cur_rx = &sc->arge_rdata.arge_rx_ring[cons];
1629 rxd = &sc->arge_cdata.arge_rxdesc[cons];
/* EMPTY still set means the hardware has not filled this slot. */
1632 if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0)
/* Ack receipt of this packet to the DMA engine. */
1635 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);
1639 packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl);
1640 bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap,
1641 BUS_DMASYNC_POSTREAD);
1645 m->m_pkthdr.rcvif = ifp;
1646 /* Skip 4 bytes of CRC */
1647 m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
1652 (*ifp->if_input)(ifp, m);
1654 cur_rx->packet_addr = 0;
/* Refill every slot consumed above, starting at the old consumer. */
1659 i = sc->arge_cdata.arge_rx_cons;
1660 for (; prog > 0 ; prog--) {
1661 if (arge_newbuf(sc, i) != 0) {
1662 device_printf(sc->arge_dev,
1663 "Failed to allocate buffer\n");
1666 ARGE_INC(i, ARGE_RX_RING_COUNT);
1669 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1670 sc->arge_cdata.arge_rx_ring_map,
1671 BUS_DMASYNC_PREWRITE);
1673 sc->arge_cdata.arge_rx_cons = cons;
/*
 * Fast interrupt filter: if any interesting DMA interrupt bit is set,
 * latch the status into the softc, mask all DMA interrupts, and
 * schedule the threaded handler (arge_intr); otherwise report a
 * stray interrupt.
 */
1680 arge_intr_filter(void *arg)
1682 struct arge_softc *sc = arg;
1683 uint32_t status, ints;
1685 status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
1686 ints = ARGE_READ(sc, AR71XX_DMA_INTR);
1689 dprintf("int mask(filter) = %b\n", ints,
1690 "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
1691 "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1692 dprintf("status(filter) = %b\n", status,
1693 "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
1694 "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1697 if (status & DMA_INTR_ALL) {
/* Save status for the ithread and mask further DMA interrupts
 * until arge_intr() re-enables them. */
1698 sc->arge_intr_status |= status;
1699 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
1700 return (FILTER_SCHEDULE_THREAD);
1703 sc->arge_intr_status = 0;
1704 return (FILTER_STRAY);
/*
 * Threaded interrupt handler: combine the live status register with
 * the bits latched by arge_intr_filter(), service bus errors, RX/TX
 * completions, RX overflow and TX underrun, then clear the latched
 * status and unmask all DMA interrupts again.
 *
 * Fixes vs. previous revision: device_printf() messages were missing
 * the trailing newline, and the %b bit-name string omitted the name
 * for bit \10 (RX_BUS_ERROR), unlike the identical strings in
 * arge_intr_filter().
 */
1708 arge_intr(void *arg)
1710 struct arge_softc *sc = arg;
1713 status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
/* Merge in bits latched by the filter before interrupts were masked. */
1714 status |= sc->arge_intr_status;
1717 dprintf("int status(intr) = %b\n", status,
1718 "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
1719 "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1723 * Is it our interrupt at all?
1728 if (status & DMA_INTR_RX_BUS_ERROR) {
1729 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR);
1730 device_printf(sc->arge_dev, "RX bus error\n");
1734 if (status & DMA_INTR_TX_BUS_ERROR) {
1735 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR);
1736 device_printf(sc->arge_dev, "TX bus error\n");
1742 if (status & DMA_INTR_RX_PKT_RCVD)
1746 * RX overrun disables the receiver.
1747 * Clear indication and re-enable rx.
1749 if ( status & DMA_INTR_RX_OVERFLOW) {
1750 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW);
1751 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
1754 if (status & DMA_INTR_TX_PKT_SENT)
1757 * Underrun turns off TX. Clear underrun indication.
1758 * If there's anything left in the ring, reactivate the tx.
1760 if (status & DMA_INTR_TX_UNDERRUN) {
1761 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN);
1762 if (sc->arge_cdata.arge_tx_pkts > 0 ) {
1763 ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL,
1769 * We handled all bits, clear status
1771 sc->arge_intr_status = 0;
1774 * re-enable all interrupts
1776 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
/*
 * Periodic (once per second) callout: poll the MII bus when a miibus
 * child is attached, then reschedule itself. Runs with the softc
 * lock held (asserted below).
 */
1781 arge_tick(void *xsc)
1783 struct arge_softc *sc = xsc;
1784 struct mii_data *mii;
1786 ARGE_LOCK_ASSERT(sc);
1788 if (sc->arge_miibus) {
1789 mii = device_get_softc(sc->arge_miibus);
/* Re-arm for another tick one hz from now. */
1791 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
/*
 * ifmedia change handler for the multi-PHY case: only fixed Ethernet
 * media are accepted; IFM_AUTO is rejected with a console message.
 *
 * Fix vs. previous revision: the device_printf() message was missing
 * its trailing newline.
 */
1796 arge_multiphy_mediachange(struct ifnet *ifp)
1798 struct arge_softc *sc = ifp->if_softc;
1799 struct ifmedia *ifm = &sc->arge_ifmedia;
1800 struct ifmedia_entry *ife = ifm->ifm_cur;
1802 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1805 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
1806 device_printf(sc->arge_dev,
1807 "AUTO is not supported for multiphy MAC\n");
1818 arge_multiphy_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1820 struct arge_softc *sc = ifp->if_softc;
1822 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1823 ifmr->ifm_active = IFM_ETHER | sc->arge_media_type |
1824 sc->arge_duplex_mode;