/*-
 * Copyright (c) 2016 Hiroki Mori. All rights reserved.
 * Oleksandr Tymoshenko <gonzo@freebsd.org>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
31 #include "opt_platform.h"
32 #include "opt_ar531x.h"
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
38 * AR531x Ethernet interface driver
39 * copy from mips/idt/if_kr.c and netbsd code
41 #include <sys/param.h>
42 #include <sys/endian.h>
43 #include <sys/systm.h>
44 #include <sys/sockio.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
49 #include <sys/module.h>
50 #include <sys/mutex.h>
51 #include <sys/socket.h>
52 #include <sys/taskqueue.h>
56 #include <net/if_arp.h>
57 #include <net/ethernet.h>
58 #include <net/if_dl.h>
59 #include <net/if_media.h>
60 #include <net/if_types.h>
61 #include <net/if_var.h>
65 #include <machine/bus.h>
66 #include <machine/resource.h>
71 #include <machine/intr.h>
74 #include <dev/mii/mii.h>
75 #include <dev/mii/miivar.h>
78 #include <dev/mdio/mdio.h>
79 #include <dev/etherswitch/miiproxy.h>
83 MODULE_DEPEND(are, ether, 1, 1, 1);
84 MODULE_DEPEND(are, miibus, 1, 1, 1);
86 #include "miibus_if.h"
88 #include <mips/atheros/ar531x/ar5315reg.h>
89 #include <mips/atheros/ar531x/ar5312reg.h>
90 #include <mips/atheros/ar531x/ar5315_setup.h>
91 #include <mips/atheros/ar531x/if_arereg.h>
/*
 * Forward declarations for the are(4) driver entry points (newbus device
 * methods, ifnet callbacks, MII accessors, DMA ring helpers).
 * NOTE(review): this file was extracted with its original line numbers
 * embedded at the start of each line and with some lines missing; the
 * tokens below are kept byte-identical to the extraction.
 */
94 void dump_txdesc(struct are_softc *, int);
95 void dump_status_reg(struct are_softc *);
98 static int are_attach(device_t);
99 static int are_detach(device_t);
100 static int are_ifmedia_upd(struct ifnet *);
101 static void are_ifmedia_sts(struct ifnet *, struct ifmediareq *);
102 static int are_ioctl(struct ifnet *, u_long, caddr_t);
103 static void are_init(void *);
104 static void are_init_locked(struct are_softc *);
105 static void are_link_task(void *, int);
106 static int are_miibus_readreg(device_t, int, int);
107 static void are_miibus_statchg(device_t);
108 static int are_miibus_writereg(device_t, int, int, int);
109 static int are_probe(device_t);
110 static void are_reset(struct are_softc *);
111 static int are_resume(device_t);
112 static int are_rx_ring_init(struct are_softc *);
113 static int are_tx_ring_init(struct are_softc *);
114 static int are_shutdown(device_t);
115 static void are_start(struct ifnet *);
116 static void are_start_locked(struct ifnet *);
117 static void are_stop(struct are_softc *);
118 static int are_suspend(device_t);
120 static void are_rx(struct are_softc *);
121 static void are_tx(struct are_softc *);
122 static void are_intr(void *);
123 static void are_tick(void *);
125 static void are_dmamap_cb(void *, bus_dma_segment_t *, int, int);
126 static int are_dma_alloc(struct are_softc *);
127 static void are_dma_free(struct are_softc *);
128 static int are_newbuf(struct are_softc *, int);
129 static __inline void are_fixup_rx(struct mbuf *);
131 static void are_hinted_child(device_t bus, const char *dname, int dunit);
/*
 * Newbus method table and driver/devclass glue for the are(4) device.
 * Attaches under nexus; also exports the miibus methods so PHYs can be
 * probed through this device.
 * NOTE(review): fragmentary extraction (embedded line numbers, missing
 * lines such as DEVMETHOD_END and the driver_t initializer fields);
 * kept byte-identical.
 */
133 static device_method_t are_methods[] = {
134 /* Device interface */
135 DEVMETHOD(device_probe, are_probe),
136 DEVMETHOD(device_attach, are_attach),
137 DEVMETHOD(device_detach, are_detach),
138 DEVMETHOD(device_suspend, are_suspend),
139 DEVMETHOD(device_resume, are_resume),
140 DEVMETHOD(device_shutdown, are_shutdown),
143 DEVMETHOD(miibus_readreg, are_miibus_readreg),
144 DEVMETHOD(miibus_writereg, are_miibus_writereg),
145 DEVMETHOD(miibus_statchg, are_miibus_statchg),
148 DEVMETHOD(bus_add_child, device_add_child_ordered),
149 DEVMETHOD(bus_hinted_child, are_hinted_child),
154 static driver_t are_driver = {
157 sizeof(struct are_softc)
160 static devclass_t are_devclass;
162 DRIVER_MODULE(are, nexus, are_driver, are_devclass, 0, 0);
164 DRIVER_MODULE(miibus, are, miibus_driver, miibus_devclass, 0, 0);
/*
 * Separate "aremdio" driver exposing the same MII read/write routines
 * through the mdio(4) interface, plus the miiproxy attachment, so an
 * etherswitch PHY can be driven over this MAC's MDIO lines.
 * NOTE(review): fragmentary extraction (embedded line numbers, missing
 * lines); kept byte-identical.
 */
168 static int aremdio_probe(device_t);
169 static int aremdio_attach(device_t);
170 static int aremdio_detach(device_t);
173 * Declare an additional, separate driver for accessing the MDIO bus.
175 static device_method_t aremdio_methods[] = {
176 /* Device interface */
177 DEVMETHOD(device_probe, aremdio_probe),
178 DEVMETHOD(device_attach, aremdio_attach),
179 DEVMETHOD(device_detach, aremdio_detach),
182 DEVMETHOD(bus_add_child, device_add_child_ordered),
185 DEVMETHOD(mdio_readreg, are_miibus_readreg),
186 DEVMETHOD(mdio_writereg, are_miibus_writereg),
189 DEFINE_CLASS_0(aremdio, aremdio_driver, aremdio_methods,
190 sizeof(struct are_softc));
191 static devclass_t aremdio_devclass;
193 DRIVER_MODULE(miiproxy, are, miiproxy_driver, miiproxy_devclass, 0, 0);
194 DRIVER_MODULE(aremdio, nexus, aremdio_driver, aremdio_devclass, 0, 0);
195 DRIVER_MODULE(mdio, aremdio, mdio_driver, mdio_devclass, 0, 0);
200 are_probe(device_t dev)
203 device_set_desc(dev, "AR531x Ethernet interface");
/*
 * are_attach() - device attach path: installs a hard-coded fallback MAC
 * address (overridable via the "macaddr" device hint), initializes the
 * mutex/callout/link task, maps CSR memory and the IRQ, sets up the
 * ifnet, allocates DMA rings, attaches the PHY (via miiproxy or miibus,
 * falling back to a manual IFM_AUTO ifmedia), calls ether_ifattach(),
 * and wires the interrupt handler (AR5315 vs. AR5312 IRQ selection).
 * NOTE(review): fragmentary extraction - embedded line numbers, missing
 * declarations/braces/error-unwind lines; kept byte-identical.
 */
208 are_attach(device_t dev)
211 struct are_softc *sc;
223 sc = device_get_softc(dev);
224 unit = device_get_unit(dev);
227 /* hardcode macaddress */
228 sc->are_eaddr[0] = 0x00;
229 sc->are_eaddr[1] = 0x0C;
230 sc->are_eaddr[2] = 0x42;
231 sc->are_eaddr[3] = 0x09;
232 sc->are_eaddr[4] = 0x5E;
233 sc->are_eaddr[5] = 0x6B;
235 /* try to get from hints */
236 if (!resource_string_value(device_get_name(dev),
237 device_get_unit(dev), "macaddr", (const char **)&local_macstr)) {
238 uint32_t tmpmac[ETHER_ADDR_LEN];
240 /* Have a MAC address; should use it */
241 device_printf(dev, "Overriding MAC address from environment: '%s'\n",
244 /* Extract out the MAC address */
245 /* XXX this should all be a generic method */
246 count = sscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
247 &tmpmac[0], &tmpmac[1],
248 &tmpmac[2], &tmpmac[3],
249 &tmpmac[4], &tmpmac[5]);
252 for (i = 0; i < ETHER_ADDR_LEN; i++)
253 sc->are_eaddr[i] = tmpmac[i];
257 mtx_init(&sc->are_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
259 callout_init_mtx(&sc->are_stat_callout, &sc->are_mtx, 0);
260 TASK_INIT(&sc->are_link_task, 0, are_link_task, sc);
262 /* Map control/status registers. */
264 sc->are_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->are_rid,
265 RF_ACTIVE | RF_SHAREABLE);
267 if (sc->are_res == NULL) {
268 device_printf(dev, "couldn't map memory\n");
273 sc->are_btag = rman_get_bustag(sc->are_res);
274 sc->are_bhandle = rman_get_bushandle(sc->are_res);
277 /* Allocate interrupts */
279 sc->are_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
280 RF_SHAREABLE | RF_ACTIVE);
282 if (sc->are_irq == NULL) {
283 device_printf(dev, "couldn't map interrupt\n");
289 /* Allocate ifnet structure. */
290 ifp = sc->are_ifp = if_alloc(IFT_ETHER);
293 device_printf(dev, "couldn't allocate ifnet structure\n");
298 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
299 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
300 ifp->if_ioctl = are_ioctl;
301 ifp->if_start = are_start;
302 ifp->if_init = are_init;
303 sc->are_if_flags = ifp->if_flags;
305 /* ifqmaxlen is sysctl value in net/if.c */
306 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
307 ifp->if_snd.ifq_maxlen = ifqmaxlen;
308 IFQ_SET_READY(&ifp->if_snd);
310 /* Tell the upper layer(s) we support long frames. */
311 ifp->if_capabilities |= IFCAP_VLAN_MTU;
313 ifp->if_capenable = ifp->if_capabilities;
315 if (are_dma_alloc(sc) != 0) {
320 CSR_WRITE_4(sc, CSR_BUSMODE, BUSMODE_SWR);
324 sc->are_miiproxy = mii_attach_proxy(sc->are_dev);
329 error = mii_attach(dev, &sc->are_miibus, ifp, are_ifmedia_upd,
330 are_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
332 device_printf(dev, "attaching PHYs failed\n");
336 ifmedia_init(&sc->are_ifmedia, 0, are_ifmedia_upd, are_ifmedia_sts);
338 ifmedia_add(&sc->are_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
339 ifmedia_set(&sc->are_ifmedia, IFM_ETHER | IFM_AUTO);
342 /* Call MI attach routine. */
343 ether_ifattach(ifp, sc->are_eaddr);
347 if (ar531x_soc >= AR531X_SOC_AR5315) {
348 enetirq = AR5315_CPU_IRQ_ENET;
351 if (device_get_unit(dev) == 0) {
352 enetirq = AR5312_IRQ_ENET0;
355 enetirq = AR5312_IRQ_ENET1;
359 cpu_establish_hardintr(name, NULL, are_intr, sc, enetirq,
360 INTR_TYPE_NET, NULL);
362 /* Hook interrupt last to avoid having to lock softc */
363 error = bus_setup_intr(dev, sc->are_irq, INTR_TYPE_NET | INTR_MPSAFE,
364 NULL, are_intr, sc, &sc->are_intrhand);
367 device_printf(dev, "couldn't set up irq\n");
/*
 * are_detach() - tear down in reverse order of attach: drain the link
 * task, delete the miibus child, tear down the interrupt, release IRQ
 * and memory resources, and destroy the mutex.
 * NOTE(review): fragmentary extraction - the stop/callout-drain lines
 * inside the device_is_attached() branch are missing; kept byte-identical.
 * (The KASSERT message still says "vr mutex" - copy/paste from if_vr.)
 */
381 are_detach(device_t dev)
383 struct are_softc *sc = device_get_softc(dev);
384 struct ifnet *ifp = sc->are_ifp;
386 KASSERT(mtx_initialized(&sc->are_mtx), ("vr mutex not initialized"));
388 /* These should only be active if attach succeeded */
389 if (device_is_attached(dev)) {
394 taskqueue_drain(taskqueue_swi, &sc->are_link_task);
399 device_delete_child(dev, sc->are_miibus);
401 bus_generic_detach(dev);
403 if (sc->are_intrhand)
404 bus_teardown_intr(dev, sc->are_irq, sc->are_intrhand);
406 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->are_irq);
409 bus_release_resource(dev, SYS_RES_MEMORY, sc->are_rid,
417 mtx_destroy(&sc->are_mtx);
424 are_suspend(device_t dev)
427 panic("%s", __func__);
432 are_resume(device_t dev)
435 panic("%s", __func__);
440 are_shutdown(device_t dev)
442 struct are_softc *sc;
444 sc = device_get_softc(dev);
454 are_miibus_readreg(device_t dev, int phy, int reg)
456 struct are_softc * sc = device_get_softc(dev);
460 addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT);
461 CSR_WRITE_4(sc, CSR_MIIADDR, addr);
462 for (i = 0; i < 100000000; i++) {
463 if ((CSR_READ_4(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
467 return (CSR_READ_4(sc, CSR_MIIDATA) & 0xffff);
471 are_miibus_writereg(device_t dev, int phy, int reg, int data)
473 struct are_softc * sc = device_get_softc(dev);
477 /* write the data register */
478 CSR_WRITE_4(sc, CSR_MIIDATA, data);
480 /* write the address to latch it in */
481 addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT) |
483 CSR_WRITE_4(sc, CSR_MIIADDR, addr);
485 for (i = 0; i < 100000000; i++) {
486 if ((CSR_READ_4(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
494 are_miibus_statchg(device_t dev)
496 struct are_softc *sc;
498 sc = device_get_softc(dev);
499 taskqueue_enqueue(taskqueue_swi, &sc->are_link_task);
503 are_link_task(void *arg, int pending)
506 struct are_softc *sc;
507 struct mii_data *mii;
509 /* int lfdx, mfdx; */
511 sc = (struct are_softc *)arg;
514 mii = device_get_softc(sc->are_miibus);
516 if (mii == NULL || ifp == NULL ||
517 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
522 if (mii->mii_media_status & IFM_ACTIVE) {
523 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
524 sc->are_link_status = 1;
526 sc->are_link_status = 0;
533 are_reset(struct are_softc *sc)
537 CSR_WRITE_4(sc, CSR_BUSMODE, BUSMODE_SWR);
540 * The chip doesn't take itself out of reset automatically.
541 * We need to do so after 2us.
544 CSR_WRITE_4(sc, CSR_BUSMODE, 0);
546 for (i = 0; i < 1000; i++) {
548 * Wait a bit for the reset to complete before peeking
552 if ((CSR_READ_4(sc, CSR_BUSMODE) & BUSMODE_SWR) == 0)
556 if (CSR_READ_4(sc, CSR_BUSMODE) & BUSMODE_SWR)
557 device_printf(sc->are_dev, "reset time out\n");
565 struct are_softc *sc = xsc;
/*
 * are_init_locked() - bring the interface up (driver lock held): resets
 * and programs the chip (RX/TX rings, BUSMODE, interrupt masks, station
 * address, flow control, MACCTL/OPMODE), kicks the receiver, marks the
 * interface RUNNING and starts the 1 Hz tick callout.
 * NOTE(review): fragmentary extraction - embedded line numbers and
 * missing lines (stop/reset calls, OPMODE continuation, braces); kept
 * byte-identical.
 */
573 are_init_locked(struct are_softc *sc)
575 struct ifnet *ifp = sc->are_ifp;
577 struct mii_data *mii;
583 mii = device_get_softc(sc->are_miibus);
589 /* Init circular RX list. */
590 if (are_rx_ring_init(sc) != 0) {
591 device_printf(sc->are_dev,
592 "initialization failed: no memory for rx buffers\n");
597 /* Init tx descriptors. */
598 are_tx_ring_init(sc);
601 * Initialize the BUSMODE register.
603 CSR_WRITE_4(sc, CSR_BUSMODE,
604 /* XXX: not sure if this is a good thing or not... */
605 BUSMODE_BAR | BUSMODE_BLE | BUSMODE_PBL_4LW);
608 * Initialize the interrupt mask and enable interrupts.
610 /* normal interrupts */
611 sc->sc_inten = STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS;
613 /* abnormal interrupts */
614 sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF |
615 STATUS_RU | STATUS_RPS | STATUS_SE | STATUS_AIS;
617 sc->sc_rxint_mask = STATUS_RI|STATUS_RU;
618 sc->sc_txint_mask = STATUS_TI|STATUS_UNF|STATUS_TJT;
620 sc->sc_rxint_mask &= sc->sc_inten;
621 sc->sc_txint_mask &= sc->sc_inten;
623 CSR_WRITE_4(sc, CSR_INTEN, sc->sc_inten);
624 CSR_WRITE_4(sc, CSR_STATUS, 0xffffffff);
627 * Give the transmit and receive rings to the chip.
629 CSR_WRITE_4(sc, CSR_TXLIST, ARE_TX_RING_ADDR(sc, 0));
630 CSR_WRITE_4(sc, CSR_RXLIST, ARE_RX_RING_ADDR(sc, 0));
633 * Set the station address.
635 CSR_WRITE_4(sc, CSR_MACHI, sc->are_eaddr[5] << 16 | sc->are_eaddr[4]);
636 CSR_WRITE_4(sc, CSR_MACLO, sc->are_eaddr[3] << 24 |
637 sc->are_eaddr[2] << 16 | sc->are_eaddr[1] << 8 | sc->are_eaddr[0]);
642 CSR_WRITE_4(sc, CSR_FLOWC, FLOWC_FCE);
643 CSR_WRITE_4(sc, CSR_MACCTL, MACCTL_RE | MACCTL_TE |
644 MACCTL_PM | MACCTL_FDX | MACCTL_HBD | MACCTL_RA);
647 * Write out the opmode.
649 CSR_WRITE_4(sc, CSR_OPMODE, OPMODE_SR | OPMODE_ST | OPMODE_SF |
653 * Start the receive process.
655 CSR_WRITE_4(sc, CSR_RXPOLL, RXPOLL_RPD);
657 sc->are_link_status = 1;
662 ifp->if_drv_flags |= IFF_DRV_RUNNING;
663 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
665 callout_reset(&sc->are_stat_callout, hz, are_tick, sc);
669 are_start(struct ifnet *ifp)
671 struct are_softc *sc;
676 are_start_locked(ifp);
/*
 * are_encap() - map one mbuf chain into TX descriptors: defragments the
 * chain (legacy if_vr-derived alignment workaround), manually pads short
 * frames to ARE_MIN_FRAMELEN (zeroing the pad to avoid leaking kernel
 * data), busdma-loads the chain (retrying once after m_defrag on EFBIG),
 * fills one descriptor per segment (ADSTAT_OWN, size, ADCTL_ER on ring
 * wrap), flags the last fragment IC|LS, and if the TX engine is stopped
 * or suspended marks the first descriptor FS and chains it after the
 * previous tail.
 * NOTE(review): fragmentary extraction - embedded line numbers and many
 * missing lines (si initialization, error returns, closing braces);
 * kept byte-identical. The "VIA Rhine" comments are inherited from the
 * if_kr/if_vr code this was copied from, not statements about this SoC.
 */
681 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
682 * pointers to the fragment pointers.
685 are_encap(struct are_softc *sc, struct mbuf **m_head)
687 struct are_txdesc *txd;
688 struct are_desc *desc, *prev_desc;
690 bus_dma_segment_t txsegs[ARE_MAXFRAGS];
692 int error, i, nsegs, prod, si, prev_prod;
697 startcount = sc->are_cdata.are_tx_cnt;
702 * Some VIA Rhine wants packet buffers to be longword
703 * aligned, but very often our mbufs aren't. Rather than
704 * waste time trying to decide when to copy and when not
705 * to copy, just do it all the time.
707 m = m_defrag(*m_head, M_NOWAIT);
709 device_printf(sc->are_dev, "are_encap m_defrag error\n");
717 * The Rhine chip doesn't auto-pad, so we have to make
718 * sure to pad short frames out to the minimum frame length
721 if ((*m_head)->m_pkthdr.len < ARE_MIN_FRAMELEN) {
723 padlen = ARE_MIN_FRAMELEN - m->m_pkthdr.len;
724 if (M_WRITABLE(m) == 0) {
725 /* Get a writable copy. */
726 m = m_dup(*m_head, M_NOWAIT);
729 device_printf(sc->are_dev, "are_encap m_dup error\n");
735 if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
736 m = m_defrag(m, M_NOWAIT);
738 device_printf(sc->are_dev, "are_encap m_defrag error\n");
745 * Manually pad short frames, and zero the pad space
746 * to avoid leaking data.
748 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
749 m->m_pkthdr.len += padlen;
750 m->m_len = m->m_pkthdr.len;
754 prod = sc->are_cdata.are_tx_prod;
755 txd = &sc->are_cdata.are_txdesc[prod];
756 error = bus_dmamap_load_mbuf_sg(sc->are_cdata.are_tx_tag,
757 txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
758 if (error == EFBIG) {
759 device_printf(sc->are_dev, "are_encap EFBIG error\n");
760 m = m_defrag(*m_head, M_NOWAIT);
767 error = bus_dmamap_load_mbuf_sg(sc->are_cdata.are_tx_tag,
768 txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
775 } else if (error != 0)
783 /* Check number of available descriptors. */
784 if (sc->are_cdata.are_tx_cnt + nsegs >= (ARE_TX_RING_CNT - 1)) {
785 bus_dmamap_unload(sc->are_cdata.are_tx_tag, txd->tx_dmamap);
790 bus_dmamap_sync(sc->are_cdata.are_tx_tag, txd->tx_dmamap,
791 BUS_DMASYNC_PREWRITE);
796 * Make a list of descriptors for this packet. DMA controller will
797 * walk through it while are_link is not zero. The last one should
798 * have COF flag set, to pickup next chain from NDPTR
801 desc = prev_desc = NULL;
802 for (i = 0; i < nsegs; i++) {
803 desc = &sc->are_rdata.are_tx_ring[prod];
804 desc->are_stat = ADSTAT_OWN;
805 desc->are_devcs = ARE_DMASIZE(txsegs[i].ds_len);
806 desc->are_addr = txsegs[i].ds_addr;
807 /* link with previous descriptor */
808 /* end of descriptor */
809 if (prod == ARE_TX_RING_CNT - 1)
810 desc->are_devcs |= ADCTL_ER;
812 sc->are_cdata.are_tx_cnt++;
814 ARE_INC(prod, ARE_TX_RING_CNT);
818 * Set mark last fragment with LD flag
821 desc->are_devcs |= ADCTL_Tx_IC;
822 desc->are_devcs |= ADCTL_Tx_LS;
825 /* Update producer index. */
826 sc->are_cdata.are_tx_prod = prod;
828 /* Sync descriptors. */
829 bus_dmamap_sync(sc->are_cdata.are_tx_ring_tag,
830 sc->are_cdata.are_tx_ring_map,
831 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
833 /* Start transmitting */
834 /* Check if new list is queued in NDPTR */
835 txstat = (CSR_READ_4(sc, CSR_STATUS) >> 20) & 7;
836 if (startcount == 0 && (txstat == 0 || txstat == 6)) {
837 desc = &sc->are_rdata.are_tx_ring[si];
838 desc->are_devcs |= ADCTL_Tx_FS;
841 link_addr = ARE_TX_RING_ADDR(sc, si);
842 /* Get previous descriptor */
843 si = (si + ARE_TX_RING_CNT - 1) % ARE_TX_RING_CNT;
844 desc = &sc->are_rdata.are_tx_ring[si];
845 desc->are_devcs &= ~(ADCTL_Tx_IC | ADCTL_Tx_LS);
/*
 * are_start_locked() - drain the ifnet send queue (driver lock held):
 * bails unless RUNNING, not OACTIVE and link is up; dequeues packets
 * while ring space remains, encapsulating each via are_encap() (on
 * failure the packet is re-queued and OACTIVE is set), taps BPF, and
 * finally pokes CSR_TXPOLL if the TX engine is stopped/suspended.
 * NOTE(review): fragmentary extraction - embedded line numbers, missing
 * declarations/braces/NULL checks; kept byte-identical.
 */
852 are_start_locked(struct ifnet *ifp)
854 struct are_softc *sc;
863 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
864 IFF_DRV_RUNNING || sc->are_link_status == 0 )
867 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
868 sc->are_cdata.are_tx_cnt < ARE_TX_RING_CNT - 2; ) {
869 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
873 * Pack the data into the transmit ring. If we
874 * don't have room, set the OACTIVE flag and wait
875 * for the NIC to drain the ring.
877 if (are_encap(sc, &m_head)) {
880 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
881 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
887 * If there's a BPF listener, bounce a copy of this frame
890 ETHER_BPF_MTAP(ifp, m_head);
894 txstat = (CSR_READ_4(sc, CSR_STATUS) >> 20) & 7;
895 if (txstat == 0 || txstat == 6) {
896 /* Transmit Process Stat is stop or suspended */
897 CSR_WRITE_4(sc, CSR_TXPOLL, TXPOLL_TPD);
/*
 * are_stop() - quiesce the device: clear RUNNING/OACTIVE, stop the tick
 * callout, mask all interrupts, zero OPMODE and both ring base registers,
 * and clear the MAC TX/RX enables in MACCTL.
 * NOTE(review): fragmentary extraction - embedded line numbers, missing
 * ifp declaration and braces; kept byte-identical.
 */
903 are_stop(struct are_softc *sc)
910 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
911 callout_stop(&sc->are_stat_callout);
913 /* Disable interrupts. */
914 CSR_WRITE_4(sc, CSR_INTEN, 0);
916 /* Stop the transmit and receive processes. */
917 CSR_WRITE_4(sc, CSR_OPMODE, 0);
918 CSR_WRITE_4(sc, CSR_RXLIST, 0);
919 CSR_WRITE_4(sc, CSR_TXLIST, 0);
920 CSR_WRITE_4(sc, CSR_MACCTL,
921 CSR_READ_4(sc, CSR_MACCTL) & ~(MACCTL_TE | MACCTL_RE));
/*
 * are_set_filter() - program the RX filter: clears promiscuous and
 * pass-multicast bits, keeps heartbeat disabled, re-enables promiscuous
 * mode if IFF_PROMISC, and (since per-SoC multicast hashing is not
 * understood - see the Todo) sets an all-ones hash, i.e. allmulti.
 * NOTE(review): fragmentary extraction - embedded line numbers, missing
 * variable declarations and braces; kept byte-identical.
 */
926 are_set_filter(struct are_softc *sc)
934 macctl = CSR_READ_4(sc, CSR_MACCTL);
935 macctl &= ~(MACCTL_PR | MACCTL_PM);
936 macctl |= MACCTL_HBD;
938 if (ifp->if_flags & IFF_PROMISC)
941 /* Todo: hash table set.
942 * But I don't know how to use multicast hash table at this soc.
945 /* this is allmulti */
946 mchash[0] = mchash[1] = 0xffffffff;
949 CSR_WRITE_4(sc, CSR_HTLO, mchash[0]);
950 CSR_WRITE_4(sc, CSR_HTHI, mchash[1]);
951 CSR_WRITE_4(sc, CSR_MACCTL, macctl);
/*
 * are_ioctl() - ifnet ioctl handler: SIOCSIFFLAGS re-filters or
 * (re)initializes/stops as flags change; media ioctls are routed to the
 * miibus MII data when a PHY is attached, otherwise to the driver's own
 * manual ifmedia; everything else falls through to ether_ioctl().
 * NOTE(review): fragmentary extraction - embedded line numbers, the
 * switch/case skeleton and lock calls are missing; kept byte-identical.
 */
957 are_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
959 struct are_softc *sc = ifp->if_softc;
960 struct ifreq *ifr = (struct ifreq *) data;
962 struct mii_data *mii;
969 if (ifp->if_flags & IFF_UP) {
970 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
971 if ((ifp->if_flags ^ sc->are_if_flags) &
972 (IFF_PROMISC | IFF_ALLMULTI))
975 if (sc->are_detach == 0)
979 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
982 sc->are_if_flags = ifp->if_flags;
996 mii = device_get_softc(sc->are_miibus);
997 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
999 error = ifmedia_ioctl(ifp, ifr, &sc->are_ifmedia, command);
1006 error = ether_ioctl(ifp, command, data);
/*
 * are_ifmedia_upd() - set media options: resets each attached PHY via
 * the MII list and triggers a media change through mii_mediachg().
 * NOTE(review): fragmentary extraction - embedded line numbers, missing
 * braces, lock calls and the PHY-reset loop body; kept byte-identical.
 */
1014 * Set media options.
1017 are_ifmedia_upd(struct ifnet *ifp)
1020 struct are_softc *sc;
1021 struct mii_data *mii;
1022 struct mii_softc *miisc;
1027 mii = device_get_softc(sc->are_miibus);
1028 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1030 error = mii_mediachg(mii);
/*
 * are_ifmedia_sts() - report media status: when a miibus PHY exists,
 * poll it and copy its active/status words into the request; otherwise
 * (manual ifmedia path) report the link as valid and active.
 * NOTE(review): fragmentary extraction - embedded line numbers, missing
 * braces and the branch structure; kept byte-identical.
 */
1040 * Report current media status.
1043 are_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1046 struct are_softc *sc = ifp->if_softc;
1047 struct mii_data *mii;
1049 mii = device_get_softc(sc->are_miibus);
1052 ifmr->ifm_active = mii->mii_media_active;
1053 ifmr->ifm_status = mii->mii_media_status;
1056 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1060 struct are_dmamap_arg {
1061 bus_addr_t are_busaddr;
1065 are_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1067 struct are_dmamap_arg *ctx;
1072 ctx->are_busaddr = segs[0].ds_addr;
/*
 * are_dma_alloc() - create all busdma state: a parent tag, ring tags
 * (ARE_RING_ALIGN), TX-buffer tag (up to ARE_MAXFRAGS x MCLBYTES) and
 * RX-buffer tag (ARE_RX_ALIGN, one MCLBYTES segment); allocates and
 * loads both descriptor rings (capturing their bus addresses via
 * are_dmamap_cb), then creates per-descriptor TX/RX maps plus a spare
 * RX map.
 * NOTE(review): fragmentary extraction - embedded line numbers, missing
 * error-check lines, nsegments fields and goto-fail unwinding; kept
 * byte-identical.
 */
1076 are_dma_alloc(struct are_softc *sc)
1078 struct are_dmamap_arg ctx;
1079 struct are_txdesc *txd;
1080 struct are_rxdesc *rxd;
1083 /* Create parent DMA tag. */
1084 error = bus_dma_tag_create(
1085 bus_get_dma_tag(sc->are_dev), /* parent */
1086 1, 0, /* alignment, boundary */
1087 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1088 BUS_SPACE_MAXADDR, /* highaddr */
1089 NULL, NULL, /* filter, filterarg */
1090 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1092 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1094 NULL, NULL, /* lockfunc, lockarg */
1095 &sc->are_cdata.are_parent_tag);
1097 device_printf(sc->are_dev, "failed to create parent DMA tag\n");
1100 /* Create tag for Tx ring. */
1101 error = bus_dma_tag_create(
1102 sc->are_cdata.are_parent_tag, /* parent */
1103 ARE_RING_ALIGN, 0, /* alignment, boundary */
1104 BUS_SPACE_MAXADDR, /* lowaddr */
1105 BUS_SPACE_MAXADDR, /* highaddr */
1106 NULL, NULL, /* filter, filterarg */
1107 ARE_TX_RING_SIZE, /* maxsize */
1109 ARE_TX_RING_SIZE, /* maxsegsize */
1111 NULL, NULL, /* lockfunc, lockarg */
1112 &sc->are_cdata.are_tx_ring_tag);
1114 device_printf(sc->are_dev, "failed to create Tx ring DMA tag\n");
1118 /* Create tag for Rx ring. */
1119 error = bus_dma_tag_create(
1120 sc->are_cdata.are_parent_tag, /* parent */
1121 ARE_RING_ALIGN, 0, /* alignment, boundary */
1122 BUS_SPACE_MAXADDR, /* lowaddr */
1123 BUS_SPACE_MAXADDR, /* highaddr */
1124 NULL, NULL, /* filter, filterarg */
1125 ARE_RX_RING_SIZE, /* maxsize */
1127 ARE_RX_RING_SIZE, /* maxsegsize */
1129 NULL, NULL, /* lockfunc, lockarg */
1130 &sc->are_cdata.are_rx_ring_tag);
1132 device_printf(sc->are_dev, "failed to create Rx ring DMA tag\n");
1136 /* Create tag for Tx buffers. */
1137 error = bus_dma_tag_create(
1138 sc->are_cdata.are_parent_tag, /* parent */
1139 sizeof(uint32_t), 0, /* alignment, boundary */
1140 BUS_SPACE_MAXADDR, /* lowaddr */
1141 BUS_SPACE_MAXADDR, /* highaddr */
1142 NULL, NULL, /* filter, filterarg */
1143 MCLBYTES * ARE_MAXFRAGS, /* maxsize */
1144 ARE_MAXFRAGS, /* nsegments */
1145 MCLBYTES, /* maxsegsize */
1147 NULL, NULL, /* lockfunc, lockarg */
1148 &sc->are_cdata.are_tx_tag);
1150 device_printf(sc->are_dev, "failed to create Tx DMA tag\n");
1154 /* Create tag for Rx buffers. */
1155 error = bus_dma_tag_create(
1156 sc->are_cdata.are_parent_tag, /* parent */
1157 ARE_RX_ALIGN, 0, /* alignment, boundary */
1158 BUS_SPACE_MAXADDR, /* lowaddr */
1159 BUS_SPACE_MAXADDR, /* highaddr */
1160 NULL, NULL, /* filter, filterarg */
1161 MCLBYTES, /* maxsize */
1163 MCLBYTES, /* maxsegsize */
1165 NULL, NULL, /* lockfunc, lockarg */
1166 &sc->are_cdata.are_rx_tag);
1168 device_printf(sc->are_dev, "failed to create Rx DMA tag\n");
1172 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1173 error = bus_dmamem_alloc(sc->are_cdata.are_tx_ring_tag,
1174 (void **)&sc->are_rdata.are_tx_ring, BUS_DMA_WAITOK |
1175 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->are_cdata.are_tx_ring_map);
1177 device_printf(sc->are_dev,
1178 "failed to allocate DMA'able memory for Tx ring\n");
1182 ctx.are_busaddr = 0;
1183 error = bus_dmamap_load(sc->are_cdata.are_tx_ring_tag,
1184 sc->are_cdata.are_tx_ring_map, sc->are_rdata.are_tx_ring,
1185 ARE_TX_RING_SIZE, are_dmamap_cb, &ctx, 0);
1186 if (error != 0 || ctx.are_busaddr == 0) {
1187 device_printf(sc->are_dev,
1188 "failed to load DMA'able memory for Tx ring\n");
1191 sc->are_rdata.are_tx_ring_paddr = ctx.are_busaddr;
1193 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1194 error = bus_dmamem_alloc(sc->are_cdata.are_rx_ring_tag,
1195 (void **)&sc->are_rdata.are_rx_ring, BUS_DMA_WAITOK |
1196 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->are_cdata.are_rx_ring_map);
1198 device_printf(sc->are_dev,
1199 "failed to allocate DMA'able memory for Rx ring\n");
1203 ctx.are_busaddr = 0;
1204 error = bus_dmamap_load(sc->are_cdata.are_rx_ring_tag,
1205 sc->are_cdata.are_rx_ring_map, sc->are_rdata.are_rx_ring,
1206 ARE_RX_RING_SIZE, are_dmamap_cb, &ctx, 0);
1207 if (error != 0 || ctx.are_busaddr == 0) {
1208 device_printf(sc->are_dev,
1209 "failed to load DMA'able memory for Rx ring\n");
1212 sc->are_rdata.are_rx_ring_paddr = ctx.are_busaddr;
1214 /* Create DMA maps for Tx buffers. */
1215 for (i = 0; i < ARE_TX_RING_CNT; i++) {
1216 txd = &sc->are_cdata.are_txdesc[i];
1218 txd->tx_dmamap = NULL;
1219 error = bus_dmamap_create(sc->are_cdata.are_tx_tag, 0,
1222 device_printf(sc->are_dev,
1223 "failed to create Tx dmamap\n");
1227 /* Create DMA maps for Rx buffers. */
1228 if ((error = bus_dmamap_create(sc->are_cdata.are_rx_tag, 0,
1229 &sc->are_cdata.are_rx_sparemap)) != 0) {
1230 device_printf(sc->are_dev,
1231 "failed to create spare Rx dmamap\n");
1234 for (i = 0; i < ARE_RX_RING_CNT; i++) {
1235 rxd = &sc->are_cdata.are_rxdesc[i];
1237 rxd->rx_dmamap = NULL;
1238 error = bus_dmamap_create(sc->are_cdata.are_rx_tag, 0,
1241 device_printf(sc->are_dev,
1242 "failed to create Rx dmamap\n");
/*
 * are_dma_free() - release everything are_dma_alloc() created, in
 * reverse order: unload/free/destroy the TX then RX ring tag state,
 * destroy per-descriptor TX and RX maps (plus the RX spare map), and
 * finally the parent tag. Each pointer is NULLed after destruction.
 * NOTE(review): fragmentary extraction - embedded line numbers, some
 * closing braces and map arguments missing; kept byte-identical.
 */
1252 are_dma_free(struct are_softc *sc)
1254 struct are_txdesc *txd;
1255 struct are_rxdesc *rxd;
1259 if (sc->are_cdata.are_tx_ring_tag) {
1260 if (sc->are_rdata.are_tx_ring_paddr)
1261 bus_dmamap_unload(sc->are_cdata.are_tx_ring_tag,
1262 sc->are_cdata.are_tx_ring_map);
1263 if (sc->are_rdata.are_tx_ring)
1264 bus_dmamem_free(sc->are_cdata.are_tx_ring_tag,
1265 sc->are_rdata.are_tx_ring,
1266 sc->are_cdata.are_tx_ring_map);
1267 sc->are_rdata.are_tx_ring = NULL;
1268 sc->are_rdata.are_tx_ring_paddr = 0;
1269 bus_dma_tag_destroy(sc->are_cdata.are_tx_ring_tag);
1270 sc->are_cdata.are_tx_ring_tag = NULL;
1273 if (sc->are_cdata.are_rx_ring_tag) {
1274 if (sc->are_rdata.are_rx_ring_paddr)
1275 bus_dmamap_unload(sc->are_cdata.are_rx_ring_tag,
1276 sc->are_cdata.are_rx_ring_map);
1277 if (sc->are_rdata.are_rx_ring)
1278 bus_dmamem_free(sc->are_cdata.are_rx_ring_tag,
1279 sc->are_rdata.are_rx_ring,
1280 sc->are_cdata.are_rx_ring_map);
1281 sc->are_rdata.are_rx_ring = NULL;
1282 sc->are_rdata.are_rx_ring_paddr = 0;
1283 bus_dma_tag_destroy(sc->are_cdata.are_rx_ring_tag);
1284 sc->are_cdata.are_rx_ring_tag = NULL;
1287 if (sc->are_cdata.are_tx_tag) {
1288 for (i = 0; i < ARE_TX_RING_CNT; i++) {
1289 txd = &sc->are_cdata.are_txdesc[i];
1290 if (txd->tx_dmamap) {
1291 bus_dmamap_destroy(sc->are_cdata.are_tx_tag,
1293 txd->tx_dmamap = NULL;
1296 bus_dma_tag_destroy(sc->are_cdata.are_tx_tag);
1297 sc->are_cdata.are_tx_tag = NULL;
1300 if (sc->are_cdata.are_rx_tag) {
1301 for (i = 0; i < ARE_RX_RING_CNT; i++) {
1302 rxd = &sc->are_cdata.are_rxdesc[i];
1303 if (rxd->rx_dmamap) {
1304 bus_dmamap_destroy(sc->are_cdata.are_rx_tag,
1306 rxd->rx_dmamap = NULL;
1309 if (sc->are_cdata.are_rx_sparemap) {
1310 bus_dmamap_destroy(sc->are_cdata.are_rx_tag,
1311 sc->are_cdata.are_rx_sparemap);
1312 sc->are_cdata.are_rx_sparemap = 0;
1314 bus_dma_tag_destroy(sc->are_cdata.are_rx_tag);
1315 sc->are_cdata.are_rx_tag = NULL;
1318 if (sc->are_cdata.are_parent_tag) {
1319 bus_dma_tag_destroy(sc->are_cdata.are_parent_tag);
1320 sc->are_cdata.are_parent_tag = NULL;
1325 * Initialize the transmit descriptors.
1328 are_tx_ring_init(struct are_softc *sc)
1330 struct are_ring_data *rd;
1331 struct are_txdesc *txd;
1335 sc->are_cdata.are_tx_prod = 0;
1336 sc->are_cdata.are_tx_cons = 0;
1337 sc->are_cdata.are_tx_cnt = 0;
1338 sc->are_cdata.are_tx_pkts = 0;
1340 rd = &sc->are_rdata;
1341 bzero(rd->are_tx_ring, ARE_TX_RING_SIZE);
1342 for (i = 0; i < ARE_TX_RING_CNT; i++) {
1343 if (i == ARE_TX_RING_CNT - 1)
1344 addr = ARE_TX_RING_ADDR(sc, 0);
1346 addr = ARE_TX_RING_ADDR(sc, i + 1);
1347 rd->are_tx_ring[i].are_stat = 0;
1348 rd->are_tx_ring[i].are_devcs = 0;
1349 rd->are_tx_ring[i].are_addr = 0;
1350 rd->are_tx_ring[i].are_link = addr;
1351 txd = &sc->are_cdata.are_txdesc[i];
1355 bus_dmamap_sync(sc->are_cdata.are_tx_ring_tag,
1356 sc->are_cdata.are_tx_ring_map,
1357 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1363 * Initialize the RX descriptors and allocate mbufs for them. Note that
1364 * we arrange the descriptors in a closed ring, so that the last descriptor
1365 * points back to the first.
1368 are_rx_ring_init(struct are_softc *sc)
1370 struct are_ring_data *rd;
1371 struct are_rxdesc *rxd;
1375 sc->are_cdata.are_rx_cons = 0;
1377 rd = &sc->are_rdata;
1378 bzero(rd->are_rx_ring, ARE_RX_RING_SIZE);
1379 for (i = 0; i < ARE_RX_RING_CNT; i++) {
1380 rxd = &sc->are_cdata.are_rxdesc[i];
1382 rxd->desc = &rd->are_rx_ring[i];
1383 if (i == ARE_RX_RING_CNT - 1)
1384 addr = ARE_RX_RING_ADDR(sc, 0);
1386 addr = ARE_RX_RING_ADDR(sc, i + 1);
1387 rd->are_rx_ring[i].are_stat = ADSTAT_OWN;
1388 rd->are_rx_ring[i].are_devcs = ADCTL_CH;
1389 if (i == ARE_RX_RING_CNT - 1)
1390 rd->are_rx_ring[i].are_devcs |= ADCTL_ER;
1391 rd->are_rx_ring[i].are_addr = 0;
1392 rd->are_rx_ring[i].are_link = addr;
1393 if (are_newbuf(sc, i) != 0)
1397 bus_dmamap_sync(sc->are_cdata.are_rx_ring_tag,
1398 sc->are_cdata.are_rx_ring_map,
1399 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1405 * Initialize an RX descriptor and attach an MBUF cluster.
1408 are_newbuf(struct are_softc *sc, int idx)
1410 struct are_desc *desc;
1411 struct are_rxdesc *rxd;
1413 bus_dma_segment_t segs[1];
1417 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1420 m->m_len = m->m_pkthdr.len = MCLBYTES;
1422 /* tcp header boundary margin */
1425 if (bus_dmamap_load_mbuf_sg(sc->are_cdata.are_rx_tag,
1426 sc->are_cdata.are_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1430 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1432 rxd = &sc->are_cdata.are_rxdesc[idx];
1433 if (rxd->rx_m != NULL) {
1435 * THis is if_kr.c original code but make bug. Make scranble on buffer data.
1436 * bus_dmamap_sync(sc->are_cdata.are_rx_tag, rxd->rx_dmamap,
1437 * BUS_DMASYNC_POSTREAD);
1439 bus_dmamap_unload(sc->are_cdata.are_rx_tag, rxd->rx_dmamap);
1441 map = rxd->rx_dmamap;
1442 rxd->rx_dmamap = sc->are_cdata.are_rx_sparemap;
1443 sc->are_cdata.are_rx_sparemap = map;
1444 bus_dmamap_sync(sc->are_cdata.are_rx_tag, rxd->rx_dmamap,
1445 BUS_DMASYNC_PREREAD);
1448 desc->are_addr = segs[0].ds_addr;
1449 desc->are_devcs |= ARE_DMASIZE(segs[0].ds_len);
1450 rxd->saved_ca = desc->are_addr ;
1451 rxd->saved_ctl = desc->are_stat ;
/*
 * are_fixup_rx() - shift a received frame's payload by ETHER_ALIGN
 * bytes (16-bit copies, plus one trailing byte for odd lengths) and
 * move m_data back so the IP header ends up naturally aligned.
 * NOTE(review): fragmentary extraction - the line initializing `dst`
 * is missing (presumably dst = src - ETHER_ALIGN/2 - TODO confirm);
 * kept byte-identical.
 */
1456 static __inline void
1457 are_fixup_rx(struct mbuf *m)
1460 uint16_t *src, *dst;
1462 src = mtod(m, uint16_t *);
1465 for (i = 0; i < m->m_len / sizeof(uint16_t); i++) {
1469 if (m->m_len % sizeof(uint16_t))
1470 *(uint8_t *)dst = *(uint8_t *)src;
1472 m->m_data -= ETHER_ALIGN;
/*
 * are_tx() - TX completion reaper (driver lock held): POSTREAD/POSTWRITE
 * syncs the ring, then walks cons..prod freeing completed descriptors:
 * descriptors with a zero DMA size are treated as not yet finished;
 * each reaped slot decrements are_tx_cnt, clears OACTIVE, bumps
 * OPACKETS/OERRORS from the ADSTAT_Tx_ES bit, unloads the dmamap (mbuf
 * freed only on the first descriptor of a chain - see comment), and
 * zeroes the descriptor before advancing the consumer index.
 * NOTE(review): fragmentary extraction - embedded line numbers, missing
 * ifp declaration, braces and the m_freem() line; kept byte-identical.
 */
1477 are_tx(struct are_softc *sc)
1479 struct are_txdesc *txd;
1480 struct are_desc *cur_tx;
1482 uint32_t ctl, devcs;
1485 ARE_LOCK_ASSERT(sc);
1487 cons = sc->are_cdata.are_tx_cons;
1488 prod = sc->are_cdata.are_tx_prod;
1492 bus_dmamap_sync(sc->are_cdata.are_tx_ring_tag,
1493 sc->are_cdata.are_tx_ring_map,
1494 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1498 * Go through our tx list and free mbufs for those
1499 * frames that have been transmitted.
1501 for (; cons != prod; ARE_INC(cons, ARE_TX_RING_CNT)) {
1502 cur_tx = &sc->are_rdata.are_tx_ring[cons];
1503 ctl = cur_tx->are_stat;
1504 devcs = cur_tx->are_devcs;
1505 /* Check if descriptor has "finished" flag */
1506 if (ARE_DMASIZE(devcs) == 0)
1509 sc->are_cdata.are_tx_cnt--;
1510 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1512 txd = &sc->are_cdata.are_txdesc[cons];
1514 if ((ctl & ADSTAT_Tx_ES) == 0)
1515 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1517 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1520 bus_dmamap_sync(sc->are_cdata.are_tx_tag, txd->tx_dmamap,
1521 BUS_DMASYNC_POSTWRITE);
1522 bus_dmamap_unload(sc->are_cdata.are_tx_tag, txd->tx_dmamap);
1524 /* Free only if it's first descriptor in list */
1529 /* reset descriptor */
1530 cur_tx->are_stat = 0;
1531 cur_tx->are_devcs = 0;
1532 cur_tx->are_addr = 0;
1535 sc->are_cdata.are_tx_cons = cons;
1537 bus_dmamap_sync(sc->are_cdata.are_tx_ring_tag,
1538 sc->are_cdata.are_tx_ring_map, BUS_DMASYNC_PREWRITE);
/*
 * Receive handler: scan the RX ring for descriptors the hardware has
 * released (ADSTAT_OWN clear), hand good frames up the stack, count
 * errors, and rearm each consumed descriptor with a fresh buffer.
 * Called with the softc lock held.
 */
are_rx(struct are_softc *sc)
	struct are_rxdesc *rxd;
	struct ifnet *ifp = sc->are_ifp;
	int cons, prog, packet_len, error;
	struct are_desc *cur_rx;
	ARE_LOCK_ASSERT(sc);
	cons = sc->are_cdata.are_rx_cons;
	/* Pull in the hardware's view of the RX ring before reading it. */
	bus_dmamap_sync(sc->are_cdata.are_rx_ring_tag,
	    sc->are_cdata.are_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/* Bound the scan to one full ring's worth of descriptors. */
	for (prog = 0; prog < ARE_RX_RING_CNT; ARE_INC(cons, ARE_RX_RING_CNT)) {
		cur_rx = &sc->are_rdata.are_rx_ring[cons];
		rxd = &sc->are_cdata.are_rxdesc[cons];
		/* Descriptor still owned by the DMA engine: nothing more to reap. */
		if ((cur_rx->are_stat & ADSTAT_OWN) == ADSTAT_OWN)
		packet_len = ADSTAT_Rx_LENGTH(cur_rx->are_stat);
		/* Assume it's error */
		/* Runt frame (shorter than the Ethernet minimum). */
		if (packet_len < 64)
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		else if ((cur_rx->are_stat & ADSTAT_Rx_DE) == 0) {
			bus_dmamap_sync(sc->are_cdata.are_rx_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			/* Skip 4 bytes of CRC */
			m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
			m->m_pkthdr.rcvif = ifp;
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
			/* Hand the frame to the network stack (lock dropped around
			 * the call in the full source — confirm). */
			(*ifp->if_input)(ifp, m);
		/* Restore CONTROL and CA values, reset DEVCS */
		cur_rx->are_stat = rxd->saved_ctl;
		cur_rx->are_addr = rxd->saved_ca;
		cur_rx->are_devcs = 0;
		/* Reinit descriptor */
		/* Give the descriptor back to the DMA engine. */
		cur_rx->are_stat = ADSTAT_OWN;
		cur_rx->are_devcs = 0;
		/* Last descriptor wraps the ring back to the start. */
		if (cons == ARE_RX_RING_CNT - 1)
			cur_rx->are_devcs |= ADCTL_ER;
		cur_rx->are_addr = 0;
		if (are_newbuf(sc, cons) != 0) {
			device_printf(sc->are_dev,
			    "Failed to allocate buffer\n");
		bus_dmamap_sync(sc->are_cdata.are_rx_ring_tag,
		    sc->are_cdata.are_rx_ring_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/* Publish the new consumer index and flush descriptor updates. */
	sc->are_cdata.are_rx_cons = cons;
	bus_dmamap_sync(sc->are_cdata.are_rx_ring_tag,
	    sc->are_cdata.are_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* NOTE(review): function signature elided in this excerpt; body reads
	 * like the device interrupt handler — confirm against full source. */
	struct are_softc *sc = arg;
	struct ifnet *ifp = sc->are_ifp;
	/* mask out interrupts */
	status = CSR_READ_4(sc, CSR_STATUS);
	/* Acknowledge the pending bits by writing them back. */
	CSR_WRITE_4(sc, CSR_STATUS, status);
	/* Dispatch RX and TX completion work based on the status bits. */
	if (status & sc->sc_rxint_mask) {
	if (status & sc->sc_txint_mask) {
	/* Try to get more packets going. */
	/* NOTE(review): function signature elided in this excerpt; the
	 * callout_reset below indicates this is the periodic are_tick()
	 * timer — confirm against full source. */
	struct are_softc *sc = xsc;
	struct mii_data *mii;
	ARE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->are_miibus);
	/* Re-arm ourselves to run again in one second. */
	callout_reset(&sc->are_stat_callout, hz, are_tick, sc);
/*
 * Add a hint-described child device (e.g. the MDIO sub-bus) under this
 * bus and log its creation.
 */
are_hinted_child(device_t bus, const char *dname, int dunit)
	BUS_ADD_CHILD(bus, 0, dname, dunit);
	device_printf(bus, "hinted child %s%d\n", dname, dunit);
/*
 * Probe method for the MDIO sub-device: just set the description.
 */
aremdio_probe(device_t dev)
	device_set_desc(dev, "Atheros AR531x built-in ethernet interface, MDIO controller");
/*
 * Attach method for the MDIO sub-device: map the (shared) register
 * window and attach any hinted children (PHY devices).
 */
aremdio_attach(device_t dev)
	struct are_softc *sc;
	sc = device_get_softc(dev);
	/* RF_SHAREABLE: the register window is shared with the MAC driver. */
	sc->are_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->are_rid, RF_ACTIVE | RF_SHAREABLE);
	if (sc->are_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
	sc->are_btag = rman_get_bustag(sc->are_res);
	sc->are_bhandle = rman_get_bushandle(sc->are_res);
	/* Probe and attach children declared via device hints. */
	bus_generic_probe(dev);
	bus_enumerate_hinted_children(dev);
	error = bus_generic_attach(dev);
/*
 * Detach method for the MDIO sub-device (body elided in this excerpt).
 */
aremdio_detach(device_t dev)
/*
 * Debug helper: print the TX list/host-base registers and the raw
 * contents of the TX descriptor at ring index 'pos'.
 */
dump_txdesc(struct are_softc *sc, int pos)
	struct are_desc *desc;
	desc = &sc->are_rdata.are_tx_ring[pos];
	device_printf(sc->are_dev, "CSR_TXLIST %08x\n", CSR_READ_4(sc, CSR_TXLIST));
	device_printf(sc->are_dev, "CSR_HTBA %08x\n", CSR_READ_4(sc, CSR_HTBA));
	device_printf(sc->are_dev, "%d TDES0:%08x TDES1:%08x TDES2:%08x TDES3:%08x\n",
	    pos, desc->are_stat, desc->are_devcs, desc->are_addr, desc->are_link);
1732 dump_status_reg(struct are_softc *sc)
1736 /* mask out interrupts */
1738 device_printf(sc->are_dev, "CSR_HTBA %08x\n", CSR_READ_4(sc, CSR_HTBA));
1739 status = CSR_READ_4(sc, CSR_STATUS);
1740 device_printf(sc->are_dev, "CSR5 Status Register EB:%d TS:%d RS:%d NIS:%d AIS:%d ER:%d SE:%d LNF:%d TM:%d RWT:%d RPS:%d RU:%d RI:%d UNF:%d LNP/ANC:%d TJT:%d TU:%d TPS:%d TI:%d\n",
1741 (status >> 23 ) & 7,
1742 (status >> 20 ) & 7,
1743 (status >> 17 ) & 7,
1744 (status >> 16 ) & 1,
1745 (status >> 15 ) & 1,
1746 (status >> 14 ) & 1,
1747 (status >> 13 ) & 1,
1748 (status >> 12 ) & 1,
1749 (status >> 11 ) & 1,
1759 (status >> 0 ) & 1);