2 * Copyright (c) 2010 Yohanes Nugroho <yohanes@gmail.com>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include "opt_platform.h"
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45 #include <sys/taskqueue.h>
47 #include <net/ethernet.h>
49 #include <net/if_arp.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 #include <net/if_types.h>
53 #include <net/if_vlan_var.h>
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/in_var.h>
59 #include <netinet/ip.h>
63 #include <net/bpfdesc.h>
65 #include <dev/mii/mii.h>
66 #include <dev/mii/miivar.h>
68 #include <arm/at91/at91_pmcvar.h>
69 #include <arm/at91/if_macbreg.h>
70 #include <arm/at91/if_macbvar.h>
71 #include <arm/at91/at91_piovar.h>
73 #include <arm/at91/at91sam9g20reg.h>
75 #include <machine/bus.h>
76 #include <machine/intr.h>
79 #include <dev/ofw/ofw_bus.h>
80 #include <dev/ofw/ofw_bus_subr.h>
83 /* "device miibus" required. See GENERIC if you get errors here. */
84 #include "miibus_if.h"
87 #define MACB_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
88 #define MACB_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
89 #define MACB_LOCK_INIT(_sc) \
90 mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
91 MTX_NETWORK_LOCK, MTX_DEF)
92 #define MACB_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
93 #define MACB_LOCK_ASSERT(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
94 #define MACB_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
97 static inline uint32_t
98 read_4(struct macb_softc *sc, bus_size_t off)
101 return (bus_read_4(sc->mem_res, off));
105 write_4(struct macb_softc *sc, bus_size_t off, uint32_t val)
108 bus_write_4(sc->mem_res, off, val);
112 static devclass_t macb_devclass;
114 /* ifnet entry points */
116 static void macbinit_locked(void *);
117 static void macbstart_locked(struct ifnet *);
119 static void macbinit(void *);
120 static void macbstart(struct ifnet *);
121 static void macbstop(struct macb_softc *);
122 static int macbioctl(struct ifnet * ifp, u_long, caddr_t);
124 /* bus entry points */
126 static int macb_probe(device_t dev);
127 static int macb_attach(device_t dev);
128 static int macb_detach(device_t dev);
130 /* helper functions */
132 macb_new_rxbuf(struct macb_softc *sc, int index);
135 macb_free_desc_dma_tx(struct macb_softc *sc);
138 macb_free_desc_dma_rx(struct macb_softc *sc);
141 macb_init_desc_dma_tx(struct macb_softc *sc);
144 macb_watchdog(struct macb_softc *sc);
146 static int macb_intr_rx_locked(struct macb_softc *sc, int count);
147 static void macb_intr_task(void *arg, int pending __unused);
148 static void macb_intr(void *xsc);
151 macb_tx_cleanup(struct macb_softc *sc);
154 phy_write(struct macb_softc *sc, int phy, int reg, int data);
156 static void macb_reset(struct macb_softc *sc);
159 macb_deactivate(device_t dev)
161 struct macb_softc *sc;
163 sc = device_get_softc(dev);
165 macb_free_desc_dma_tx(sc);
166 macb_free_desc_dma_rx(sc);
171 macb_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
175 KASSERT(nsegs == 1, ("wrong number of segments, should be 1"));
177 *paddr = segs->ds_addr;
/*
 * macb_alloc_desc_dma_tx: create the busdma tag for the TX descriptor
 * ring, allocate and load coherent ring memory, then create the TX mbuf
 * tag plus one dmamap per ring slot.
 * NOTE(review): the error-check/return lines between these statements
 * appear elided from this copy; compare against the upstream driver.
 */
macb_alloc_desc_dma_tx(struct macb_softc *sc)
	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS, /* max size */
	    sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS,
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_tx);	/* dmat */
		device_printf(sc->dev,
		    "Couldn't create TX descriptor dma tag\n");
	/* Allocate memory for TX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_tx,
	    (void**)&(sc->desc_tx), BUS_DMA_NOWAIT | BUS_DMA_ZERO |
	    BUS_DMA_COHERENT, &sc->dmamap_ring_tx);
		device_printf(sc->dev, "failed to allocate TX dma memory\n");
	/* Load the ring and capture its bus address via macb_getaddr(). */
	error = bus_dmamap_load(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    sc->desc_tx, sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS,
	    macb_getaddr, &sc->ring_paddr_tx, BUS_DMA_NOWAIT);
		device_printf(sc->dev, "can't load TX descriptor dma map\n");
	/* Allocate a busdma tag for mbufs. No alignment restriction applies. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES * MAX_FRAGMENT,	/* maxsize */
	    MAX_FRAGMENT,		/* nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_tx);	/* dmat */
		device_printf(sc->dev, "failed to create TX mbuf dma tag\n");
	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
		/* Create dma map for each descriptor. */
		error = bus_dmamap_create(sc->dmatag_ring_tx, 0,
		    &sc->tx_desc[i].dmamap);
			device_printf(sc->dev,
			    "failed to create TX mbuf dma map\n");
/*
 * macb_free_desc_dma_tx: destroy the per-slot TX mbuf dmamaps and their
 * tag, then unload/free/destroy the TX descriptor ring itself.  Safe to
 * call on a partially-constructed softc (every step is NULL-checked).
 */
macb_free_desc_dma_tx(struct macb_softc *sc)
	struct tx_desc_info *td;

	/* TX mbuf dma maps. */
	if (sc->dmatag_ring_tx != NULL) {
		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
			td = &sc->tx_desc[i];
			if (td->dmamap != NULL) {
				bus_dmamap_destroy(sc->dmatag_ring_tx,
		bus_dma_tag_destroy(sc->dmatag_ring_tx);
		sc->dmatag_ring_tx = NULL;
	/* TX descriptor ring. */
	if (sc->dmatag_data_tx != NULL) {
		/* Only unload if the load in macb_alloc_desc_dma_tx() succeeded. */
		if (sc->ring_paddr_tx != 0)
			bus_dmamap_unload(sc->dmatag_data_tx,
		if (sc->desc_tx != NULL)
			bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx,
		sc->ring_paddr_tx = 0;
		bus_dma_tag_destroy(sc->dmatag_data_tx);
		sc->dmatag_data_tx = NULL;
/*
 * macb_init_desc_dma_tx: zero the TX ring and hand every descriptor back
 * to software (TD_OWN set); the last slot also gets TD_WRAP_MASK so the
 * hardware wraps back to slot 0.  Caller must hold the softc mutex.
 */
macb_init_desc_dma_tx(struct macb_softc *sc)
	struct eth_tx_desc *desc;

	MACB_LOCK_ASSERT(sc);

	desc = &sc->desc_tx[0];
	bzero(desc, sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS);

	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
		desc = &sc->desc_tx[i];
		if (i == MACB_MAX_TX_BUFFERS - 1)
			desc->flags = TD_OWN | TD_WRAP_MASK;
			desc->flags = TD_OWN;

	/* Make the initialized ring visible to the device. */
	bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * macb_alloc_desc_dma_rx: mirror of the TX setup for the receive side —
 * tag + coherent memory + load for the RX descriptor ring, then a
 * single-segment MCLBYTES mbuf tag and one dmamap per RX slot.
 * NOTE(review): error-check/return lines appear elided from this copy.
 */
macb_alloc_desc_dma_rx(struct macb_softc *sc)
	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    /* maxsize, nsegments */
	    sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS, 1,
	    /* maxsegsz, flags */
	    sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS, 0,
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_rx);	/* dmat */
		device_printf(sc->dev,
		    "Couldn't create RX descriptor dma tag\n");
	/* Allocate RX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_rx, (void**)&(sc->desc_rx),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dmamap_ring_rx);
		device_printf(sc->dev,
		    "failed to allocate RX descriptor dma memory\n");
	/* Load the ring and capture its bus address via macb_getaddr(). */
	error = bus_dmamap_load(sc->dmatag_data_rx, sc->dmamap_ring_rx,
	    sc->desc_rx, sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS,
	    macb_getaddr, &sc->ring_paddr_rx, BUS_DMA_NOWAIT);
		device_printf(sc->dev, "can't load RX descriptor dma map\n");
	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(sc->sc_parent_tag,/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_rx);	/* dmat */
		device_printf(sc->dev, "failed to create RX mbuf dma tag\n");
	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
		error = bus_dmamap_create(sc->dmatag_ring_rx, 0,
		    &sc->rx_desc[i].dmamap);
			device_printf(sc->dev,
			    "failed to create RX mbuf dmamap\n");
/*
 * macb_free_desc_dma_rx: destroy the per-slot RX mbuf dmamaps and their
 * tag, then unload/free/destroy the RX descriptor ring.  NULL-checked so
 * it is safe on partially-constructed state.
 */
macb_free_desc_dma_rx(struct macb_softc *sc)
	struct rx_desc_info *rd;

	/* RX mbuf dma maps. */
	if (sc->dmatag_ring_rx != NULL) {
		for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
			rd = &sc->rx_desc[i];
			if (rd->dmamap != NULL) {
				bus_dmamap_destroy(sc->dmatag_ring_rx,
		bus_dma_tag_destroy(sc->dmatag_ring_rx);
		sc->dmatag_ring_rx = NULL;
	/* RX descriptor ring. */
	if (sc->dmatag_data_rx != NULL) {
		if (sc->ring_paddr_rx != 0)
			bus_dmamap_unload(sc->dmatag_data_rx,
		if (sc->desc_rx != NULL)
			bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
		sc->ring_paddr_rx = 0;
		bus_dma_tag_destroy(sc->dmatag_data_rx);
		sc->dmatag_data_rx = NULL;
/*
 * macb_init_desc_dma_rx: zero the RX ring and attach a fresh mbuf
 * cluster to every slot via macb_new_rxbuf().  Caller holds the softc
 * mutex.  Presumably returns non-zero on allocation failure — the
 * return statements are elided from this copy; confirm upstream.
 */
macb_init_desc_dma_rx(struct macb_softc *sc)
	struct eth_rx_desc *desc;
	struct rx_desc_info *rd;

	MACB_LOCK_ASSERT(sc);

	desc = &sc->desc_rx[0];
	bzero(desc, sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS);
	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
		rd = &sc->rx_desc[i];
		if (macb_new_rxbuf(sc, i) != 0)

	/* Make the populated ring visible to the device. */
	bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * macb_new_rxbuf: allocate an mbuf cluster for RX ring slot 'index', map
 * it for DMA and point the hardware descriptor at it.  The last slot
 * gets RD_WRAP_MASK OR'ed into the address word so the EMAC wraps.
 * NOTE(review): mbuf-NULL check and return statements are elided here.
 */
macb_new_rxbuf(struct macb_softc *sc, int index)
	struct rx_desc_info *rd;
	struct eth_rx_desc *desc;
	bus_dma_segment_t seg[1];

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;
	rd = &sc->rx_desc[index];
	bus_dmamap_unload(sc->dmatag_ring_rx, rd->dmamap);
	error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_rx, rd->dmamap, m,
	KASSERT(nsegs == 1, ("Too many segments returned!"));

	bus_dmamap_sync(sc->dmatag_ring_rx, rd->dmamap, BUS_DMASYNC_PREREAD);

	desc = &sc->desc_rx[index];
	desc->addr = seg[0].ds_addr;
	desc->flags = DATA_SIZE;
	if (index == MACB_MAX_RX_BUFFERS - 1)
		desc->addr |= RD_WRAP_MASK;
/*
 * macb_allocate_dma: create the parent DMA tag (32-bit addressing) and
 * then build the TX and RX descriptor/mbuf DMA state on top of it.
 * NOTE(review): error-return plumbing appears elided from this copy.
 */
macb_allocate_dma(struct macb_softc *sc)
	/* Create parent tag for tx and rx */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    NULL, NULL,			/* lockfunc, lockarg */
		device_printf(sc->dev, "Couldn't create parent DMA tag\n");
	if ((error = macb_alloc_desc_dma_tx(sc)) != 0)
	if ((error = macb_alloc_desc_dma_rx(sc)) != 0)
	/*
	 * Interior of macb_tick(): the periodic callout body.  The
	 * enclosing signature and the assignment of 'sc' are elided from
	 * this copy — presumably sc comes from the callout argument;
	 * confirm upstream.  It re-arms itself once per second below.
	 */
	struct macb_softc *sc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->tick_ch, hz, macb_tick, sc);
/*
 * macb_watchdog: TX watchdog, driven from the tick callout.  Counts the
 * per-softc timer down; on expiry logs a timeout, bumps OERRORS, marks
 * the interface not-running and kicks transmission again.  Caller holds
 * the softc mutex.  NOTE(review): 'ifp' initialization, early returns
 * and the re-init call are elided from this copy.
 */
macb_watchdog(struct macb_softc *sc)
	MACB_LOCK_ASSERT(sc);

	/* Timer disarmed, or armed but not yet expired: nothing to do. */
	if (sc->macb_watchdog_timer == 0 || --sc->macb_watchdog_timer)

	/* Timeout with no link is reported separately. */
	if ((sc->flags & MACB_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	if_printf(ifp, "watchdog timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		macbstart_locked(ifp);
/*
 * macbinit_locked: bring the interface up with the softc mutex held —
 * (re)build both descriptor rings, program NCFGR (MDC clock divisor,
 * pause, FCS discard, speed, RX buffer offset), point the EMAC at the
 * rings, enable RX/TX/management and interrupts, then start the tick
 * callout.  NOTE(review): early-return and IER continuation lines are
 * elided from this copy.
 */
macbinit_locked(void *xsc)
	struct macb_softc *sc;
	struct mii_data *mii;

	MACB_LOCK_ASSERT(sc);

	/* Already up: nothing to do. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)

	if ((err = macb_init_desc_dma_rx(sc)) != 0) {
		device_printf(sc->dev, "no memory for RX buffers\n");
	macb_init_desc_dma_tx(sc);

	config = read_4(sc, EMAC_NCFGR) | (sc->clock << 10); /*set clock*/
	config |= CFG_PAE;		/* PAuse Enable */
	config |= CFG_DRFCS;		/* Discard Rx FCS */
	config |= CFG_SPD;		/* 100 mbps */
	config |= CFG_RBOF_2;		/* offset +2: ETHER_ALIGN for the IP header */
	write_4(sc, EMAC_NCFGR, config);

	/* Initialize TX and RX buffers */
	write_4(sc, EMAC_RBQP, sc->ring_paddr_rx);
	write_4(sc, EMAC_TBQP, sc->ring_paddr_tx);

	/* Enable TX and RX */
	write_4(sc, EMAC_NCR, RX_ENABLE | TX_ENABLE | MPE_ENABLE);

	/* Enable interrupts */
	write_4(sc, EMAC_IER, (RCOMP_INTERRUPT |

	/*
	 * Set 'running' flag, and clear output active flag
	 * and attempt to start the output
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	sc->flags |= MACB_FLAG_LINK;

	callout_reset(&sc->tick_ch, hz, macb_tick, sc);
/*
 * macb_tx_cleanup: reclaim completed TX descriptors.  Reads and clears
 * TSR; on underrun (TSR_UND) the whole ring is reset and every mapped
 * mbuf released, otherwise descriptors are walked from tx_cons until an
 * un-completed one (TD_OWN clear) is found.  Caller holds the mutex.
 * NOTE(review): several control-flow lines are elided from this copy.
 */
macb_tx_cleanup(struct macb_softc *sc)
	struct eth_tx_desc *desc;
	struct tx_desc_info *td;

	MACB_LOCK_ASSERT(sc);

	status = read_4(sc, EMAC_TSR);

	/* TSR bits are write-one-to-clear. */
	write_4(sc, EMAC_TSR, status);

	/* Tx UnderRun: reset the whole ring and drop pending frames. */
	if ((status & TSR_UND) != 0) {
		printf("underrun\n");
		bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		sc->tx_cons = sc->tx_prod = 0;
		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
			desc = &sc->desc_tx[i];
			desc->flags = TD_OWN;
		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
			td = &sc->tx_desc[i];
			if (td->buff != NULL) {
				/* We are finished with this descriptor. */
				bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
				    BUS_DMASYNC_POSTWRITE);
				/* ... and unload, so we can reuse. */
				bus_dmamap_unload(sc->dmatag_data_tx,

	if ((status & TSR_COMP) == 0)

	if (sc->tx_cons == sc->tx_prod)

	/* Prepare to read the ring (owner bit). */
	bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	while (sc->tx_cons != sc->tx_prod) {
		desc = &sc->desc_tx[sc->tx_cons];
		if ((desc->flags & TD_OWN) == 0)

		td = &sc->tx_desc[sc->tx_cons];
		if (td->buff != NULL) {
			/* We are finished with this descriptor. */
			bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
			    BUS_DMASYNC_POSTWRITE);
			/* ... and unload, so we can reuse. */
			bus_dmamap_unload(sc->dmatag_data_tx,
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			MACB_DESC_INC(sc->tx_cons, MACB_MAX_TX_BUFFERS);
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			desc->flags = TD_OWN;
			desc = &sc->desc_tx[sc->tx_cons];
			if (flags & TD_LAST) {
		} while (sc->tx_cons != sc->tx_prod);

	/* Unarm watchdog timer when there is no pending descriptors in queue. */
	sc->macb_watchdog_timer = 0;
/*
 * macb_rx: drain owned RX descriptors.  Multi-descriptor frames are
 * chained through macb_cdata.rxhead/rxtail; the first segment skips the
 * 2-byte CFG_RBOF_2 alignment pad, and RD_EOF marks the final segment,
 * whose length is trimmed before the chain is handed to if_input.  A
 * failed macb_new_rxbuf() drops the frame (IQDROPS) and recycles the
 * slot.  NOTE(review): declarations and several branch lines are elided
 * from this copy.
 */
macb_rx(struct macb_softc *sc)
	struct eth_rx_desc *rxdesc;

	rxdesc = &(sc->desc_rx[sc->rx_cons]);

	bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Process only descriptors the hardware has handed back. */
	while (rxdesc->addr & RD_OWN) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)

		flags = rxdesc->flags;
		rxbytes = flags & RD_LEN_MASK;
		m = sc->rx_desc[sc->rx_cons].buff;

		bus_dmamap_sync(sc->dmatag_ring_rx,
		    sc->rx_desc[sc->rx_cons].dmamap, BUS_DMASYNC_POSTREAD);
		if (macb_new_rxbuf(sc, sc->rx_cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				rxdesc->flags = DATA_SIZE;
				MACB_DESC_INC(sc->rx_cons, MACB_MAX_RX_BUFFERS);
				if ((rxdesc->flags & RD_EOF) != 0)
				rxdesc = &(sc->desc_rx[sc->rx_cons]);
			} while (sc->rx_cons != first);
			/* Discard any partially-assembled chain. */
			if (sc->macb_cdata.rxhead != NULL) {
				m_freem(sc->macb_cdata.rxhead);
				sc->macb_cdata.rxhead = NULL;
				sc->macb_cdata.rxtail = NULL;

		/* Chain received mbufs. */
		if (sc->macb_cdata.rxhead == NULL) {
			sc->macb_cdata.rxhead = m;
			sc->macb_cdata.rxtail = m;
			/* First segment: account for the 2-byte RX offset. */
			m->m_len = DATA_SIZE - 2;
			m->m_flags &= ~M_PKTHDR;
			m->m_len = DATA_SIZE;
			sc->macb_cdata.rxtail->m_next = m;
			sc->macb_cdata.rxtail = m;

		if (flags & RD_EOF) {
			/* Trim the last segment to the true frame length. */
				sc->macb_cdata.rxtail->m_len = (rxbytes -
				    ((nsegs - 1) * DATA_SIZE)) + 2;
			m = sc->macb_cdata.rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = rxbytes;
			m->m_pkthdr.rcvif = ifp;
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
			(*ifp->if_input)(ifp, m);
			sc->macb_cdata.rxhead = NULL;
			sc->macb_cdata.rxtail = NULL;

		rxdesc->addr &= ~RD_OWN;
		MACB_DESC_INC(sc->rx_cons, MACB_MAX_RX_BUFFERS);
		rxdesc = &(sc->desc_rx[sc->rx_cons]);

	/* Re-enable RX interrupts (disabled in macb_intr()). */
	write_4(sc, EMAC_IER, (RCOMP_INTERRUPT|RXUBR_INTERRUPT));
815 macb_intr_rx_locked(struct macb_softc *sc, int count)
/*
 * macb_intr_task: taskqueue handler enqueued by macb_intr() for RX
 * completion work; runs macb_intr_rx_locked() with an unlimited budget.
 * NOTE(review): the 'sc = arg' assignment and lock acquisition lines
 * are elided from this copy — confirm upstream.
 */
macb_intr_task(void *arg, int pending __unused)
	struct macb_softc *sc;

	macb_intr_rx_locked(sc, -1);
	/*
	 * Interior of macb_intr(): the interrupt filter/handler.  Reads
	 * ISR in a loop; on RX completion it masks RX interrupts and
	 * defers to the taskqueue, on TX completion it reclaims
	 * descriptors, and finally kicks the transmit path if frames are
	 * queued.  The signature, 'sc'/'ifp' setup, locking and loop
	 * braces are elided from this copy — confirm upstream.
	 */
	struct macb_softc *sc;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		printf("not running\n");

	status = read_4(sc, EMAC_ISR);
		if (status & RCOMP_INTERRUPT) {
			/* Mask RX interrupts until the task has drained the ring. */
			write_4(sc, EMAC_IDR, (RCOMP_INTERRUPT|RXUBR_INTERRUPT));
			taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);

		if (status & TCOMP_INTERRUPT) {

		status = read_4(sc, EMAC_ISR);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		macbstart_locked(ifp);
/*
 * macb_encap: map *m_head for DMA and fill TX descriptors for each
 * segment.  On EFBIG the chain is collapsed to MAX_FRAGMENT clusters
 * and retried.  Ownership (TD_OWN) is set on all but the first
 * descriptor during the fill, the last fragment gets TD_LAST, and only
 * then is the first descriptor's TD_OWN cleared — so the hardware never
 * sees a half-built chain.  The first and last slots swap dmamaps so
 * the mbuf can be unloaded when the final descriptor completes.
 * NOTE(review): error returns and some bookkeeping lines are elided.
 */
macb_encap(struct macb_softc *sc, struct mbuf **m_head)
	struct eth_tx_desc *desc;
	struct tx_desc_info *txd, *txd_last;
	bus_dma_segment_t segs[MAX_FRAGMENT];
	int error, i, nsegs, prod, si;

	M_ASSERTPKTHDR((*m_head));

	txd = txd_last = &sc->tx_desc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, txd->dmamap,
	    *m_head, segs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, MAX_FRAGMENT);
		error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, txd->dmamap,
		    *m_head, segs, &nsegs, 0);
	} else if (error != 0) {

	/* Check for TX descriptor overruns. */
	if (sc->tx_cnt + nsegs > MACB_MAX_TX_BUFFERS - 1) {
		bus_dmamap_unload(sc->dmatag_ring_tx, txd->dmamap);
	bus_dmamap_sync(sc->dmatag_ring_tx, txd->dmamap, BUS_DMASYNC_PREWRITE);

	/* TODO: VLAN hardware tag insertion. */

	for (i = 0; i < nsegs; i++) {
		desc = &sc->desc_tx[prod];
		desc->addr = segs[i].ds_addr;
			desc->flags = segs[i].ds_len | TD_OWN;
			desc->flags = segs[i].ds_len;
		if (prod == MACB_MAX_TX_BUFFERS - 1)
			desc->flags |= TD_WRAP_MASK;
		MACB_DESC_INC(prod, MACB_MAX_TX_BUFFERS);

	/*
	 * Set EOP on the last fragment.
	 */
	desc->flags |= TD_LAST;
	desc = &sc->desc_tx[si];
	/* Release the first descriptor to the hardware last. */
	desc->flags &= ~TD_OWN;

	/* Swap the first dma map and the last. */
	map = txd_last->dmamap;
	txd_last->dmamap = txd->dmamap;
/*
 * macbstart_locked: dequeue frames from if_snd, defragment multi-mbuf
 * chains, encapsulate them onto the TX ring, then sync the ring, hit
 * TRANSMIT_START and arm the watchdog.  Caller holds the softc mutex.
 * NOTE(review): several guard/continue lines are elided from this copy.
 */
macbstart_locked(struct ifnet *ifp)
	struct macb_softc *sc;

	/* Not running, output stalled, or no link: do nothing. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->flags & MACB_FLAG_LINK) == 0) {

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0->m_next != NULL) {
			/* Fragmented mbuf chain, collapse it. */
			m_new = m_defrag(m0, M_NOWAIT);
				/* Original frame freed. */
				/* Defragmentation failed, just use the chain. */
		if (macb_encap(sc, &m0)) {
			/* Ring full: requeue and mark output active. */
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	write_4(sc, EMAC_NCR, read_4(sc, EMAC_NCR) | TRANSMIT_START);
	sc->macb_watchdog_timer = MACB_TIMEOUT;
/*
 * macbinit: if_init entry point; takes the softc mutex and defers to
 * macbinit_locked().
 */
static void
macbinit(void *xsc)
{
	struct macb_softc *sc = xsc;

	MACB_LOCK(sc);
	macbinit_locked(sc);
	MACB_UNLOCK(sc);
}
1023 macbstart(struct ifnet *ifp)
1025 struct macb_softc *sc = ifp->if_softc;
1026 MACB_ASSERT_UNLOCKED(sc);
1028 macbstart_locked(ifp);
/*
 * macbstop: take the interface down — clear RUNNING/OACTIVE and the link
 * flag, stop the tick callout, disarm the watchdog, and release every
 * mbuf still mapped on the TX and RX rings.  NOTE(review): the hardware
 * reset call and mbuf-free lines appear elided from this copy.
 */
macbstop(struct macb_softc *sc)
	struct ifnet *ifp = sc->ifp;
	struct rx_desc_info *rd;
	struct tx_desc_info *td;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	sc->flags &= ~MACB_FLAG_LINK;
	callout_stop(&sc->tick_ch);
	sc->macb_watchdog_timer = 0;

	/* Free TX/RX mbufs still in the queues. */
	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
		td = &sc->tx_desc[i];
		if (td->buff != NULL) {
			bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmatag_data_tx, td->dmamap);
	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
		rd = &sc->rx_desc[i];
		if (rd->buff != NULL) {
			bus_dmamap_sync(sc->dmatag_ring_rx, rd->dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->dmatag_data_rx, rd->dmamap);
/*
 * get_hash_index: compute the EMAC 6-bit multicast hash index for a MAC
 * address by XOR-folding the 48 address bits six at a time (bit j of
 * the index XORs address bits j, j+6, j+12, ...).  NOTE(review): the
 * declarations, the computation of 'k' and the index accumulation /
 * return lines are elided from this copy — confirm upstream.
 */
get_hash_index(uint8_t *mac)
	for (i = 0; i < 6; i++) {
		for (j = 0; j < 8; j++) {
			bit ^= (mac[k/8] & (1 << (k % 8)) ) != 0;
/*
 * Set the hash-filter bit selecting 'mac' in the 64-bit filter held as
 * two 32-bit words (filter[0] = low word / HRB, filter[1] = high word /
 * HRT).
 */
static void
set_mac_filter(uint32_t *filter, uint8_t *mac)
{
	int bits;

	bits = get_hash_index(mac);
	filter[bits >> 5] |= 1 << (bits & 31);
}
/*
 * set_filter: program the EMAC receive filter from the interface flags —
 * clear CAF/MTI and the hash registers, then either open them fully for
 * ALLMULTI, set CAF for PROMISC, or rebuild the 64-bit multicast hash
 * from the interface's multicast list.  NOTE(review): early returns and
 * some brace lines are elided from this copy.
 */
set_filter(struct macb_softc *sc)
	struct ifmultiaddr *ifma;
	uint32_t multicast_filter[2];

	config = read_4(sc, EMAC_NCFGR);

	config &= ~(CFG_CAF | CFG_MTI);
	write_4(sc, EMAC_HRB, 0);
	write_4(sc, EMAC_HRT, 0);

	if ((ifp->if_flags & (IFF_ALLMULTI |IFF_PROMISC)) != 0){
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			/* Accept every multicast address. */
			write_4(sc, EMAC_HRB, ~0);
			write_4(sc, EMAC_HRT, ~0);
		if ((ifp->if_flags & IFF_PROMISC) != 0) {
		write_4(sc, EMAC_NCFGR, config);

	if_maddr_rlock(ifp);

	multicast_filter[0] = 0;
	multicast_filter[1] = 0;

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
		set_mac_filter(multicast_filter,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	write_4(sc, EMAC_HRB, multicast_filter[0]);
	write_4(sc, EMAC_HRT, multicast_filter[1]);
	write_4(sc, EMAC_NCFGR, config|CFG_MTI);

	if_maddr_runlock(ifp);
/*
 * macbioctl: if_ioctl entry point.  SIOCSIFFLAGS re-applies the receive
 * filter on PROMISC/ALLMULTI changes or (re)starts/stops the interface;
 * multicast list changes refresh the filter while running; media ioctls
 * go to miibus; everything else falls through to ether_ioctl().
 * NOTE(review): switch/case labels, locking and the stop path are
 * elided from this copy.
 */
macbioctl(struct ifnet * ifp, u_long cmd, caddr_t data)
	struct macb_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;

		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/* Only the filter changed: no full re-init needed. */
				if (((ifp->if_flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				macbinit_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		sc->if_flags = ifp->if_flags;

		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)

		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		error = ether_ioctl(ifp, cmd, data);
1202 /* bus entry points */
1205 macb_probe(device_t dev)
1208 if (!ofw_bus_is_compatible(dev, "cdns,at32ap7000-macb"))
1212 device_set_desc(dev, "macb");
1217 * Change media according to request.
1220 macb_ifmedia_upd(struct ifnet *ifp)
1222 struct macb_softc *sc = ifp->if_softc;
1223 struct mii_data *mii;
1225 mii = device_get_softc(sc->miibus);
1233 * Notify the world which media we're using.
1236 macb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1238 struct macb_softc *sc = ifp->if_softc;
1239 struct mii_data *mii;
1241 mii = device_get_softc(sc->miibus);
1244 /* Don't report link state if driver is not running. */
1245 if ((ifp->if_flags & IFF_UP) == 0) {
1250 ifmr->ifm_active = mii->mii_media_active;
1251 ifmr->ifm_status = mii->mii_media_status;
1256 macb_reset(struct macb_softc *sc)
1261 write_4(sc, EMAC_NCR, 0);
1263 write_4(sc, EMAC_NCR, CLEAR_STAT);
1265 /* Clear all status flags */
1266 write_4(sc, EMAC_TSR, ~0UL);
1267 write_4(sc, EMAC_RSR, ~0UL);
1269 /* Disable all interrupts */
1270 write_4(sc, EMAC_IDR, ~0UL);
1271 read_4(sc, EMAC_ISR);
1277 macb_get_mac(struct macb_softc *sc, u_char *eaddr)
1282 bottom = read_4(sc, EMAC_SA1B);
1283 top = read_4(sc, EMAC_SA1T);
1285 eaddr[0] = bottom & 0xff;
1286 eaddr[1] = (bottom >> 8) & 0xff;
1287 eaddr[2] = (bottom >> 16) & 0xff;
1288 eaddr[3] = (bottom >> 24) & 0xff;
1289 eaddr[4] = top & 0xff;
1290 eaddr[5] = (top >> 8) & 0xff;
 * We have to know if we're using MII or RMII attachment
 * for the MACB to talk to the PHY correctly. With FDT,
 * we must use RMII if there's a property phy-mode
 * equal to "rmii". Otherwise MII mode is used.
/*
 * macb_set_rmii (FDT variant): select RMII when the device-tree node
 * carries phy-mode = "rmii"; otherwise leave sc->use_rmii untouched.
 * NOTE(review): the declarations (node handle, 'prop' buffer, 'len')
 * and the len<=0 early return are elided from this copy.
 */
macb_set_rmii(struct macb_softc *sc)
	node = ofw_bus_get_node(sc->dev);
	memset(prop, 0 ,sizeof(prop));
	len = OF_getproplen(node, "phy-mode");
	if (OF_getprop(node, "phy-mode", prop, len) != len)
	if (strncmp(prop, "rmii", 4) == 0)
		sc->use_rmii = USRIO_RMII;
1322 * We have to know if we're using MII or RMII attachment
1323 * for the MACB to talk to the PHY correctly. Without FDT,
1324 * there's no good way to do this. So, if the config file
1325 * has 'option AT91_MACB_USE_RMII', then we'll force RMII.
 * Otherwise, we'll use what the bootloader set up. Either
 * it set up RMII or MII, in which case we'll get it right,
1328 * or it did nothing, and we'll fall back to MII and the
1329 * option would override if present.
1332 macb_set_rmii(struct macb_softc *sc)
1334 #ifdef AT91_MACB_USE_RMII
1335 sc->use_rmii = USRIO_RMII;
1337 sc->use_rmii = read_4(sc, EMAC_USRIO) & USRIO_RMII;
/*
 * macb_attach: newbus attach — allocate register/IRQ resources, enable
 * the peripheral clock, read the bootloader-programmed MAC address,
 * derive the MDC clock divisor from the master clock, attach the PHY,
 * build DMA state, initialize the ifnet, create the RX taskqueue, hook
 * the interrupt and attach to the ethernet layer.  NOTE(review): mutex
 * init, rid assignments, error gotos and the final return are elided
 * from this copy.
 */
macb_attach(device_t dev)
	struct macb_softc *sc;
	struct ifnet *ifp = NULL;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	u_char eaddr[ETHER_ADDR_LEN];
	struct at91_pmc_clock *master;

	sc = device_get_softc(dev);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	/*
	 * Allocate resources.
	 */
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resources.\n");

	/* Enable the EMAC peripheral clock before touching registers. */
	sc->clk = at91_pmc_clock_ref(device_get_nameunit(sc->dev));
	at91_pmc_clock_enable(sc->clk);

	macb_get_mac(sc, eaddr);

	master = at91_pmc_clock_ref("mck");

	/* Pick the MDC divisor that keeps MDIO within spec for this MCK. */
	pclk_hz = master->hz;

	sc->clock = CFG_CLK_8;
	if (pclk_hz <= 20000000)
		sc->clock = CFG_CLK_8;
	else if (pclk_hz <= 40000000)
		sc->clock = CFG_CLK_16;
	else if (pclk_hz <= 80000000)
		sc->clock = CFG_CLK_32;
		sc->clock = CFG_CLK_64;

	sc->clock = sc->clock << 10;

	write_4(sc, EMAC_NCFGR, sc->clock);
	write_4(sc, EMAC_USRIO, USRIO_CLOCK | sc->use_rmii); //enable clock

	write_4(sc, EMAC_NCR, MPE_ENABLE); //enable MPE

	sc->ifp = ifp = if_alloc(IFT_ETHER);
	err = mii_attach(dev, &sc->miibus, ifp, macb_ifmedia_upd,
	    macb_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
		device_printf(dev, "attaching PHYs failed\n");

	if (macb_allocate_dma(sc) != 0)

	sctx = device_get_sysctl_ctx(dev);
	soid = device_get_sysctl_tree(dev);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU; /* The hw bits already set. */
	ifp->if_start = macbstart;
	ifp->if_ioctl = macbioctl;
	ifp->if_init = macbinit;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	sc->if_flags = ifp->if_flags;

	TASK_INIT(&sc->sc_intr_task, 0, macb_intr_task, sc);

	sc->sc_tq = taskqueue_create_fast("macb_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	if (sc->sc_tq == NULL) {
		device_printf(sc->dev, "could not create taskqueue\n");

	ether_ifattach(ifp, eaddr);

	/*
	 * Activate the interrupt.
	 */
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, macb_intr, sc, &sc->intrhand);
		device_printf(dev, "could not establish interrupt handler.\n");
		ether_ifdetach(ifp);

	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	sc->macb_cdata.rxhead = 0;
	sc->macb_cdata.rxtail = 0;

	phy_write(sc, 0, 0, 0x3300); //force autoneg
/*
 * macb_detach: newbus detach — unhook from the ethernet layer, stop the
 * hardware, then release the callout, interrupt, taskqueue, DMA state,
 * bus resources and the mutex, in that order.  NOTE(review): the
 * locked macbstop() call and return appear elided from this copy.
 */
macb_detach(device_t dev)
	struct macb_softc *sc;

	sc = device_get_softc(dev);
	ether_ifdetach(sc->ifp);
	callout_drain(&sc->tick_ch);
	bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
	taskqueue_drain(sc->sc_tq, &sc->sc_intr_task);
	taskqueue_free(sc->sc_tq);
	macb_deactivate(dev);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
	MACB_LOCK_DESTROY(sc);
1500 /*PHY related functions*/
1502 phy_read(struct macb_softc *sc, int phy, int reg)
1506 write_4(sc, EMAC_MAN, EMAC_MAN_REG_RD(phy, reg));
1507 while ((read_4(sc, EMAC_SR) & EMAC_SR_IDLE) == 0)
1509 val = read_4(sc, EMAC_MAN) & EMAC_MAN_VALUE_MASK;
1515 phy_write(struct macb_softc *sc, int phy, int reg, int data)
1518 write_4(sc, EMAC_MAN, EMAC_MAN_REG_WR(phy, reg, data));
1519 while ((read_4(sc, EMAC_SR) & EMAC_SR_IDLE) == 0)
1526 * MII bus support routines.
1529 macb_miibus_readreg(device_t dev, int phy, int reg)
1531 struct macb_softc *sc;
1532 sc = device_get_softc(dev);
1533 return (phy_read(sc, phy, reg));
1537 macb_miibus_writereg(device_t dev, int phy, int reg, int data)
1539 struct macb_softc *sc;
1540 sc = device_get_softc(dev);
1541 return (phy_write(sc, phy, reg, data));
/*
 * macb_child_detached: bus callback invoked when a child device (the
 * miibus instance) goes away.  NOTE(review): the body that reacts to
 * 'child' is elided from this copy — confirm upstream.
 */
macb_child_detached(device_t dev, device_t child)
	struct macb_softc *sc;

	sc = device_get_softc(dev);
/*
 * macb_miibus_statchg: miibus link-state callback.  Clears the link
 * flag, then when the PHY reports valid media re-derives NCFGR speed
 * bits from the negotiated subtype and re-asserts MACB_FLAG_LINK.
 * NOTE(review): case labels and duplex handling are elided from this
 * copy.
 */
macb_miibus_statchg(device_t dev)
	struct macb_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->miibus);

	sc->flags &= ~MACB_FLAG_LINK;

	config = read_4(sc, EMAC_NCFGR);

	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
			/* 10 Mbps: clear the speed bit. */
			config &= ~(CFG_SPD);
			sc->flags |= MACB_FLAG_LINK;
			sc->flags |= MACB_FLAG_LINK;

	write_4(sc, EMAC_NCFGR, config);
/*
 * Dispatch table wiring this driver into newbus and miibus.
 * NOTE(review): the DEVMETHOD_END terminator and closing brace are
 * elided from this copy.
 */
static device_method_t macb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		macb_probe),
	DEVMETHOD(device_attach,	macb_attach),
	DEVMETHOD(device_detach,	macb_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	macb_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	macb_miibus_readreg),
	DEVMETHOD(miibus_writereg,	macb_miibus_writereg),
	DEVMETHOD(miibus_statchg,	macb_miibus_statchg),
/*
 * Driver/module glue: register the driver on both the FDT simplebus and
 * legacy atmelarm attachments, hang miibus off it, and declare the
 * module dependencies.  NOTE(review): the driver-name and methods
 * fields of macb_driver appear elided from this copy.
 */
static driver_t macb_driver = {
	sizeof(struct macb_softc),

DRIVER_MODULE(macb, simplebus, macb_driver, macb_devclass, NULL, NULL);
DRIVER_MODULE(macb, atmelarm, macb_driver, macb_devclass, 0, 0);
DRIVER_MODULE(miibus, macb, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(macb, miibus, 1, 1, 1);
MODULE_DEPEND(macb, ether, 1, 1, 1);