2 * Copyright (c) 2010 Yohanes Nugroho <yohanes@gmail.com>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/systm.h>
33 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <sys/sysctl.h>
42 #include <sys/taskqueue.h>
44 #include <net/ethernet.h>
46 #include <net/if_arp.h>
47 #include <net/if_dl.h>
48 #include <net/if_media.h>
49 #include <net/if_types.h>
50 #include <net/if_vlan_var.h>
53 #include <netinet/in.h>
54 #include <netinet/in_systm.h>
55 #include <netinet/in_var.h>
56 #include <netinet/ip.h>
60 #include <net/bpfdesc.h>
62 #include <dev/mii/mii.h>
63 #include <dev/mii/miivar.h>
65 #include <arm/at91/at91_pmcvar.h>
66 #include <arm/at91/if_macbreg.h>
67 #include <arm/at91/if_macbvar.h>
68 #include <arm/at91/at91_piovar.h>
70 #include <arm/at91/at91sam9g20reg.h>
72 #include <machine/bus.h>
73 #include <machine/intr.h>
75 /* "device miibus" required. See GENERIC if you get errors here. */
76 #include "miibus_if.h"
/* Softc mutex helpers: a single network-class MTX_DEF mutex serializes
 * all driver state (rings, flags, callouts). */
79 #define MACB_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
80 #define MACB_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
81 #define MACB_LOCK_INIT(_sc) \
82 mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
83 MTX_NETWORK_LOCK, MTX_DEF)
/* NOTE(review): the next three macros carry a trailing ';' in their
 * bodies, so a caller's own ';' produces an empty statement — harmless
 * in statement position but unsafe inside an un-braced if/else. */
84 #define MACB_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
85 #define MACB_LOCK_ASSERT(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
86 #define MACB_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
/* 32-bit register accessors for the EMAC register window (sc->mem_res). */
89 static inline uint32_t
90 read_4(struct macb_softc *sc, bus_size_t off)
93 return (bus_read_4(sc->mem_res, off));
97 write_4(struct macb_softc *sc, bus_size_t off, uint32_t val)
100 bus_write_4(sc->mem_res, off, val);
104 static devclass_t macb_devclass;
106 /* ifnet entry points */
/* *_locked variants assume the softc mutex is already held. */
108 static void macbinit_locked(void *);
109 static void macbstart_locked(struct ifnet *);
111 static void macbinit(void *);
112 static void macbstart(struct ifnet *);
113 static void macbstop(struct macb_softc *);
114 static int macbioctl(struct ifnet * ifp, u_long, caddr_t);
116 /* bus entry points */
118 static int macb_probe(device_t dev);
119 static int macb_attach(device_t dev);
120 static int macb_detach(device_t dev);
122 /* helper functions */
/* DMA ring setup/teardown and datapath helpers. */
124 macb_new_rxbuf(struct macb_softc *sc, int index);
127 macb_free_desc_dma_tx(struct macb_softc *sc);
130 macb_free_desc_dma_rx(struct macb_softc *sc);
133 macb_init_desc_dma_tx(struct macb_softc *sc);
136 macb_watchdog(struct macb_softc *sc);
138 static int macb_intr_rx_locked(struct macb_softc *sc, int count);
139 static void macb_intr_task(void *arg, int pending __unused);
140 static void macb_intr(void *xsc);
143 macb_tx_cleanup(struct macb_softc *sc);
146 phy_write(struct macb_softc *sc, int phy, int reg, int data);
148 static void macb_reset(struct macb_softc *sc);
/* Release all TX and RX descriptor/mbuf DMA resources for this device. */
151 macb_deactivate(device_t dev)
153 struct macb_softc *sc;
155 sc = device_get_softc(dev);
157 macb_free_desc_dma_tx(sc);
158 macb_free_desc_dma_rx(sc);
/* bus_dmamap_load() callback: stores the single segment's bus address
 * into the bus_addr_t pointed to by 'arg'. */
163 macb_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
167 KASSERT(nsegs == 1, ("wrong number of segments, should be 1"));
169 *paddr = segs->ds_addr;
/* Create the TX descriptor-ring DMA tag, allocate/load the ring memory
 * (physical address lands in sc->ring_paddr_tx), then create the TX
 * mbuf tag and one dmamap per ring slot. */
173 macb_alloc_desc_dma_tx(struct macb_softc *sc)
177 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
178 error = bus_dma_tag_create(sc->sc_parent_tag, /* parent */
179 16, 0, /* alignment, boundary */
180 BUS_SPACE_MAXADDR, /* lowaddr */
181 BUS_SPACE_MAXADDR, /* highaddr */
182 NULL, NULL, /* filtfunc, filtfuncarg */
183 sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS, /* max size */
185 sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS,
187 NULL, NULL, /* lockfunc, lockfuncarg */
188 &sc->dmatag_data_tx); /* dmat */
190 device_printf(sc->dev,
191 "Couldn't create TX descriptor dma tag\n");
194 /* Allocate memory for TX ring. */
195 error = bus_dmamem_alloc(sc->dmatag_data_tx,
196 (void**)&(sc->desc_tx), BUS_DMA_NOWAIT | BUS_DMA_ZERO |
197 BUS_DMA_COHERENT, &sc->dmamap_ring_tx);
199 device_printf(sc->dev, "failed to allocate TX dma memory\n");
/* Resolve the ring's physical address for programming EMAC_TBQP later. */
203 error = bus_dmamap_load(sc->dmatag_data_tx, sc->dmamap_ring_tx,
204 sc->desc_tx, sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS,
205 macb_getaddr, &sc->ring_paddr_tx, BUS_DMA_NOWAIT);
207 device_printf(sc->dev, "can't load TX descriptor dma map\n");
210 /* Allocate a busdma tag for mbufs. No alignment restriction applys. */
211 error = bus_dma_tag_create(sc->sc_parent_tag, /* parent */
212 1, 0, /* alignment, boundary */
213 BUS_SPACE_MAXADDR, /* lowaddr */
214 BUS_SPACE_MAXADDR, /* highaddr */
215 NULL, NULL, /* filtfunc, filtfuncarg */
216 MCLBYTES * MAX_FRAGMENT, /* maxsize */
217 MAX_FRAGMENT, /* nsegments */
218 MCLBYTES, 0, /* maxsegsz, flags */
219 NULL, NULL, /* lockfunc, lockfuncarg */
220 &sc->dmatag_ring_tx); /* dmat */
222 device_printf(sc->dev, "failed to create TX mbuf dma tag\n");
226 for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
227 /* Create dma map for each descriptor. */
228 error = bus_dmamap_create(sc->dmatag_ring_tx, 0,
229 &sc->tx_desc[i].dmamap);
231 device_printf(sc->dev,
232 "failed to create TX mbuf dma map\n");
/* Tear down everything macb_alloc_desc_dma_tx() created: per-slot mbuf
 * dmamaps + tag first, then unload/free/destroy the descriptor ring. */
240 macb_free_desc_dma_tx(struct macb_softc *sc)
242 struct tx_desc_info *td;
246 if (sc->dmatag_ring_tx != NULL) {
247 for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
248 td = &sc->tx_desc[i];
249 if (td->dmamap != NULL) {
250 bus_dmamap_destroy(sc->dmatag_ring_tx,
255 bus_dma_tag_destroy(sc->dmatag_ring_tx);
256 sc->dmatag_ring_tx = NULL;
259 /* TX descriptor ring. */
260 if (sc->dmatag_data_tx != NULL) {
261 if (sc->ring_paddr_tx != 0)
262 bus_dmamap_unload(sc->dmatag_data_tx,
264 if (sc->desc_tx != NULL)
265 bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx,
267 sc->ring_paddr_tx = 0;
269 bus_dma_tag_destroy(sc->dmatag_data_tx);
270 sc->dmatag_data_tx = NULL;
/* Zero the TX ring and hand all descriptors to software (TD_OWN set);
 * the last slot also gets TD_WRAP_MASK so the hardware wraps.
 * Caller must hold the softc mutex. */
275 macb_init_desc_dma_tx(struct macb_softc *sc)
277 struct eth_tx_desc *desc;
280 MACB_LOCK_ASSERT(sc);
286 desc = &sc->desc_tx[0];
287 bzero(desc, sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS);
289 for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
290 desc = &sc->desc_tx[i];
291 if (i == MACB_MAX_TX_BUFFERS - 1)
292 desc->flags = TD_OWN | TD_WRAP_MASK;
294 desc->flags = TD_OWN;
/* Push the initialized ring out to the device before it is enabled. */
297 bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
298 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* RX counterpart of macb_alloc_desc_dma_tx(): descriptor-ring tag +
 * memory + load (paddr into sc->ring_paddr_rx), then a single-segment
 * MCLBYTES mbuf tag and one dmamap per RX slot. */
302 macb_alloc_desc_dma_rx(struct macb_softc *sc)
306 /* Allocate a busdma tag and DMA safe memory for RX descriptors. */
307 error = bus_dma_tag_create(sc->sc_parent_tag, /* parent */
308 16, 0, /* alignment, boundary */
309 BUS_SPACE_MAXADDR, /* lowaddr */
310 BUS_SPACE_MAXADDR, /* highaddr */
311 NULL, NULL, /* filtfunc, filtfuncarg */
312 /* maxsize, nsegments */
313 sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS, 1,
314 /* maxsegsz, flags */
315 sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS, 0,
316 NULL, NULL, /* lockfunc, lockfuncarg */
317 &sc->dmatag_data_rx); /* dmat */
319 device_printf(sc->dev,
320 "Couldn't create RX descriptor dma tag\n");
323 /* Allocate RX ring. */
324 error = bus_dmamem_alloc(sc->dmatag_data_rx, (void**)&(sc->desc_rx),
325 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
326 &sc->dmamap_ring_rx);
328 device_printf(sc->dev,
329 "failed to allocate RX descriptor dma memory\n");
/* Resolve the ring's physical address for programming EMAC_RBQP later. */
334 error = bus_dmamap_load(sc->dmatag_data_rx, sc->dmamap_ring_rx,
335 sc->desc_rx, sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS,
336 macb_getaddr, &sc->ring_paddr_rx, BUS_DMA_NOWAIT);
338 device_printf(sc->dev, "can't load RX descriptor dma map\n");
342 /* Allocate a busdma tag for mbufs. */
343 error = bus_dma_tag_create(sc->sc_parent_tag,/* parent */
344 16, 0, /* alignment, boundary */
345 BUS_SPACE_MAXADDR, /* lowaddr */
346 BUS_SPACE_MAXADDR, /* highaddr */
347 NULL, NULL, /* filtfunc, filtfuncarg */
348 MCLBYTES, 1, /* maxsize, nsegments */
349 MCLBYTES, 0, /* maxsegsz, flags */
350 NULL, NULL, /* lockfunc, lockfuncarg */
351 &sc->dmatag_ring_rx); /* dmat */
354 device_printf(sc->dev, "failed to create RX mbuf dma tag\n");
358 for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
359 error = bus_dmamap_create(sc->dmatag_ring_rx, 0,
360 &sc->rx_desc[i].dmamap);
362 device_printf(sc->dev,
363 "failed to create RX mbuf dmamap\n");
/* Tear down everything macb_alloc_desc_dma_rx() created, mirroring the
 * TX free path: per-slot dmamaps + mbuf tag, then the descriptor ring. */
372 macb_free_desc_dma_rx(struct macb_softc *sc)
374 struct rx_desc_info *rd;
378 if (sc->dmatag_ring_rx != NULL) {
379 for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
380 rd = &sc->rx_desc[i];
381 if (rd->dmamap != NULL) {
382 bus_dmamap_destroy(sc->dmatag_ring_rx,
387 bus_dma_tag_destroy(sc->dmatag_ring_rx);
388 sc->dmatag_ring_rx = NULL;
390 /* RX descriptor ring. */
391 if (sc->dmatag_data_rx != NULL) {
392 if (sc->ring_paddr_rx != 0)
393 bus_dmamap_unload(sc->dmatag_data_rx,
395 if (sc->desc_rx != NULL)
396 bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
398 sc->ring_paddr_rx = 0;
400 bus_dma_tag_destroy(sc->dmatag_data_rx);
401 sc->dmatag_data_rx = NULL;
/* Zero the RX ring and attach a fresh mbuf cluster to every slot via
 * macb_new_rxbuf(); syncs the ring for device use afterwards.
 * Caller must hold the softc mutex. */
406 macb_init_desc_dma_rx(struct macb_softc *sc)
408 struct eth_rx_desc *desc;
409 struct rx_desc_info *rd;
412 MACB_LOCK_ASSERT(sc);
415 desc = &sc->desc_rx[0];
416 bzero(desc, sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS);
417 for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
418 rd = &sc->rx_desc[i];
420 if (macb_new_rxbuf(sc, i) != 0)
423 bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx,
424 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Allocate a new mbuf cluster for RX ring slot 'index', DMA-map it and
 * point the descriptor at it.  The low address bits of desc->addr are
 * reused as control bits: RD_WRAP_MASK is OR'ed in on the last slot. */
429 macb_new_rxbuf(struct macb_softc *sc, int index)
431 struct rx_desc_info *rd;
432 struct eth_rx_desc *desc;
434 bus_dma_segment_t seg[1];
437 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
/* Reserve ETHER_ALIGN so the IP header ends up 4-byte aligned
 * (pairs with the CFG_RBOF_2 offset programmed in macbinit_locked). */
440 m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;
441 rd = &sc->rx_desc[index];
442 bus_dmamap_unload(sc->dmatag_ring_rx, rd->dmamap);
443 error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_rx, rd->dmamap, m,
445 KASSERT(nsegs == 1, ("Too many segments returned!"));
451 bus_dmamap_sync(sc->dmatag_ring_rx, rd->dmamap, BUS_DMASYNC_PREREAD);
454 desc = &sc->desc_rx[index];
455 desc->addr = seg[0].ds_addr;
457 desc->flags = DATA_SIZE;
459 if (index == MACB_MAX_RX_BUFFERS - 1)
460 desc->addr |= RD_WRAP_MASK;
/* Create the 32-bit parent DMA tag shared by TX and RX, then delegate
 * to the per-direction ring allocators. */
466 macb_allocate_dma(struct macb_softc *sc)
470 /* Create parent tag for tx and rx */
471 error = bus_dma_tag_create(
472 bus_get_dma_tag(sc->dev), /* parent */
473 1, 0, /* alignment, boundary */
474 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
475 BUS_SPACE_MAXADDR, /* highaddr */
476 NULL, NULL, /* filter, filterarg */
477 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */
478 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
480 NULL, NULL, /* lockfunc, lockarg */
483 device_printf(sc->dev, "Couldn't create parent DMA tag\n");
487 if ((error = macb_alloc_desc_dma_tx(sc)) != 0)
489 if ((error = macb_alloc_desc_dma_rx(sc)) != 0)
/* Periodic (1 Hz) callout body — presumably macb_tick(); the function
 * header is outside this view.  Re-arms itself via callout_reset. */
498 struct macb_softc *sc;
499 struct mii_data *mii;
502 mii = device_get_softc(sc->miibus);
506 * Schedule another timeout one second from now.
508 callout_reset(&sc->tick_ch, hz, macb_tick, sc);
/* TX watchdog, driven from the tick callout: counts macb_watchdog_timer
 * down and, on expiry, logs the timeout, bumps OERRORS, clears RUNNING
 * (forcing a reinit) and restarts TX if packets are queued.
 * Caller must hold the softc mutex. */
513 macb_watchdog(struct macb_softc *sc)
517 MACB_LOCK_ASSERT(sc);
/* Timer disarmed, or armed and not yet expired: nothing to do. */
519 if (sc->macb_watchdog_timer == 0 || --sc->macb_watchdog_timer)
523 if ((sc->flags & MACB_FLAG_LINK) == 0) {
524 if_printf(ifp, "watchdog timeout (missed link)\n");
525 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
529 if_printf(ifp, "watchdog timeout\n");
530 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
531 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
533 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
534 macbstart_locked(ifp);
/* Bring the interface up: (re)build both DMA rings, program NCFGR
 * (MDC clock divider, pause, FCS discard, 100 Mb, +2 RX buffer offset),
 * point the hardware at the rings, enable RX/TX/MPE and interrupts,
 * mark the ifnet running and start the tick callout.
 * Caller must hold the softc mutex; no-op if already running. */
540 macbinit_locked(void *xsc)
542 struct macb_softc *sc;
546 struct mii_data *mii;
551 MACB_LOCK_ASSERT(sc);
553 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
556 if ((err = macb_init_desc_dma_rx(sc)) != 0) {
557 device_printf(sc->dev, "no memory for RX buffers\n");
561 macb_init_desc_dma_tx(sc);
563 config = read_4(sc, EMAC_NCFGR) | (sc->clock << 10); /*set clock*/
564 config |= CFG_PAE; /* PAuse Enable */
565 config |= CFG_DRFCS; /* Discard Rx FCS */
566 config |= CFG_SPD; /* 100 mbps*/
/* +2 byte RX offset pairs with ETHER_ALIGN handling in macb_new_rxbuf. */
570 config |= CFG_RBOF_2; /*offset +2*/
572 write_4(sc, EMAC_NCFGR, config);
574 /* Initialize TX and RX buffers */
575 write_4(sc, EMAC_RBQP, sc->ring_paddr_rx);
576 write_4(sc, EMAC_TBQP, sc->ring_paddr_tx);
578 /* Enable TX and RX */
579 write_4(sc, EMAC_NCR, RX_ENABLE | TX_ENABLE | MPE_ENABLE);
582 /* Enable interrupts */
583 write_4(sc, EMAC_IER, (RCOMP_INTERRUPT |
594 * Set 'running' flag, and clear output active flag
595 * and attempt to start the output
597 ifp->if_drv_flags |= IFF_DRV_RUNNING;
598 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
600 mii = device_get_softc(sc->miibus);
602 sc->flags |= MACB_FLAG_LINK;
606 callout_reset(&sc->tick_ch, hz, macb_tick, sc);
/* Reclaim completed TX descriptors.  On a TX underrun the whole ring is
 * reset (all slots returned to software, every pending mbuf unloaded);
 * otherwise walk cons..prod freeing descriptors the hardware has given
 * back (TD_OWN set again).  Disarms the watchdog when the ring drains.
 * Caller must hold the softc mutex. */
611 macb_tx_cleanup(struct macb_softc *sc)
614 struct eth_tx_desc *desc;
615 struct tx_desc_info *td;
620 MACB_LOCK_ASSERT(sc);
622 status = read_4(sc, EMAC_TSR);
/* TSR bits are write-one-to-clear. */
624 write_4(sc, EMAC_TSR, status);
627 if ((status & TSR_UND) != 0) {
629 printf("underrun\n");
630 bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
631 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
632 sc->tx_cons = sc->tx_prod = 0;
633 for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
634 desc = &sc->desc_tx[i];
635 desc->flags = TD_OWN;
638 for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
639 td = &sc->tx_desc[i];
640 if (td->buff != NULL) {
641 /* We are finished with this descriptor. */
642 bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
643 BUS_DMASYNC_POSTWRITE);
644 /* ... and unload, so we can reuse. */
/* NOTE(review): td->dmamap was created on dmatag_ring_tx but is
 * unloaded here with dmatag_data_tx — looks like a tag mismatch;
 * confirm against bus_dma(9). */
645 bus_dmamap_unload(sc->dmatag_data_tx,
653 if ((status & TSR_COMP) == 0)
657 if (sc->tx_cons == sc->tx_prod)
662 /* Prepare to read the ring (owner bit). */
663 bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
664 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
665 while (sc->tx_cons != sc->tx_prod) {
666 desc = &sc->desc_tx[sc->tx_cons];
/* Hardware still owns this slot; stop reclaiming. */
667 if ((desc->flags & TD_OWN) == 0)
670 td = &sc->tx_desc[sc->tx_cons];
671 if (td->buff != NULL) {
672 /* We are finished with this descriptor. */
673 bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
674 BUS_DMASYNC_POSTWRITE);
675 /* ... and unload, so we can reuse. */
/* NOTE(review): same suspected ring/data tag mismatch as above. */
676 bus_dmamap_unload(sc->dmatag_data_tx,
680 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
685 MACB_DESC_INC(sc->tx_cons, MACB_MAX_TX_BUFFERS);
686 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
688 desc->flags = TD_OWN;
689 desc = &sc->desc_tx[sc->tx_cons];
690 if (flags & TD_LAST) {
693 } while (sc->tx_cons != sc->tx_prod);
696 /* Unarm watchog timer when there is no pending descriptors in queue. */
698 sc->macb_watchdog_timer = 0;
/* RX ring walk: for each descriptor the hardware has handed back
 * (RD_OWN set in addr), replace its mbuf via macb_new_rxbuf(), chain
 * multi-descriptor frames on rxhead/rxtail, and pass completed frames
 * (RD_EOF) to if_input.  Descriptors are returned to the hardware by
 * clearing RD_OWN; RX interrupts are re-enabled on exit. */
702 macb_rx(struct macb_softc *sc)
704 struct eth_rx_desc *rxdesc;
712 rxdesc = &(sc->desc_rx[sc->rx_cons]);
716 bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx,
717 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
721 while (rxdesc->addr & RD_OWN) {
723 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
726 flags = rxdesc->flags;
728 rxbytes = flags & RD_LEN_MASK;
730 m = sc->rx_desc[sc->rx_cons].buff;
732 bus_dmamap_sync(sc->dmatag_ring_rx,
733 sc->rx_desc[sc->rx_cons].dmamap, BUS_DMASYNC_POSTREAD);
/* Refill failure: drop this frame but keep the old cluster in place. */
734 if (macb_new_rxbuf(sc, sc->rx_cons) != 0) {
735 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
739 rxdesc->flags = DATA_SIZE;
740 MACB_DESC_INC(sc->rx_cons, MACB_MAX_RX_BUFFERS);
741 if ((rxdesc->flags & RD_EOF) != 0)
743 rxdesc = &(sc->desc_rx[sc->rx_cons]);
744 } while (sc->rx_cons != first);
/* Abandon any partially assembled frame. */
746 if (sc->macb_cdata.rxhead != NULL) {
747 m_freem(sc->macb_cdata.rxhead);
748 sc->macb_cdata.rxhead = NULL;
749 sc->macb_cdata.rxtail = NULL;
757 /* Chain received mbufs. */
758 if (sc->macb_cdata.rxhead == NULL) {
760 sc->macb_cdata.rxhead = m;
761 sc->macb_cdata.rxtail = m;
/* First segment: 2-byte RBOF offset already consumed from DATA_SIZE. */
765 m->m_len = DATA_SIZE - 2;
767 m->m_flags &= ~M_PKTHDR;
768 m->m_len = DATA_SIZE;
769 sc->macb_cdata.rxtail->m_next = m;
770 sc->macb_cdata.rxtail = m;
773 if (flags & RD_EOF) {
/* Trim the final segment to the frame's residual length
 * (+2 compensates for the RBOF offset in the first segment). */
776 sc->macb_cdata.rxtail->m_len = (rxbytes -
777 ((nsegs - 1) * DATA_SIZE)) + 2;
780 m = sc->macb_cdata.rxhead;
781 m->m_flags |= M_PKTHDR;
782 m->m_pkthdr.len = rxbytes;
783 m->m_pkthdr.rcvif = ifp;
784 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
788 (*ifp->if_input)(ifp, m);
790 sc->macb_cdata.rxhead = NULL;
791 sc->macb_cdata.rxtail = NULL;
/* Return the descriptor to the hardware. */
795 rxdesc->addr &= ~RD_OWN;
797 MACB_DESC_INC(sc->rx_cons, MACB_MAX_RX_BUFFERS);
799 rxdesc = &(sc->desc_rx[sc->rx_cons]);
802 write_4(sc, EMAC_IER, (RCOMP_INTERRUPT|RXUBR_INTERRUPT));
/* RX processing entry with the lock held; 'count' bounds the work
 * (-1 = unbounded, as used by the taskqueue handler below). */
807 macb_intr_rx_locked(struct macb_softc *sc, int count)
/* Deferred RX processing, run on sc->sc_tq after macb_intr masks RX. */
814 macb_intr_task(void *arg, int pending __unused)
816 struct macb_softc *sc;
820 macb_intr_rx_locked(sc, -1);
/* Hard interrupt handler: reads/acks EMAC_ISR in a loop, defers RX
 * completion to the taskqueue (masking RCOMP/RXUBR first), handles TX
 * completion inline and restarts the transmit queue if non-empty. */
827 struct macb_softc *sc;
833 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
834 printf("not running\n");
839 status = read_4(sc, EMAC_ISR);
842 if (status & RCOMP_INTERRUPT) {
/* Mask RX interrupts until macb_rx() re-enables them. */
843 write_4(sc, EMAC_IDR, (RCOMP_INTERRUPT|RXUBR_INTERRUPT));
844 taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
847 if (status & TCOMP_INTERRUPT) {
851 status = read_4(sc, EMAC_ISR);
854 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
855 macbstart_locked(ifp);
/* Map one packet ('*m_head', which may be replaced by m_collapse on
 * EFBIG) onto consecutive TX descriptors.  Ownership handoff order
 * matters: every descriptor after the first gets TD_OWN as written,
 * then the FIRST descriptor's TD_OWN is cleared last so the hardware
 * never sees a half-built chain.  The first/last slots swap dmamaps so
 * the map tracking the mbuf ends up on the slot carrying td->buff. */
860 macb_encap(struct macb_softc *sc, struct mbuf **m_head)
862 struct eth_tx_desc *desc;
863 struct tx_desc_info *txd, *txd_last;
865 bus_dma_segment_t segs[MAX_FRAGMENT];
868 int error, i, nsegs, prod, si;
870 M_ASSERTPKTHDR((*m_head));
876 txd = txd_last = &sc->tx_desc[prod];
877 error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, txd->dmamap,
878 *m_head, segs, &nsegs, 0);
879 if (error == EFBIG) {
/* Too many fragments: compact the chain and retry once. */
880 m = m_collapse(*m_head, M_NOWAIT, MAX_FRAGMENT);
887 error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, txd->dmamap,
888 *m_head, segs, &nsegs, 0);
894 } else if (error != 0) {
897 /* Check for TX descriptor overruns. */
898 if (sc->tx_cnt + nsegs > MACB_MAX_TX_BUFFERS - 1) {
899 bus_dmamap_unload(sc->dmatag_ring_tx, txd->dmamap);
902 bus_dmamap_sync(sc->dmatag_ring_tx, txd->dmamap, BUS_DMASYNC_PREWRITE);
905 /* TODO: VLAN hardware tag insertion. */
911 for (i = 0; i < nsegs; i++) {
912 desc = &sc->desc_tx[prod];
913 desc->addr = segs[i].ds_addr;
916 desc->flags = segs[i].ds_len | TD_OWN;
918 desc->flags = segs[i].ds_len;
921 if (prod == MACB_MAX_TX_BUFFERS - 1)
922 desc->flags |= TD_WRAP_MASK;
925 MACB_DESC_INC(prod, MACB_MAX_TX_BUFFERS);
928 * Set EOP on the last fragment.
931 desc->flags |= TD_LAST;
/* Release the first descriptor to the hardware last. */
932 desc = &sc->desc_tx[si];
933 desc->flags &= ~TD_OWN;
937 /* Swap the first dma map and the last. */
938 map = txd_last->dmamap;
939 txd_last->dmamap = txd->dmamap;
/* Drain the ifnet send queue: dequeue, m_defrag fragmented chains,
 * encapsulate via macb_encap(); on ring-full, requeue the packet and
 * set OACTIVE.  Finishes by syncing the ring, kicking TRANSMIT_START
 * and arming the watchdog.  Caller must hold the softc mutex; requires
 * RUNNING set, OACTIVE clear, and link up. */
948 macbstart_locked(struct ifnet *ifp)
953 struct macb_softc *sc;
962 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
963 IFF_DRV_RUNNING || (sc->flags & MACB_FLAG_LINK) == 0) {
967 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
968 /* Get packet from the queue */
969 IF_DEQUEUE(&ifp->if_snd, m0);
973 if (m0->m_next != NULL) {
974 /* Fragmented mbuf chain, collapse it. */
975 m_new = m_defrag(m0, M_NOWAIT);
977 /* Original frame freed. */
980 /* Defragmentation failed, just use the chain. */
984 if (macb_encap(sc, &m0)) {
/* No room in the ring: put the packet back and stall TX. */
987 IF_PREPEND(&ifp->if_snd, m0);
988 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
994 if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
995 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
997 bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
998 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
999 write_4(sc, EMAC_NCR, read_4(sc, EMAC_NCR) | TRANSMIT_START);
1000 sc->macb_watchdog_timer = MACB_TIMEOUT;
/* Unlocked if_init wrapper (function header elided in this view):
 * takes the softc mutex and calls macbinit_locked(). */
1007 struct macb_softc *sc = xsc;
1010 macbinit_locked(sc);
/* Unlocked if_start wrapper around macbstart_locked(). */
1015 macbstart(struct ifnet *ifp)
1017 struct macb_softc *sc = ifp->if_softc;
1018 MACB_ASSERT_UNLOCKED(sc);
1020 macbstart_locked(ifp);
/* Stop the interface: clear RUNNING/OACTIVE and the link flag, stop
 * the tick callout, disarm the watchdog, then unload every mbuf still
 * mapped in the TX and RX rings. */
1027 macbstop(struct macb_softc *sc)
1029 struct ifnet *ifp = sc->ifp;
1030 struct rx_desc_info *rd;
1031 struct tx_desc_info *td;
1036 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1040 sc->flags &= ~MACB_FLAG_LINK;
1041 callout_stop(&sc->tick_ch);
1042 sc->macb_watchdog_timer = 0;
1044 /* Free TX/RX mbufs still in the queues. */
1045 for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
1046 td = &sc->tx_desc[i];
1047 if (td->buff != NULL) {
1048 bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
1049 BUS_DMASYNC_POSTWRITE);
/* NOTE(review): td->dmamap was created on dmatag_ring_tx; unloading
 * with dmatag_data_tx looks like a tag mismatch — confirm. */
1050 bus_dmamap_unload(sc->dmatag_data_tx, td->dmamap);
1055 for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
1056 rd = &sc->rx_desc[i];
1057 if (rd->buff != NULL) {
1058 bus_dmamap_sync(sc->dmatag_ring_rx, rd->dmamap,
1059 BUS_DMASYNC_POSTREAD);
/* NOTE(review): rd->dmamap was created on dmatag_ring_rx; unloading
 * with dmatag_data_rx looks like the same tag mismatch — confirm. */
1060 bus_dmamap_unload(sc->dmatag_data_rx, rd->dmamap);
/* Compute the EMAC 6-bit multicast hash index from a 48-bit MAC:
 * XOR-fold of the address bits as specified for the hash register. */
1068 get_hash_index(uint8_t *mac)
1075 for (i = 0; i < 6; i++) {
1077 for (j = 0; j < 8; j++) {
1079 bit ^= (mac[k/8] & (1 << (k % 8)) ) != 0;
/* Set the bit for 'mac' in the 64-bit (2 x 32-bit word) hash filter. */
1087 set_mac_filter(uint32_t *filter, uint8_t *mac)
1091 bits = get_hash_index(mac);
1092 filter[bits >> 5] |= 1 << (bits & 31);
/* Program the receive filter from ifnet flags: all-ones hash for
 * ALLMULTI, promiscuous via NCFGR CAF, otherwise build the 64-bit hash
 * (HRB/HRT) from the interface's multicast list and enable MTI. */
1096 set_filter(struct macb_softc *sc)
1099 struct ifmultiaddr *ifma;
1102 uint32_t multicast_filter[2];
1106 config = read_4(sc, EMAC_NCFGR);
/* Start from a clean slate: no promisc, no multicast hash. */
1108 config &= ~(CFG_CAF | CFG_MTI);
1109 write_4(sc, EMAC_HRB, 0);
1110 write_4(sc, EMAC_HRT, 0);
1112 if ((ifp->if_flags & (IFF_ALLMULTI |IFF_PROMISC)) != 0){
1113 if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
1114 write_4(sc, EMAC_HRB, ~0);
1115 write_4(sc, EMAC_HRT, ~0);
1118 if ((ifp->if_flags & IFF_PROMISC) != 0) {
1121 write_4(sc, EMAC_NCFGR, config);
1125 if_maddr_rlock(ifp);
1127 multicast_filter[0] = 0;
1128 multicast_filter[1] = 0;
1130 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1131 if (ifma->ifma_addr->sa_family != AF_LINK)
1134 set_mac_filter(multicast_filter,
1135 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
1138 write_4(sc, EMAC_HRB, multicast_filter[0]);
1139 write_4(sc, EMAC_HRT, multicast_filter[1]);
1140 write_4(sc, EMAC_NCFGR, config|CFG_MTI);
1142 if_maddr_runlock(ifp);
/* ifnet ioctl handler: SIOCSIFFLAGS re-filters or (re)inits/stops as
 * needed (re-running set_filter only when PROMISC/ALLMULTI changed),
 * media ioctls are forwarded to mii, everything else to ether_ioctl. */
1146 macbioctl(struct ifnet * ifp, u_long cmd, caddr_t data)
1149 struct macb_softc *sc = ifp->if_softc;
1150 struct mii_data *mii;
1151 struct ifreq *ifr = (struct ifreq *)data;
1159 if ((ifp->if_flags & IFF_UP) != 0) {
1160 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
/* Only PROMISC/ALLMULTI transitions need a filter update. */
1161 if (((ifp->if_flags ^ sc->if_flags)
1162 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1165 macbinit_locked(sc);
1167 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
/* Remember flags so the next SIOCSIFFLAGS can diff against them. */
1170 sc->if_flags = ifp->if_flags;
1176 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1183 mii = device_get_softc(sc->miibus);
1184 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1187 error = ether_ioctl(ifp, cmd, data);
1194 /* bus entry points */
/* Unconditional probe: this driver attaches wherever the bus glue
 * instantiates a "macb" child. */
1197 macb_probe(device_t dev)
1199 device_set_desc(dev, "macb");
1204 * Change media according to request.
1207 macb_ifmedia_upd(struct ifnet *ifp)
1209 struct macb_softc *sc = ifp->if_softc;
1210 struct mii_data *mii;
1212 mii = device_get_softc(sc->miibus);
1220 * Notify the world which media we're using.
1223 macb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1225 struct macb_softc *sc = ifp->if_softc;
1226 struct mii_data *mii;
1228 mii = device_get_softc(sc->miibus);
1231 /* Don't report link state if driver is not running. */
1232 if ((ifp->if_flags & IFF_UP) == 0) {
1237 ifmr->ifm_active = mii->mii_media_active;
1238 ifmr->ifm_status = mii->mii_media_status;
/* Put the EMAC in a known state: disable RX/TX, clear statistics,
 * ack all status bits, mask all interrupts and drain ISR. */
1243 macb_reset(struct macb_softc *sc)
1248 write_4(sc, EMAC_NCR, 0);
1250 write_4(sc, EMAC_NCR, CLEAR_STAT);
1252 /* Clear all status flags */
1253 write_4(sc, EMAC_TSR, ~0UL);
1254 write_4(sc, EMAC_RSR, ~0UL);
1256 /* Disable all interrupts */
1257 write_4(sc, EMAC_IDR, ~0UL);
/* Read ISR once to clear any latched interrupt state. */
1258 read_4(sc, EMAC_ISR);
/* Read the station MAC address out of the EMAC's specific-address
 * registers SA1B (bytes 0-3) / SA1T (bytes 4-5), little-endian packed. */
1264 macb_get_mac(struct macb_softc *sc, u_char *eaddr)
1269 bottom = read_4(sc, EMAC_SA1B);
1270 top = read_4(sc, EMAC_SA1T);
1272 eaddr[0] = bottom & 0xff;
1273 eaddr[1] = (bottom >> 8) & 0xff;
1274 eaddr[2] = (bottom >> 16) & 0xff;
1275 eaddr[3] = (bottom >> 24) & 0xff;
1276 eaddr[4] = top & 0xff;
1277 eaddr[5] = (top >> 8) & 0xff;
/* Device attach: allocate register and IRQ resources, enable the PMC
 * clock, read the MAC address, derive the MDC clock divider from the
 * master clock, attach the PHY via mii_attach, set up DMA rings, the
 * ifnet, the RX taskqueue and the interrupt handler, then
 * ether_ifattach and force PHY autonegotiation. */
1284 macb_attach(device_t dev)
1286 struct macb_softc *sc;
1287 struct ifnet *ifp = NULL;
1288 struct sysctl_ctx_list *sctx;
1289 struct sysctl_oid *soid;
1291 u_char eaddr[ETHER_ADDR_LEN];
1294 struct at91_pmc_clock *master;
1299 sc = device_get_softc(dev);
1304 callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
1307 * Allocate resources.
1310 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1312 if (sc->mem_res == NULL) {
1313 device_printf(dev, "could not allocate memory resources.\n");
1318 sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1320 if (sc->irq_res == NULL) {
1321 device_printf(dev, "could not allocate interrupt resources.\n");
/* Gate the EMAC's peripheral clock on via the PMC. */
1327 sc->clk = at91_pmc_clock_ref(device_get_nameunit(sc->dev));
1328 at91_pmc_clock_enable(sc->clk);
1331 macb_get_mac(sc, eaddr);
1333 master = at91_pmc_clock_ref("mck");
1335 pclk_hz = master->hz;
/* Pick the smallest MDC divider that keeps MDC in spec for this
 * peripheral-clock frequency. */
1337 sc->clock = CFG_CLK_8;
1338 if (pclk_hz <= 20000000)
1339 sc->clock = CFG_CLK_8;
1340 else if (pclk_hz <= 40000000)
1341 sc->clock = CFG_CLK_16;
1342 else if (pclk_hz <= 80000000)
1343 sc->clock = CFG_CLK_32;
1345 sc->clock = CFG_CLK_64;
1347 sc->clock = sc->clock << 10;
1349 write_4(sc, EMAC_NCFGR, sc->clock);
1350 write_4(sc, EMAC_USRIO, USRIO_CLOCK); //enable clock
1352 write_4(sc, EMAC_NCR, MPE_ENABLE); //enable MPE
1354 sc->ifp = ifp = if_alloc(IFT_ETHER);
1355 err = mii_attach(dev, &sc->miibus, ifp, macb_ifmedia_upd,
1356 macb_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
1358 device_printf(dev, "attaching PHYs failed\n");
1362 if (macb_allocate_dma(sc) != 0)
1366 sctx = device_get_sysctl_ctx(dev);
1367 soid = device_get_sysctl_tree(dev);
1370 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1371 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1372 ifp->if_capabilities |= IFCAP_VLAN_MTU;
1373 ifp->if_capenable |= IFCAP_VLAN_MTU; /* The hw bits already set. */
1374 ifp->if_start = macbstart;
1375 ifp->if_ioctl = macbioctl;
1376 ifp->if_init = macbinit;
1377 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
1378 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
1379 IFQ_SET_READY(&ifp->if_snd);
1380 sc->if_flags = ifp->if_flags;
1382 TASK_INIT(&sc->sc_intr_task, 0, macb_intr_task, sc);
1384 sc->sc_tq = taskqueue_create_fast("macb_taskq", M_WAITOK,
1385 taskqueue_thread_enqueue, &sc->sc_tq);
1386 if (sc->sc_tq == NULL) {
1387 device_printf(sc->dev, "could not create taskqueue\n");
1391 ether_ifattach(ifp, eaddr);
1394 * Activate the interrupt.
1396 err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
1397 NULL, macb_intr, sc, &sc->intrhand);
1399 device_printf(dev, "could not establish interrupt handler.\n");
1400 ether_ifdetach(ifp);
1404 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
1405 device_get_nameunit(sc->dev));
1407 sc->macb_cdata.rxhead = 0;
1408 sc->macb_cdata.rxtail = 0;
/* Kick PHY register 0 (BMCR) to restart autonegotiation. */
1410 phy_write(sc, 0, 0, 0x3300); //force autoneg
/* Device detach: unhook from the network stack first, then tear down
 * callout, interrupt, taskqueue, DMA resources, bus resources and the
 * softc mutex — the reverse of attach order. */
1419 macb_detach(device_t dev)
1421 struct macb_softc *sc;
1423 sc = device_get_softc(dev);
1424 ether_ifdetach(sc->ifp);
1428 callout_drain(&sc->tick_ch);
1429 bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
1430 taskqueue_drain(sc->sc_tq, &sc->sc_intr_task);
1431 taskqueue_free(sc->sc_tq);
1432 macb_deactivate(dev);
1433 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
1434 bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
1435 MACB_LOCK_DESTROY(sc);
1440 /*PHY related functions*/
/* MDIO read: issue the management-frame read via EMAC_MAN, busy-wait
 * for the PHY-management-idle bit, return the 16-bit data field. */
1442 phy_read(struct macb_softc *sc, int phy, int reg)
1446 write_4(sc, EMAC_MAN, EMAC_MAN_REG_RD(phy, reg));
1447 while ((read_4(sc, EMAC_SR) & EMAC_SR_IDLE) == 0)
1449 val = read_4(sc, EMAC_MAN) & EMAC_MAN_VALUE_MASK;
/* MDIO write: same busy-wait protocol as phy_read. */
1455 phy_write(struct macb_softc *sc, int phy, int reg, int data)
1458 write_4(sc, EMAC_MAN, EMAC_MAN_REG_WR(phy, reg, data));
1459 while ((read_4(sc, EMAC_SR) & EMAC_SR_IDLE) == 0)
1466 * MII bus support routines.
/* miibus glue: forward register accesses to the MDIO helpers above. */
1469 macb_miibus_readreg(device_t dev, int phy, int reg)
1471 struct macb_softc *sc;
1472 sc = device_get_softc(dev);
1473 return (phy_read(sc, phy, reg));
1477 macb_miibus_writereg(device_t dev, int phy, int reg, int data)
1479 struct macb_softc *sc;
1480 sc = device_get_softc(dev);
1481 return (phy_write(sc, phy, reg, data));
/* bus_child_detached hook (body largely elided in this view). */
1485 macb_child_detached(device_t dev, device_t child)
1487 struct macb_softc *sc;
1488 sc = device_get_softc(dev);
/* PHY status-change callback: refresh MACB_FLAG_LINK and reprogram the
 * NCFGR speed bit to match the negotiated media subtype. */
1493 macb_miibus_statchg(device_t dev)
1495 struct macb_softc *sc;
1496 struct mii_data *mii;
1499 sc = device_get_softc(dev);
1501 mii = device_get_softc(sc->miibus);
/* Assume no link until the media status proves otherwise. */
1503 sc->flags &= ~MACB_FLAG_LINK;
1505 config = read_4(sc, EMAC_NCFGR);
1507 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1508 switch (IFM_SUBTYPE(mii->mii_media_active)) {
/* 10 Mb case: clear the 100 Mb speed bit. */
1510 config &= ~(CFG_SPD);
1511 sc->flags |= MACB_FLAG_LINK;
1515 sc->flags |= MACB_FLAG_LINK;
1523 write_4(sc, EMAC_NCFGR, config);
/* newbus/miibus method table and driver registration glue. */
1526 static device_method_t macb_methods[] = {
1527 /* Device interface */
1528 DEVMETHOD(device_probe, macb_probe),
1529 DEVMETHOD(device_attach, macb_attach),
1530 DEVMETHOD(device_detach, macb_detach),
1533 DEVMETHOD(bus_child_detached, macb_child_detached),
1536 DEVMETHOD(miibus_readreg, macb_miibus_readreg),
1537 DEVMETHOD(miibus_writereg, macb_miibus_writereg),
1538 DEVMETHOD(miibus_statchg, macb_miibus_statchg),
1542 static driver_t macb_driver = {
1545 sizeof(struct macb_softc),
/* Register on the atmelarm bus and declare the miibus/ether deps. */
1549 DRIVER_MODULE(macb, atmelarm, macb_driver, macb_devclass, 0, 0);
1550 DRIVER_MODULE(miibus, macb, miibus_driver, miibus_devclass, 0, 0);
1551 MODULE_DEPEND(macb, miibus, 1, 1, 1);
1552 MODULE_DEPEND(macb, ether, 1, 1, 1);