/*-
 * Copyright (c) 2012-2013 Thomas Skibo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A network interface driver for Cadence GEM Gigabit Ethernet
 * interfaces, such as the one used in the Xilinx Zynq-7000 SoC.
 *
 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
 * (v1.4) November 16, 2012.  Xilinx doc UG585.  The GEM is covered in
 * Ch. 16 and its register definitions are in Appendix B.18.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/cadence/if_cgem_hw.h>

#include "miibus_if.h"

#define IF_CGEM_NAME "cgem"

#define CGEM_NUM_RX_DESCS 256	/* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS 256	/* size of transmit descriptor ring */

#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
				CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))

/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS. */
#define DEFAULT_NUM_RX_BUFS 64	/* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS 4	/* maximum segs in a tx mbuf dma */

#define CGEM_CKSUM_ASSIST (CSUM_IP | CSUM_TCP | CSUM_UDP | \
			   CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
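
/* Note: CGEM_CKSUM_ASSIST is what the SIOCSIFCAP handler in cgem_ioctl()
 * below loads into if_hwassist when transmit checksum offload is enabled;
 * the stack then leaves IP/TCP/UDP checksum fields for the GEM's offload
 * engine to fill in as it transmits.
 */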

struct cgem_softc {
	struct ifnet		*ifp;
	struct mtx		sc_mtx;
	device_t		dev;
	device_t		miibus;
	int			if_old_flags;
	struct resource		*mem_res;
	struct resource		*irq_res;
	void			*intrhand;
	struct callout		tick_ch;
	uint32_t		net_ctl_shadow;

	bus_dma_tag_t		desc_dma_tag;
	bus_dma_tag_t		mbuf_dma_tag;

	/* receive descriptor ring */
	struct cgem_rx_desc	*rxring;
	bus_addr_t		rxring_physaddr;
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t		rxring_dma_map;
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxoverruns;	/* rx ring overruns */

	/* transmit descriptor ring */
	struct cgem_tx_desc	*txring;
	bus_addr_t		txring_physaddr;
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	bus_dmamap_t		txring_dma_map;
};

#define RD4(sc, off)		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)	(bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)		mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	\
	mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
		 MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

static devclass_t cgem_devclass;

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
	int i;
	uint32_t rnd;

	/* See if boot loader gave us a MAC address already. */
	for (i = 0; i < 4; i++) {
		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
		if (low != 0 || high != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			return;
		}
	}

	/* No MAC from boot loader?  Assign a random one. */
	rnd = arc4random();

	eaddr[0] = 'b';		/* 0x62: unicast, locally administered. */
	eaddr[1] = 's';
	eaddr[2] = 'd';
	eaddr[3] = (rnd >> 16) & 0xff;
	eaddr[4] = (rnd >> 8) & 0xff;
	eaddr[5] = rnd & 0xff;

	device_printf(sc->dev, "no mac address found, assigning "
		      "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
		      eaddr[0], eaddr[1], eaddr[2],
		      eaddr[3], eaddr[4], eaddr[5]);

	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
}

/* cgem_mac_hash():  map 48-bit address to a 6-bit hash.
 * The 6-bit hash corresponds to a bit in a 64-bit hash
 * register.  Setting that bit in the hash register enables
 * reception of all frames with a destination address that hashes
 * to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
 */
static int
cgem_mac_hash(u_char eaddr[])
{
	int hash = 0;
	int i, j;

	for (i = 0; i < 6; i++)
		for (j = i; j < 48; j += 6)
			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
				hash ^= (1 << i);

	return hash;
}
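
/* Worked example: ff:ff:ff:ff:ff:ff hashes to 0, since each of the six
 * hash bits is the exclusive-or of eight 1 bits, which cancels out;
 * 00:00:00:00:00:01 hashes to 0x01, as only address bit 0 feeds hash bit 0.
 */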

/* After any change in rx flags or multi-cast addresses, set up
 * hash registers and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	int index;
	uint32_t hash_hi, hash_lo;
	uint32_t net_cfg;

	hash_hi = 0;
	hash_lo = 0;

	net_cfg = RD4(sc, CGEM_NET_CFG);

	net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
		     CGEM_NET_CFG_NO_BCAST |
		     CGEM_NET_CFG_COPY_ALL);

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		net_cfg |= CGEM_NET_CFG_COPY_ALL;
	else {
		if ((ifp->if_flags & IFF_BROADCAST) == 0)
			net_cfg |= CGEM_NET_CFG_NO_BCAST;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			hash_hi = 0xffffffff;
			hash_lo = 0xffffffff;
		} else {
			if_maddr_rlock(ifp);
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				index = cgem_mac_hash(
					LLADDR((struct sockaddr_dl *)
					       ifma->ifma_addr));
				if (index > 31)
					hash_hi |= (1<<(index-32));
				else
					hash_lo |= (1<<index);
			}
			if_maddr_runlock(ifp);
		}

		if (hash_hi != 0 || hash_lo != 0)
			net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN;
	}

	WR4(sc, CGEM_HASH_TOP, hash_hi);
	WR4(sc, CGEM_HASH_BOT, hash_lo);
	WR4(sc, CGEM_NET_CFG, net_cfg);
}

/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (nsegs != 1 || error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}
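
/* (The descriptor-ring DMA tag below is created with nsegments = 1, so
 * the single-segment check above is the expected case; any other result
 * simply leaves the caller's address word untouched.)
 */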

/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MAX_DESC_RING_SIZE,
				 1,
				 MAX_DESC_RING_SIZE,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MCLBYTES,
				 TX_MAX_DMA_SEGS,
				 MCLBYTES,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/* Allocate DMA memory in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->rxring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
			      (void *)sc->rxring,
			      CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
			      cgem_getaddr, &sc->rxring_physaddr,
			      0);
	if (err)
		return (err);

	/* Initialize RX descriptors. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->rxring_m_dmamap[i]);
		if (err)
			return (err);
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Allocate DMA memory for TX descriptors in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->txring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->txring_dma_map);
	if (err)
		return (err);

	/* Load TX descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
			      (void *)sc->txring,
			      CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
			      cgem_getaddr, &sc->txring_physaddr,
			      0);
	if (err)
		return (err);

	/* Initialize TX descriptor ring. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->txring_m_dmamap[i]);
		if (err)
			return (err);
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	return (0);
}

/* Fill receive descriptor ring with mbufs. */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

		/* Load map and plug in physical address. */
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			      sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
			      segs, &nsegs, BUS_DMA_NOWAIT)) {
			m_free(m);
			break;
		}
		sc->rxring_m[sc->rxring_hd_ptr] = m;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_hd_ptr],
				BUS_DMASYNC_PREREAD);

		/* Write rx descriptor and increment head pointer. */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
				CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}
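
/* Ownership handshake used above and in cgem_recv(): software hands a
 * descriptor to the controller by writing its buffer address with
 * CGEM_RXDESC_OWN clear, and the controller sets OWN once it has
 * deposited a received frame in the buffer.
 */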

/* Pull received packets off of receive descriptor ring. */
static void
cgem_recv(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	while (sc->rxring_queued > 0 &&
	       (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {

		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_tl_ptr],
				BUS_DMASYNC_POSTREAD);

		/* Unload DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
				  sc->rxring_m_dmamap[sc->rxring_tl_ptr]);

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/* Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
			   (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			m_free(m);
			ifp->if_ierrors++;
			continue;
		}

		/* Hand it off to upper layers. */
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/* Are we using hardware checksumming?  Check the
		 * status in the receive descriptor.
		 */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
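				/* csum_data = 0xffff together with
				 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR tells
				 * the stack the checksum has already been
				 * verified, so it skips its own check.
				 */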
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
				   CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
		CGEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		CGEM_LOCK(sc);
	}
}

/* Find completed transmits and free their mbufs. */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* free up finished transmits. */
	while (sc->txring_queued > 0 &&
	       ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
		CGEM_TXDESC_USED) != 0) {

		/* Sync cache.  nop? */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_tl_ptr],
				BUS_DMASYNC_POSTWRITE);

		/* Unload DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
				  sc->txring_m_dmamap[sc->txring_tl_ptr]);

		/* Free up the mbuf. */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error.  log to console. */
			device_printf(sc->dev, "cgem_clean_tx: AHB error, "
				      "addr=0x%x\n",
				      sc->txring[sc->txring_tl_ptr].addr);
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
				   CGEM_TXDESC_LATE_COLL)) != 0) {
			sc->ifp->if_oerrors++;
		} else {
			sc->ifp->if_opackets++;
		}

		/* If the packet spanned more than one tx descriptor,
		 * skip descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.
		 */
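		/* (Only the first descriptor of a frame has an mbuf and a
		 * loaded DMA map attached, so there is nothing to free for
		 * the continuation descriptors; they are just marked USED
		 * and skipped.)
		 */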
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			sc->txring[sc->txring_tl_ptr].ctl =
				ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;
	}
}

/* Start transmits. */
static void
cgem_start_locked(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
		if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
		    TX_MAX_DMA_SEGS - 1) {

			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
			    TX_MAX_DMA_SEGS - 1) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				return;
			}
		}

		/* Grab next transmit packet. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			return;

		/* Load DMA map. */
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			      sc->txring_m_dmamap[sc->txring_hd_ptr],
			      m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many segments!  defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				m_freem(m);
				continue;
			}
			m = m2;

			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
				      sc->txring_m_dmamap[sc->txring_hd_ptr],
				      m, segs, &nsegs, BUS_DMA_NOWAIT);
		}
		if (err != 0) {
			/* Give up. */
			m_freem(m);
			continue;
		}
		sc->txring_m[sc->txring_hd_ptr] = m;

		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_hd_ptr],
				BUS_DMASYNC_PREWRITE);

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
			CGEM_NUM_TX_DESCS;

		/* Fill in the TX descriptors back to front so that USED
		 * bit in first descriptor is cleared last.
		 */
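		/* (The controller may begin fetching as soon as it sees
		 * the first descriptor's USED bit clear, so writing the
		 * chain back to front publishes the whole frame at once
		 * from the hardware's point of view.)
		 */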
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
				segs[i].ds_addr;

			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);
	}
}

static void
cgem_start(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;

	CGEM_LOCK(sc);
	cgem_start_locked(ifp);
	CGEM_UNLOCK(sc);
}

/* Respond to changes in media. */
static void
cgem_media_update(struct cgem_softc *sc, int active)
{
	uint32_t net_cfg;

	CGEM_ASSERT_LOCKED(sc);

	/* Update hardware to reflect phy status. */
	net_cfg = RD4(sc, CGEM_NET_CFG);

	net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
		     CGEM_NET_CFG_FULL_DUPLEX);

	if (IFM_SUBTYPE(active) == IFM_1000_T)
		net_cfg |= (CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN);
	else if (IFM_SUBTYPE(active) == IFM_100_TX)
		net_cfg |= CGEM_NET_CFG_SPEED100;

	if ((active & IFM_FDX) != 0)
		net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;
	WR4(sc, CGEM_NET_CFG, net_cfg);
}

static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;
	int active;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		active = mii->mii_media_active;
		mii_tick(mii);
		if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
		    (IFM_ACTIVE | IFM_AVALID) &&
		    active != mii->mii_media_active)
			cgem_media_update(sc, mii->mii_media_active);
	}

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	uint32_t istatus;

	CGEM_LOCK(sc);

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus &
	    (CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
	     CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK));

	/* Hresp not ok.  Something very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		printf("cgem_intr: hresp not okay! rx_status=0x%x\n",
		       RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Transmitter has idled.  Free up any spent transmit buffers. */
	if ((istatus & CGEM_INTR_TX_USED_READ) != 0)
		cgem_clean_tx(sc);

	/* Packets received or overflow. */
	if ((istatus & (CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN)) != 0) {
		cgem_recv(sc);
		cgem_fill_rqueue(sc);
		if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
			/* Clear rx status register. */
			sc->rxoverruns++;
			WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
		}
	}

	CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, 0);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	WR4(sc, CGEM_NET_CFG,
	    CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64);
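	/* (A divide-by-64 keeps MDC at or below the 2.5 MHz ceiling of
	 * IEEE 802.3 clause 22 for any pclk up to 160 MHz, which covers
	 * typical Zynq clockings.)
	 */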

	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}

/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
	uint32_t net_cfg;
	uint32_t dma_cfg;

	CGEM_ASSERT_LOCKED(sc);

	/* Program Net Config Register. */
	net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
		  CGEM_NET_CFG_MDC_CLK_DIV_64 |
		  CGEM_NET_CFG_FCS_REMOVE |
		  CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
		  CGEM_NET_CFG_GIGE_EN |
		  CGEM_NET_CFG_FULL_DUPLEX |
		  CGEM_NET_CFG_SPEED100;

	/* Enable receive checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0)
		net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
		  CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
		  CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
		  CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16;

	/* Enable transmit checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN,
	    CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
	    CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK);
}

/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	cgem_config(sc);
	cgem_fill_rqueue(sc);

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	cgem_media_update(sc, mii->mii_media_active);
	cgem_start_locked(sc->ifp);

	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
	int i;

	CGEM_ASSERT_LOCKED(sc);

	callout_stop(&sc->tick_ch);

	/* Shut down hardware. */
	cgem_reset(sc);

	/* Clear out transmit queue. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring[i].addr = 0;
		if (sc->txring_m[i]) {
			bus_dmamap_unload(sc->mbuf_dma_tag,
					  sc->txring_m_dmamap[i]);
			m_freem(sc->txring_m[i]);
			sc->txring_m[i] = NULL;
		}
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	/* Clear out receive queue. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		if (sc->rxring_m[i]) {
			bus_dmamap_unload(sc->mbuf_dma_tag,
					  sc->rxring_m_dmamap[i]);
			m_freem(sc->rxring_m[i]);
			sc->rxring_m[i] = NULL;
		}
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;
}

static int
cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->if_old_flags) &
				     (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			cgem_stop(sc);
		}
		sc->if_old_flags = ifp->if_flags;
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				ifp->if_capenable |= (IFCAP_TXCSUM |
						      IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist |= CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				ifp->if_capenable &= ~(IFCAP_TXCSUM |
						       IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				ifp->if_capenable |= (IFCAP_RXCSUM |
						      IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) |
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			} else {
				/* Turn off RX checksumming. */
				ifp->if_capenable &= ~(IFCAP_RXCSUM |
						       IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) &
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			}
		}
		CGEM_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/* MII bus support routines.
 */
static void
cgem_child_detached(device_t dev, device_t child)
{
	struct cgem_softc *sc = device_get_softc(dev);

	if (child == sc->miibus)
		sc->miibus = NULL;
}

static int
cgem_ifmedia_upd(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	mii_mediachg(mii);

	return (0);
}

static void
cgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries, val;

	WR4(sc, CGEM_PHY_MAINT,
	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
	    CGEM_PHY_MAINT_OP_READ |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy read timeout: %d\n", reg);
			return (-1);
		}
	}

	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

	return (val);
}

static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries;

	WR4(sc, CGEM_PHY_MAINT,
	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
	    CGEM_PHY_MAINT_OP_WRITE |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
	    (data & CGEM_PHY_MAINT_DATA_MASK));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy write timeout: %d\n", reg);
			return (-1);
		}
	}

	return (0);
}
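
/* The mii(4) layer reaches the two routines above through
 * MIIBUS_READREG()/MIIBUS_WRITEREG(); e.g. its link polling reads the
 * PHY status register with cgem_miibus_readreg(dev, phy, MII_BMSR), and
 * the -1 return is what a timed-out or absent PHY looks like to it.
 */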

static int
cgem_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "cadence,gem"))
		return (ENXIO);

	device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
	return (0);
}

static int
cgem_attach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = NULL;
	int rid, err;
	u_char eaddr[ETHER_ADDR_LEN];

	sc->dev = dev;
	CGEM_LOCK_INIT(sc);

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
					     RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		return (ENOMEM);
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					     RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Reset hardware. */
	CGEM_LOCK(sc);
	cgem_reset(sc);
	CGEM_UNLOCK(sc);

	/* Attach phy to mii bus. */
	err = mii_attach(dev, &sc->miibus, ifp,
			 cgem_ifmedia_upd, cgem_ifmedia_sts,
			 BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err) {
		device_printf(dev, "attaching PHYs failed\n");
		cgem_detach(dev);
		return (err);
	}

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks. */
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	/* Set up ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = cgem_start;
	ifp->if_ioctl = cgem_ioctl;
	ifp->if_init = cgem_init;
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
	/* XXX: disable hw checksumming for now. */
	ifp->if_hwassist = 0;
	ifp->if_capenable = ifp->if_capabilities &
		~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	sc->if_old_flags = ifp->if_flags;
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;

	ether_ifattach(ifp, eaddr);

	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
			     INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not set interrupt handler.\n");
		ether_ifdetach(ifp);
		cgem_detach(dev);
		return (err);
	}

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		       OID_AUTO, "rxbufs", CTLFLAG_RW,
		       &sc->rxbufs, 0,
		       "Number of receive buffers to provide");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		       OID_AUTO, "_rxoverruns", CTLFLAG_RD,
		       &sc->rxoverruns, 0,
		       "Receive ring overrun events");

	return (0);
}

static int
cgem_detach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int i;

	if (sc == NULL)
		return (ENODEV);

	if (device_is_attached(dev)) {
		CGEM_LOCK(sc);
		cgem_stop(sc);
		CGEM_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		sc->ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}

	/* Release resources. */
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
				     rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		if (sc->intrhand)
			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ,
				     rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}

	/* Release DMA resources. */
	if (sc->rxring_dma_map != NULL) {
		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
				sc->rxring_dma_map);
		sc->rxring_dma_map = NULL;
		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
			if (sc->rxring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
						   sc->rxring_m_dmamap[i]);
				sc->rxring_m_dmamap[i] = NULL;
			}
	}
	if (sc->txring_dma_map != NULL) {
		bus_dmamem_free(sc->desc_dma_tag, sc->txring,
				sc->txring_dma_map);
		sc->txring_dma_map = NULL;
		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
			if (sc->txring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
						   sc->txring_m_dmamap[i]);
				sc->txring_m_dmamap[i] = NULL;
			}
	}
	if (sc->desc_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->desc_dma_tag);
		sc->desc_dma_tag = NULL;
	}
	if (sc->mbuf_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->mbuf_dma_tag);
		sc->mbuf_dma_tag = NULL;
	}

	bus_generic_detach(dev);

	CGEM_LOCK_DESTROY(sc);

	return (0);
}

static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cgem_probe),
	DEVMETHOD(device_attach,	cgem_attach),
	DEVMETHOD(device_detach,	cgem_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	cgem_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),

	DEVMETHOD_END
};

static driver_t cgem_driver = {
	IF_CGEM_NAME,
	cgem_methods,
	sizeof(struct cgem_softc),
};

DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);