/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A network interface driver for Cadence GEM Gigabit Ethernet
 * interfaces such as the one used in the Xilinx Zynq-7000 SoC.
 *
 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
 * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
 * and register definitions are in appendix B.18.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/cadence/if_cgem_hw.h>

#include "miibus_if.h"
#define IF_CGEM_NAME		"cgem"

#define CGEM_NUM_RX_DESCS	512	/* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS	512	/* size of transmit descriptor ring */

#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
				CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))
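/*
 * Sizing sketch, assuming the two-word (8-byte) descriptor layout from
 * if_cgem_hw.h: 512 descriptors * 8 bytes = 4KB per ring, so
 * MAX_DESC_RING_SIZE bounds the single contiguous DMA allocation made
 * for either ring in cgem_setup_descs().
 */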
/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS	256	/* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS		8	/* maximum segs in a tx mbuf dma */

#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
struct cgem_softc {
	if_t			ifp;
	struct mtx		sc_mtx;
	device_t		dev;
	device_t		miibus;
	u_int			mii_media_active;	/* last active media */
	int			if_old_flags;
	struct resource		*mem_res;
	struct resource		*irq_res;
	void			*intrhand;
	struct callout		tick_ch;
	uint32_t		net_ctl_shadow;
	int			ref_clk_num;

	bus_dma_tag_t		desc_dma_tag;
	bus_dma_tag_t		mbuf_dma_tag;

	/* receive descriptor ring */
	struct cgem_rx_desc	*rxring;
	bus_addr_t		rxring_physaddr;
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t		rxring_dma_map;
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxhangwar;	/* rx hang work-around */
	u_int			rxoverruns;	/* rx overruns */
	u_int			rxnobufs;	/* rx buf ring empty events */
	u_int			rxdmamapfails;	/* rx dmamap failures */
	uint32_t		rx_frames_prev;

	/* transmit descriptor ring */
	struct cgem_tx_desc	*txring;
	bus_addr_t		txring_physaddr;
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	bus_dmamap_t		txring_dma_map;
	u_int			txfull;		/* tx ring full events */
	u_int			txdefrags;	/* tx calls to m_defrag() */
	u_int			txdefragfails;	/* tx m_defrag() failures */
	u_int			txdmamapfails;	/* tx dmamap failures */
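	/*
	 * For both rings, the hd pointer is where the driver queues the
	 * next descriptor and the tl pointer is the oldest one still
	 * outstanding; the _queued counts track how many descriptors sit
	 * between the two.
	 */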

	/* hardware provided statistics */
	struct cgem_hw_stats {
		uint64_t	tx_bytes;
		uint32_t	tx_frames;
		uint32_t	tx_frames_bcast;
		uint32_t	tx_frames_multi;
		uint32_t	tx_frames_pause;
		uint32_t	tx_frames_64b;
		uint32_t	tx_frames_65to127b;
		uint32_t	tx_frames_128to255b;
		uint32_t	tx_frames_256to511b;
		uint32_t	tx_frames_512to1023b;
		uint32_t	tx_frames_1024to1536b;
		uint32_t	tx_under_runs;
		uint32_t	tx_single_collisn;
		uint32_t	tx_multi_collisn;
		uint32_t	tx_excsv_collisn;
		uint32_t	tx_late_collisn;
		uint32_t	tx_deferred_frames;
		uint32_t	tx_carrier_sense_errs;

		uint64_t	rx_bytes;
		uint32_t	rx_frames;
		uint32_t	rx_frames_bcast;
		uint32_t	rx_frames_multi;
		uint32_t	rx_frames_pause;
		uint32_t	rx_frames_64b;
		uint32_t	rx_frames_65to127b;
		uint32_t	rx_frames_128to255b;
		uint32_t	rx_frames_256to511b;
		uint32_t	rx_frames_512to1023b;
		uint32_t	rx_frames_1024to1536b;
		uint32_t	rx_frames_undersize;
		uint32_t	rx_frames_oversize;
		uint32_t	rx_frames_jabber;
		uint32_t	rx_frames_fcs_errs;
		uint32_t	rx_frames_length_errs;
		uint32_t	rx_symbol_errs;
		uint32_t	rx_align_errs;
		uint32_t	rx_resource_errs;
		uint32_t	rx_overrun_errs;
		uint32_t	rx_ip_hdr_csum_errs;
		uint32_t	rx_tcp_csum_errs;
		uint32_t	rx_udp_csum_errs;
	} stats;
};

#define RD4(sc, off)		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)	(bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)		mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	\
	mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

/* Allow platforms to optionally provide a way to set the reference clock. */
int cgem_set_ref_clk(int unit, int frequency);

static devclass_t cgem_devclass;

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void cgem_mediachange(struct cgem_softc *, struct mii_data *);

static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
	int i;
	uint32_t rnd;

	/* See if boot loader gave us a MAC address already. */
	for (i = 0; i < 4; i++) {
		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
		if (low != 0 || high != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			break;
		}
	}

	/* No MAC from boot loader?  Assign a random one. */
	if (i == 4) {
		rnd = arc4random();

		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;

		device_printf(sc->dev, "no mac address found, assigning "
		    "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
		    eaddr[0], eaddr[1], eaddr[2],
		    eaddr[3], eaddr[4], eaddr[5]);
	}

	/* Move address to first slot and zero out the rest. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	for (i = 1; i < 4; i++) {
		WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
		WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
	}
}

/* cgem_mac_hash():  map 48-bit address to a 6-bit hash.
 * The 6-bit hash corresponds to a bit in a 64-bit hash
 * register.  Setting that bit in the hash register enables
 * reception of all frames with a destination address that hashes
 * to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
 */
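/* For example, hash bit 0 is the exclusive-or of destination address
 * bits 0, 6, 12, ..., 42.  The broadcast address ff:ff:ff:ff:ff:ff
 * XORs eight one-bits into each hash bit, so it hashes to 0 and is
 * matched by bit 0 of the hash register.
 */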
static int
cgem_mac_hash(u_char eaddr[])
{
	int hash;
	int i, j;

	hash = 0;
	for (i = 0; i < 6; i++)
		for (j = i; j < 48; j += 6)
			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
				hash ^= (1 << i);

	return hash;
}

/* After any change in rx flags or multi-cast addresses, set up
 * hash registers and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	u_char *mta;
	int index, i, mcnt;
	uint32_t hash_hi, hash_lo;
	uint32_t net_cfg;

	hash_hi = 0;
	hash_lo = 0;

	net_cfg = RD4(sc, CGEM_NET_CFG);

	net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
	    CGEM_NET_CFG_NO_BCAST |
	    CGEM_NET_CFG_COPY_ALL);

	if ((if_getflags(ifp) & IFF_PROMISC) != 0)
		net_cfg |= CGEM_NET_CFG_COPY_ALL;
	else {
		if ((if_getflags(ifp) & IFF_BROADCAST) == 0)
			net_cfg |= CGEM_NET_CFG_NO_BCAST;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
			hash_hi = 0xffffffff;
			hash_lo = 0xffffffff;
		} else {
			mcnt = if_multiaddr_count(ifp, -1);
			mta = malloc(ETHER_ADDR_LEN * mcnt, M_DEVBUF,
			    M_NOWAIT);
			if (mta == NULL) {
				device_printf(sc->dev,
				    "failed to allocate temp mcast list\n");
				return;
			}
			if_multiaddr_array(ifp, mta, &mcnt, mcnt);
			for (i = 0; i < mcnt; i++) {
				index = cgem_mac_hash(
				    LLADDR((struct sockaddr_dl *)
				    (mta + (i * ETHER_ADDR_LEN))));
				if (index > 31)
					hash_hi |= (1 << (index - 32));
				else
					hash_lo |= (1 << index);
			}
			free(mta, M_DEVBUF);
		}

		if (hash_hi != 0 || hash_lo != 0)
			net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN;
	}

	WR4(sc, CGEM_HASH_TOP, hash_hi);
	WR4(sc, CGEM_HASH_BOT, hash_lo);
	WR4(sc, CGEM_NET_CFG, net_cfg);
}

/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (nsegs != 1 || error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}
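/* cgem_getaddr() relies on the descriptor DMA tag being created with
 * nsegments = 1 below, so a successful load sees exactly one segment
 * and simply records its bus address.
 */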

/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MAX_DESC_RING_SIZE,
	    1,
	    MAX_DESC_RING_SIZE,
	    0,
	    busdma_lock_mutex, &sc->sc_mtx,
	    &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MCLBYTES,
	    TX_MAX_DMA_SEGS,
	    MCLBYTES,
	    0,
	    busdma_lock_mutex, &sc->sc_mtx,
	    &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/* Allocate DMA memory in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
	    (void **)&sc->rxring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
	    &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
	    (void *)sc->rxring,
	    CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc),
	    cgem_getaddr, &sc->rxring_physaddr,
	    BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
		sc->rxring_m_dmamap[i] = NULL;
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Allocate DMA memory for TX descriptors in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
	    (void **)&sc->txring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
	    &sc->txring_dma_map);
	if (err)
		return (err);

	/* Load TX descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
	    (void *)sc->txring,
	    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc),
	    cgem_getaddr, &sc->txring_physaddr,
	    BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize TX descriptor ring. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
		sc->txring_m_dmamap[i] = NULL;
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	return (0);
}

/* Fill receive descriptor ring with mbufs. */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

		/* Load map and plug in physical address. */
		if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) {
			sc->rxdmamapfails++;
			m_free(m);
			break;
		}
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
		    segs, &nsegs, BUS_DMA_NOWAIT)) {
			sc->rxdmamapfails++;
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[sc->rxring_hd_ptr]);
			sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL;
			m_free(m);
			break;
		}
		sc->rxring_m[sc->rxring_hd_ptr] = m;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr],
		    BUS_DMASYNC_PREREAD);

		/* Write rx descriptor and increment head pointer. */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
			    CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}

/* Pull received packets off of receive descriptor ring. */
static void
cgem_recv(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	struct mbuf *m, *m_hd, **m_tl;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	m_hd = NULL;
	m_tl = &m_hd;
	while (sc->rxring_queued > 0 &&
	    (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {

		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr],
		    BUS_DMASYNC_POSTREAD);

		/* Unload and destroy dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
		bus_dmamap_destroy(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
		sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL;

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/* Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
		    (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_free(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		/* Ready it to hand off to upper layers. */
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/* Are we using hardware checksumming?  Check the
		 * status in the receive descriptor.
		 */
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID |
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
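		/* Note: csum_data = 0xffff together with CSUM_DATA_VALID |
		 * CSUM_PSEUDO_HDR tells the stack the payload checksum has
		 * already been verified, so it will not recompute it.
		 */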

		/* Queue it up for delivery below. */
		*m_tl = m;
		m_tl = &m->m_next;
	}

	/* Replenish receive buffers. */
	cgem_fill_rqueue(sc);

	/* Unlock and send up packets. */
	CGEM_UNLOCK(sc);
	while (m_hd != NULL) {
		m = m_hd;
		m_hd = m_hd->m_next;
		m->m_next = NULL;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		if_input(ifp, m);
	}
	CGEM_LOCK(sc);
}

/* Find completed transmits and free their mbufs. */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* free up finished transmits. */
	while (sc->txring_queued > 0 &&
	    ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
	    CGEM_TXDESC_USED) != 0) {

		/* Sync cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr],
		    BUS_DMASYNC_POSTWRITE);

		/* Unload and destroy DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr]);
		bus_dmamap_destroy(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr]);
		sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL;

		/* Free up the mbuf. */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error. log to console. */
			device_printf(sc->dev, "cgem_clean_tx: Whoa! "
			    "AHB error, addr=0x%x\n",
			    sc->txring[sc->txring_tl_ptr].addr);
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
		    CGEM_TXDESC_LATE_COLL)) != 0) {
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
		} else
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);

		/* If the packet spanned more than one tx descriptor,
		 * skip descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			sc->txring[sc->txring_tl_ptr].ctl =
			    ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;

		if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
	}
}

/* Start transmits. */
static void
cgem_start_locked(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
		if (sc->txring_queued >=
		    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {

			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >=
			    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
				sc->txfull++;
				break;
			}
		}

		/* Grab next transmit packet. */
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		/* Create and load DMA map. */
		if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
			m_freem(m);
			sc->txdmamapfails++;
			continue;
		}
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr],
		    m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many segments!  defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				sc->txdefragfails++;
				m_freem(m);
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[sc->txring_hd_ptr]);
				sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
				continue;
			}
			m = m2;
			sc->txdefrags++;
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr],
			    m, segs, &nsegs, BUS_DMA_NOWAIT);
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr]);
			sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
			sc->txdmamapfails++;
			continue;
		}
		sc->txring_m[sc->txring_hd_ptr] = m;

		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr],
		    BUS_DMASYNC_PREWRITE);

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
		    CGEM_NUM_TX_DESCS;

		/* Fill in the TX descriptors back to front so that USED
		 * bit in first descriptor is cleared last.
		 */
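		/* The transmitter stops at any descriptor whose USED bit
		 * is still set, so clearing the first descriptor's USED
		 * bit only after the rest are written keeps the hardware
		 * from racing into a half-built chain.
		 */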
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
			    segs[i].ds_addr;

			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);

		/* If there is a BPF listener, bounce a copy to him. */
		ETHER_BPF_MTAP(ifp, m);
	}
}

static void
cgem_start(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);

	CGEM_LOCK(sc);
	cgem_start_locked(ifp);
	CGEM_UNLOCK(sc);
}

static void
cgem_poll_hw_stats(struct cgem_softc *sc)
{
	uint32_t n;

	CGEM_ASSERT_LOCKED(sc);

	sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
	sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;

	sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
	sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
	sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
	sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
	sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
	sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
	sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
	sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
	sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
	sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
	sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);

	n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
	sc->stats.tx_single_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
	sc->stats.tx_multi_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
	sc->stats.tx_excsv_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_LATE_COLL);
	sc->stats.tx_late_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);

	sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
	sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);

	sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
	sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;

	sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
	sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
	sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
	sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
	sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
	sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
	sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
	sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
	sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
	sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
	sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
	sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
	sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
	sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
	sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
	sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
	sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
	sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
	sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
	sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
	sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
	sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
}
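/*
 * The hardware statistics registers are only 32 bits wide (the byte
 * counts are split across two registers), so cgem_tick() calls this
 * once a second to fold them into the wider soft counters before they
 * can wrap.
 */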

static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_tick(mii);
	}

	/* Poll statistics registers. */
	cgem_poll_hw_stats(sc);

	/* Check for receiver hang. */
	if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
		/*
		 * Reset receiver logic by toggling RX_EN bit.  1usec
		 * delay is necessary especially when operating at 100mbps
		 * and 10mbps speeds.
		 */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
		    ~CGEM_NET_CTRL_RX_EN);
		DELAY(1);
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
	}
	sc->rx_frames_prev = sc->stats.rx_frames;

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	if_t ifp;
	uint32_t istatus;

	CGEM_LOCK(sc);
	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	/* Read interrupt status and immediately clear the bits. */
	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus);

	/* Packets received. */
	if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
		cgem_recv(sc);

	/* Free up any completed transmit buffers. */
	cgem_clean_tx(sc);

	/* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		device_printf(sc->dev, "cgem_intr: hresp not okay! "
		    "rx_status=0x%x\n", RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Receiver overrun. */
	if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
		/* Clear status bit. */
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
		sc->rxoverruns++;
	}

	/* Receiver ran out of bufs. */
	if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
		cgem_fill_rqueue(sc);
		sc->rxnobufs++;
	}

	/* Restart transmitter if needed. */
	if (!if_sendq_empty(ifp))
		cgem_start_locked(ifp);

	CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, 0);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	WR4(sc, CGEM_NET_CFG,
	    CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64);

	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}
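/*
 * net_ctl_shadow caches the NET_CTRL enable bits so that transient
 * writes, such as CGEM_NET_CTRL_START_TX in cgem_start_locked(), can
 * be ORed in without re-reading the register or clobbering the enables.
 */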

/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t net_cfg;
	uint32_t dma_cfg;
	u_char *eaddr = if_getlladdr(ifp);

	CGEM_ASSERT_LOCKED(sc);

	/* Program Net Config Register. */
	net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64 |
	    CGEM_NET_CFG_FCS_REMOVE |
	    CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
	    CGEM_NET_CFG_GIGE_EN |
	    CGEM_NET_CFG_1536RXEN |
	    CGEM_NET_CFG_FULL_DUPLEX |
	    CGEM_NET_CFG_SPEED100;

	/* Enable receive checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
		net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
	    CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
	    CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
	    CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
	    CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

	/* Enable transmit checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set receive address in case it changed. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN,
	    CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
	    CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
	    CGEM_INTR_HRESP_NOT_OK);
}

/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
		return;

	cgem_config(sc);
	cgem_fill_rqueue(sc);

	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii = device_get_softc(sc->miibus);
	mii_mediachg(mii);

	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
	int i;

	CGEM_ASSERT_LOCKED(sc);

	callout_stop(&sc->tick_ch);

	/* Shut down hardware. */
	cgem_reset(sc);

	/* Clear out transmit queue. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring[i].addr = 0;
		if (sc->txring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			sc->txring_m_dmamap[i] = NULL;
			m_freem(sc->txring_m[i]);
			sc->txring_m[i] = NULL;
		}
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	/* Clear out receive queue. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		if (sc->rxring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			sc->rxring_m_dmamap[i] = NULL;

			m_freem(sc->rxring_m[i]);
			sc->rxring_m[i] = NULL;
		}
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Force next statchg or linkchg to program net config register. */
	sc->mii_media_active = 0;
}

static int
cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if (((if_getflags(ifp) ^ sc->if_old_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			cgem_stop(sc);
		}
		sc->if_old_flags = if_getflags(ifp);
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				if_setcapenablebit(ifp, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6, 0);
				if_sethwassistbits(ifp, CGEM_CKSUM_ASSIST, 0);

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				if_setcapenablebit(ifp, 0, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				if_sethwassistbits(ifp, 0, CGEM_CKSUM_ASSIST);

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				if_setcapenablebit(ifp, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6, 0);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) |
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			} else {
				/* Turn off RX checksumming. */
				if_setcapenablebit(ifp, 0, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) &
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			}
		}
		if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
		    (IFCAP_RXCSUM | IFCAP_TXCSUM))
			if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0);
		else
			if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM);

		CGEM_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/* MII bus support routines.
 */
static void
cgem_child_detached(device_t dev, device_t child)
{
	struct cgem_softc *sc = device_get_softc(dev);

	if (child == sc->miibus)
		sc->miibus = NULL;
}

static int
cgem_ifmedia_upd(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			PHY_RESET(miisc);
		error = mii_mediachg(mii);
	}
	CGEM_UNLOCK(sc);

	return (error);
}

static void
cgem_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	CGEM_UNLOCK(sc);
}

static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries, val;

	WR4(sc, CGEM_PHY_MAINT,
	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
	    CGEM_PHY_MAINT_OP_READ |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy read timeout: %d\n", reg);
			return (-1);
		}
	}

	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

	if (reg == MII_EXTSR)
		/*
		 * MAC does not support half-duplex at gig speeds.
		 * Let mii(4) exclude the capability.
		 */
		val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);

	return (val);
}

static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries;

	WR4(sc, CGEM_PHY_MAINT,
	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
	    CGEM_PHY_MAINT_OP_WRITE |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
	    (data & CGEM_PHY_MAINT_DATA_MASK));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy write timeout: %d\n", reg);
			return (-1);
		}
	}

	return (0);
}

static void
cgem_miibus_statchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

static void
cgem_miibus_linkchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

/*
 * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
 * provide a function to set the cgem's reference clock.
 */
static int
cgem_default_set_ref_clk(int unit, int frequency)
{

	return 0;
}
__weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);
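/*
 * A platform overrides the default by exporting a strong
 * cgem_set_ref_clk() of its own; a minimal sketch, assuming a
 * hypothetical platform clock helper plat_set_gem_ref_clk():
 *
 *	int
 *	cgem_set_ref_clk(int unit, int frequency)
 *	{
 *
 *		return (plat_set_gem_ref_clk(unit, frequency));
 *	}
 *
 * On the Zynq-7000 the SLCR driver provides such an implementation.
 */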

/* Call to set reference clock and network config bits according to media. */
static void
cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
{
	uint32_t net_cfg;
	int ref_clk_freq;

	CGEM_ASSERT_LOCKED(sc);

	/* Update hardware to reflect media. */
	net_cfg = RD4(sc, CGEM_NET_CFG);
	net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
	    CGEM_NET_CFG_FULL_DUPLEX);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		net_cfg |= (CGEM_NET_CFG_SPEED100 |
		    CGEM_NET_CFG_GIGE_EN);
		ref_clk_freq = 125000000;
		break;
	case IFM_100_TX:
		net_cfg |= CGEM_NET_CFG_SPEED100;
		ref_clk_freq = 25000000;
		break;
	default:
		ref_clk_freq = 2500000;
	}
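	/*
	 * The default case covers 10Mb/s media.  The (R)GMII reference
	 * clock scales with link speed: 125 MHz at gigabit, 25 MHz at
	 * 100Mb/s, and 2.5 MHz at 10Mb/s.
	 */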

	if ((mii->mii_media_active & IFM_FDX) != 0)
		net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Set the reference clock if necessary. */
	if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq))
		device_printf(sc->dev, "cgem_mediachange: "
		    "could not set ref clk%d to %d.\n",
		    sc->ref_clk_num, ref_clk_freq);

	sc->mii_media_active = mii->mii_media_active;
}

static void
cgem_add_sysctls(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW,
	    &sc->rxbufs, 0, "Number receive buffers to provide");

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW,
	    &sc->rxhangwar, 0, "Enable receive hang work-around");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD,
	    &sc->rxoverruns, 0, "Receive overrun events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD,
	    &sc->rxnobufs, 0, "Receive buf queue empty events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD,
	    &sc->rxdmamapfails, 0, "Receive DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD,
	    &sc->txfull, 0, "Transmit ring full events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD,
	    &sc->txdmamapfails, 0, "Transmit DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD,
	    &sc->txdefrags, 0, "Transmit m_defrag() calls");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD,
	    &sc->txdefragfails, 0, "Transmit m_defrag() failures");

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "GEM statistics");
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD,
	    &sc->stats.tx_bytes, "Total bytes transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD,
	    &sc->stats.tx_frames, 0, "Total frames transmitted");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD,
	    &sc->stats.tx_frames_bcast, 0,
	    "Number broadcast frames transmitted");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD,
	    &sc->stats.tx_frames_multi, 0,
	    "Number multicast frames transmitted");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause",
	    CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
	    "Number pause frames transmitted");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD,
	    &sc->stats.tx_frames_64b, 0,
	    "Number frames transmitted of size 64 bytes or less");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD,
	    &sc->stats.tx_frames_65to127b, 0,
	    "Number frames transmitted of size 65-127 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b",
	    CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
	    "Number frames transmitted of size 128-255 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b",
	    CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
	    "Number frames transmitted of size 256-511 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b",
	    CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
	    "Number frames transmitted of size 512-1023 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b",
	    CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
	    "Number frames transmitted of size 1024-1536 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs",
	    CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
	    "Number transmit under-run events");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn",
	    CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
	    "Number single-collision transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn",
	    CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
	    "Number multi-collision transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn",
	    CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
	    "Number excessive collision transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn",
	    CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
	    "Number late-collision transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames",
	    CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
	    "Number deferred transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs",
	    CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
	    "Number carrier sense errors on transmit");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD,
	    &sc->stats.rx_bytes, "Total bytes received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD,
	    &sc->stats.rx_frames, 0, "Total frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast",
	    CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
	    "Number broadcast frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi",
	    CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
	    "Number multicast frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause",
	    CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
	    "Number pause frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b",
	    CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
	    "Number frames received of size 64 bytes or less");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b",
	    CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
	    "Number frames received of size 65-127 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b",
	    CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
	    "Number frames received of size 128-255 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b",
	    CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
	    "Number frames received of size 256-511 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b",
	    CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
	    "Number frames received of size 512-1023 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b",
	    CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
	    "Number frames received of size 1024-1536 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize",
	    CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
	    "Number undersize frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize",
	    CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
	    "Number oversize frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber",
	    CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
	    "Number jabber frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs",
	    CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
	    "Number frames received with FCS errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs",
	    CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
	    "Number frames received with length errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs",
	    CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
	    "Number receive symbol errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs",
	    CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
	    "Number receive alignment errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs",
	    CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
	    "Number frames received when no rx buffer available");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs",
	    CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
	    "Number frames received but not copied due to "
	    "receive overruns");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
	    "Number frames received with IP header checksum errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
	    "Number frames received with TCP checksum errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
	    "Number frames received with UDP checksum errors");
}

static int
cgem_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "cadence,gem"))
		return (ENXIO);

	device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
	return (0);
}

static int
cgem_attach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	if_t ifp = NULL;
	phandle_t node;
	pcell_t cell;
	int rid, err;
	u_char eaddr[ETHER_ADDR_LEN];

	sc->dev = dev;
	CGEM_LOCK_INIT(sc);

	/* Get reference clock number and base divider from fdt. */
	node = ofw_bus_get_node(dev);
	sc->ref_clk_num = 0;
	if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0)
		sc->ref_clk_num = fdt32_to_cpu(cell);

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		return (ENOMEM);
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Set up ifnet structure. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure\n");
		cgem_detach(dev);
		return (ENOMEM);
	}
	if_setsoftc(ifp, sc);
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setinitfn(ifp, cgem_init);
	if_setioctlfn(ifp, cgem_ioctl);
	if_setstartfn(ifp, cgem_start);
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0);
	if_setsendqlen(ifp, CGEM_NUM_TX_DESCS);
	if_setsendqready(ifp);

	/* Disable hardware checksumming by default. */
	if_sethwassist(ifp, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp) &
	    ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM));

	sc->if_old_flags = if_getflags(ifp);
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;
	sc->rxhangwar = 1;

	/* Reset hardware. */
	CGEM_LOCK(sc);
	cgem_reset(sc);
	CGEM_UNLOCK(sc);

	/* Attach phy to mii bus. */
	err = mii_attach(dev, &sc->miibus, ifp,
	    cgem_ifmedia_upd, cgem_ifmedia_sts,
	    BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err) {
		device_printf(dev, "attaching PHYs failed\n");
		cgem_detach(dev);
		return (err);
	}

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks. */
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	ether_ifattach(ifp, eaddr);

	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
	    INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not set interrupt handler.\n");
		ether_ifdetach(ifp);
		cgem_detach(dev);
		return (err);
	}

	cgem_add_sysctls(dev);

	return (0);
}

static int
cgem_detach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int i;

	if (sc == NULL)
		return (ENODEV);

	if (device_is_attached(dev)) {
		CGEM_LOCK(sc);
		cgem_stop(sc);
		CGEM_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		if_setflagbits(sc->ifp, 0, IFF_UP);
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}

	/* Release resources. */
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		if (sc->intrhand)
			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}

	/* Release DMA resources. */
	if (sc->rxring != NULL) {
		if (sc->rxring_physaddr != 0) {
			bus_dmamap_unload(sc->desc_dma_tag,
			    sc->rxring_dma_map);
			sc->rxring_physaddr = 0;
		}
		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
		    sc->rxring_dma_map);
		sc->rxring = NULL;

		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
			if (sc->rxring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->rxring_m_dmamap[i]);
				sc->rxring_m_dmamap[i] = NULL;
			}
	}
	if (sc->txring != NULL) {
		if (sc->txring_physaddr != 0) {
			bus_dmamap_unload(sc->desc_dma_tag,
			    sc->txring_dma_map);
			sc->txring_physaddr = 0;
		}
		bus_dmamem_free(sc->desc_dma_tag, sc->txring,
		    sc->txring_dma_map);
		sc->txring = NULL;

		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
			if (sc->txring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[i]);
				sc->txring_m_dmamap[i] = NULL;
			}
	}
	if (sc->desc_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->desc_dma_tag);
		sc->desc_dma_tag = NULL;
	}
	if (sc->mbuf_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->mbuf_dma_tag);
		sc->mbuf_dma_tag = NULL;
	}

	bus_generic_detach(dev);

	CGEM_LOCK_DESTROY(sc);

	return (0);
}

static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cgem_probe),
	DEVMETHOD(device_attach,	cgem_attach),
	DEVMETHOD(device_detach,	cgem_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	cgem_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),
	DEVMETHOD(miibus_statchg,	cgem_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	cgem_miibus_linkchg),

	DEVMETHOD_END
};

static driver_t cgem_driver = {
	IF_CGEM_NAME,
	cgem_methods,
	sizeof(struct cgem_softc),
};

DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);