/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A network interface driver for Cadence GEM Gigabit Ethernet
 * interfaces, such as the one used in the Xilinx Zynq-7000 SoC.
 *
 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
 * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
 * and register definitions are in appendix B.18.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifdef EXT_RESOURCES
#include <dev/extres/clk/clk.h>
#endif

#if BUS_SPACE_MAXADDR > BUS_SPACE_MAXADDR_32BIT
#define CGEM64
#endif

#include <dev/cadence/if_cgem_hw.h>

#include "miibus_if.h"

#define IF_CGEM_NAME "cgem"

#define CGEM_NUM_RX_DESCS	512	/* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS	512	/* size of transmit descriptor ring */

/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS	256	/* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS		8	/* maximum segs in a tx mbuf dma */

#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

#define HWTYPE_GENERIC_GEM	1
#define HWTYPE_ZYNQ		2
#define HWTYPE_ZYNQMP		3
#define HWTYPE_SIFIVE_FU540	4

static struct ofw_compat_data compat_data[] = {
	{ "cdns,zynq-gem",		HWTYPE_ZYNQ },
	{ "cdns,zynqmp-gem",		HWTYPE_ZYNQMP },
	{ "sifive,fu540-c000-gem",	HWTYPE_SIFIVE_FU540 },
	{ "cdns,gem",			HWTYPE_GENERIC_GEM },
	{ "cadence,gem",		HWTYPE_GENERIC_GEM },
	{ NULL,				0 }
};
struct cgem_softc {
	if_t			ifp;
	struct mtx		sc_mtx;
	device_t		dev;
	device_t		miibus;
	u_int			mii_media_active;	/* last active media */
	int			if_old_flags;
	struct resource		*mem_res;
	struct resource		*irq_res;
	void			*intrhand;
	struct callout		tick_ch;
	uint32_t		net_ctl_shadow;
	uint32_t		net_cfg_shadow;
	int			ref_clk_num;
#ifdef EXT_RESOURCES
	clk_t			ref_clk;
#endif
	int			neednullqs;

	bus_dma_tag_t		desc_dma_tag;
	bus_dma_tag_t		mbuf_dma_tag;

	/* receive descriptor ring */
	struct cgem_rx_desc	*rxring;
	bus_addr_t		rxring_physaddr;
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t		rxring_dma_map;
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxhangwar;	/* rx hang work-around */
	u_int			rxoverruns;	/* rx overruns */
	u_int			rxnobufs;	/* rx buf ring empty events */
	u_int			rxdmamapfails;	/* rx dmamap failures */
	uint32_t		rx_frames_prev;

	/* transmit descriptor ring */
	struct cgem_tx_desc	*txring;
	bus_addr_t		txring_physaddr;
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	u_int			txfull;		/* tx ring full events */
	u_int			txdefrags;	/* tx calls to m_defrag() */
	u_int			txdefragfails;	/* tx m_defrag() failures */
	u_int			txdmamapfails;	/* tx dmamap failures */

	/* null descriptor rings */
	void			*null_qs;
	bus_addr_t		null_qs_physaddr;

	/* hardware provided statistics */
	struct cgem_hw_stats {
		uint64_t	tx_bytes;
		uint32_t	tx_frames;
		uint32_t	tx_frames_bcast;
		uint32_t	tx_frames_multi;
		uint32_t	tx_frames_pause;
		uint32_t	tx_frames_64b;
		uint32_t	tx_frames_65to127b;
		uint32_t	tx_frames_128to255b;
		uint32_t	tx_frames_256to511b;
		uint32_t	tx_frames_512to1023b;
		uint32_t	tx_frames_1024to1536b;
		uint32_t	tx_under_runs;
		uint32_t	tx_single_collisn;
		uint32_t	tx_multi_collisn;
		uint32_t	tx_excsv_collisn;
		uint32_t	tx_late_collisn;
		uint32_t	tx_deferred_frames;
		uint32_t	tx_carrier_sense_errs;

		uint64_t	rx_bytes;
		uint32_t	rx_frames;
		uint32_t	rx_frames_bcast;
		uint32_t	rx_frames_multi;
		uint32_t	rx_frames_pause;
		uint32_t	rx_frames_64b;
		uint32_t	rx_frames_65to127b;
		uint32_t	rx_frames_128to255b;
		uint32_t	rx_frames_256to511b;
		uint32_t	rx_frames_512to1023b;
		uint32_t	rx_frames_1024to1536b;
		uint32_t	rx_frames_undersize;
		uint32_t	rx_frames_oversize;
		uint32_t	rx_frames_jabber;
		uint32_t	rx_frames_fcs_errs;
		uint32_t	rx_frames_length_errs;
		uint32_t	rx_symbol_errs;
		uint32_t	rx_align_errs;
		uint32_t	rx_resource_errs;
		uint32_t	rx_overrun_errs;
		uint32_t	rx_ip_hdr_csum_errs;
		uint32_t	rx_tcp_csum_errs;
		uint32_t	rx_udp_csum_errs;
	} stats;
};

#define RD4(sc, off)		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)	(bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)		mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	mtx_init(&(sc)->sc_mtx, \
	    device_get_nameunit((sc)->dev), MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

/* Allow platforms to optionally provide a way to set the reference clock. */
int cgem_set_ref_clk(int unit, int frequency);

static devclass_t cgem_devclass;

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void cgem_mediachange(struct cgem_softc *, struct mii_data *);

static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
	int i;
	uint32_t rnd;

	/* See if boot loader gave us a MAC address already. */
	for (i = 0; i < 4; i++) {
		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
		if (low != 0 || high != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			break;
		}
	}

	/* No MAC from boot loader?  Assign a random one. */
	if (i == 4) {
		rnd = arc4random();

		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;

		device_printf(sc->dev, "no mac address found, assigning "
		    "random: %02x:%02x:%02x:%02x:%02x:%02x\n", eaddr[0],
		    eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);
	}
	/* Move address to first slot and zero out the rest. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
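	/*
	 * Worked example of the register packing above: for the MAC address
	 * 00:0a:35:01:02:03 (a made-up value), SPEC_ADDR_LOW(0) receives
	 * 0x01350a00 (bytes 0-3, least-significant byte first) and
	 * SPEC_ADDR_HI(0) receives 0x0302 (bytes 4-5).
	 */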
	for (i = 1; i < 4; i++) {
		WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
		WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
	}
}

/*
 * cgem_mac_hash():  map 48-bit address to a 6-bit hash.  The 6-bit hash
 * corresponds to a bit in a 64-bit hash register.  Setting that bit in the
 * hash register enables reception of all frames with a destination address
 * that hashes to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
 */
static int
cgem_mac_hash(u_char eaddr[])
{
	int hash = 0;
	int i, j;

	for (i = 0; i < 6; i++)
		for (j = i; j < 48; j += 6)
			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
				hash ^= (1 << i);

	return hash;
}

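/*
 * Example: for the all-ones address ff:ff:ff:ff:ff:ff, each of the six XOR
 * chains covers eight set bits, and the XOR of eight ones is zero, so the
 * address hashes to index 0 (bit 0 of the 64-bit hash register).
 */
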
static u_int
cgem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	int index;

	index = cgem_mac_hash(LLADDR(sdl));
	if (index > 31)
		hashes[0] |= (1U << (index - 32));
	else
		hashes[1] |= (1U << index);

	return (1);
}

/*
 * After any change in rx flags or multi-cast addresses, set up hash registers
 * and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t hashes[2] = { 0, 0 };

	sc->net_cfg_shadow &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
	    CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL);

	if ((if_getflags(ifp) & IFF_PROMISC) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_COPY_ALL;
	else {
		if ((if_getflags(ifp) & IFF_BROADCAST) == 0)
			sc->net_cfg_shadow |= CGEM_NET_CFG_NO_BCAST;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
			hashes[0] = 0xffffffff;
			hashes[1] = 0xffffffff;
		} else
			if_foreach_llmaddr(ifp, cgem_hash_maddr, hashes);

		if (hashes[0] != 0 || hashes[1] != 0)
			sc->net_cfg_shadow |= CGEM_NET_CFG_MULTI_HASH_EN;
	}

	WR4(sc, CGEM_HASH_TOP, hashes[0]);
	WR4(sc, CGEM_HASH_BOT, hashes[1]);
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
}
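/*
 * Note: this runs under the driver lock from the SIOCSIFFLAGS and
 * SIOC[ADD|DEL]MULTI ioctl paths, so e.g. "ifconfig cgem0 promisc" takes
 * effect here by setting CGEM_NET_CFG_COPY_ALL.
 */
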
/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (nsegs != 1 || error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/* Set up null queues for priority queues we can't actually disable. */
static void
cgem_null_qs(struct cgem_softc *sc)
{
	struct cgem_rx_desc *rx_desc;
	struct cgem_tx_desc *tx_desc;
	uint32_t queue_mask;
	int n;

	/* Read design config register 6 to determine number of queues. */
	queue_mask = (RD4(sc, CGEM_DESIGN_CFG6) &
	    CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK) >> 1;
	if (queue_mask == 0)
		return;

	/* Create empty RX queue and empty TX buf queues. */
	memset(sc->null_qs, 0, sizeof(struct cgem_rx_desc) +
	    sizeof(struct cgem_tx_desc));
	rx_desc = sc->null_qs;
	rx_desc->addr = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP;
	tx_desc = (struct cgem_tx_desc *)(rx_desc + 1);
	tx_desc->ctl = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP;

	/* Point all valid ring base pointers to the null queues. */
	for (n = 1; (queue_mask & 1) != 0; n++, queue_mask >>= 1) {
		WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr);
		WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr +
		    sizeof(struct cgem_rx_desc));
	}
}
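/*
 * For example, if CGEM_DESIGN_CFG6 reports two extra priority queues,
 * queue_mask above is 0x3 and the loop points the queue-1 and queue-2 base
 * address registers at the one-descriptor null rings, so those queues stay
 * permanently idle.
 */
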
/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;
	int desc_rings_size = CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc) +
	    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);

	if (sc->neednullqs)
		desc_rings_size += sizeof(struct cgem_rx_desc) +
		    sizeof(struct cgem_tx_desc);

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1,
#ifdef CGEM64
	    1ULL << 32,	/* Do not cross a 4G boundary. */
#else
	    0,
#endif
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    desc_rings_size, 1, desc_rings_size, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    TX_MAX_DMA_SEGS, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx,
	    &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/*
	 * Allocate DMA memory.  We allocate transmit, receive and null
	 * descriptor queues all at once because the hardware only provides
	 * one register for the upper 32 bits of rx and tx descriptor queues
	 * hardware addresses.
	 */
	err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
	    (void *)sc->rxring, desc_rings_size,
	    cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
		sc->rxring_m_dmamap[i] = NULL;
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	sc->txring = (struct cgem_tx_desc *)(sc->rxring + CGEM_NUM_RX_DESCS);
	sc->txring_physaddr = sc->rxring_physaddr + CGEM_NUM_RX_DESCS *
	    sizeof(struct cgem_rx_desc);

	/* Initialize TX descriptor ring. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
		sc->txring_m_dmamap[i] = NULL;
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	if (sc->neednullqs) {
		sc->null_qs = (void *)(sc->txring + CGEM_NUM_TX_DESCS);
		sc->null_qs_physaddr = sc->txring_physaddr +
		    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);

		cgem_null_qs(sc);
	}

	return (0);
}
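/*
 * Sizing note (a sketch, assuming the 32-bit descriptor format of two 32-bit
 * words each): the RX ring takes 512 * 8 = 4096 bytes and the TX ring another
 * 4096 bytes, so one contiguous allocation and one bus_dmamap_load() yield
 * both rings, keeping them under a single upper-32-bit address as the comment
 * above requires.  The 64-bit descriptor format roughly doubles those figures.
 */
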
/* Fill receive descriptor ring with mbufs. */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

		/* Load map and plug in physical address. */
		if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) {
			sc->rxdmamapfails++;
			m_free(m);
			break;
		}
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
		    segs, &nsegs, BUS_DMA_NOWAIT)) {
			sc->rxdmamapfails++;
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[sc->rxring_hd_ptr]);
			sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL;
			m_free(m);
			break;
		}
		sc->rxring_m[sc->rxring_hd_ptr] = m;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr],
		    BUS_DMASYNC_PREREAD);

		/* Write rx descriptor and increment head pointer. */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
#ifdef CGEM64
		sc->rxring[sc->rxring_hd_ptr].addrhi = segs[0].ds_addr >> 32;
#endif
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
			    CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}
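/*
 * Ring occupancy note: rxring_queued counts descriptors between the tail
 * (oldest outstanding buffer) and head (next free slot), so with the default
 * rxbufs of 256 only half of the 512-entry ring is kept loaded; the sysctl
 * dev.cgem.<unit>.rxbufs can raise that at runtime.
 */
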
/* Pull received packets off of receive descriptor ring. */
static void
cgem_recv(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	struct mbuf *m, *m_hd, **m_tl;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	m_hd = NULL;
	m_tl = &m_hd;
	while (sc->rxring_queued > 0 &&
	    (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr],
		    BUS_DMASYNC_POSTREAD);

		/* Unload and destroy dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
		bus_dmamap_destroy(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
		sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL;

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/*
		 * Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * frame anyway).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
		    (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		/* Ready it to hand off to upper layers. */
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/*
		 * Are we using hardware checksumming?  Check the status in the
		 * receive descriptor.
		 */
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID |
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Queue it up for delivery below. */
		*m_tl = m;
		m_tl = &m->m_next;
	}

	/* Replenish receive buffers. */
	cgem_fill_rqueue(sc);

	/* Unlock and send up packets. */
	CGEM_UNLOCK(sc);
	while (m_hd != NULL) {
		m = m_hd;
		m_hd = m_hd->m_next;
		m->m_next = NULL;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		if_input(ifp, m);
	}
	CGEM_LOCK(sc);
}

/* Find completed transmits and free their mbufs. */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Free up finished transmits. */
	while (sc->txring_queued > 0 &&
	    ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
	    CGEM_TXDESC_USED) != 0) {
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr],
		    BUS_DMASYNC_POSTWRITE);

		/* Unload and destroy DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr]);
		bus_dmamap_destroy(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr]);
		sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL;

		/* Free up the mbuf. */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error.  Log to console. */
#ifdef CGEM64
			device_printf(sc->dev,
			    "cgem_clean_tx: AHB error, addr=0x%x%08x\n",
			    sc->txring[sc->txring_tl_ptr].addrhi,
			    sc->txring[sc->txring_tl_ptr].addr);
#else
			device_printf(sc->dev,
			    "cgem_clean_tx: AHB error, addr=0x%x\n",
			    sc->txring[sc->txring_tl_ptr].addr);
#endif
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
		    CGEM_TXDESC_LATE_COLL)) != 0) {
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
		} else
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);

		/*
		 * If the packet spanned more than one tx descriptor, skip
		 * descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			sc->txring[sc->txring_tl_ptr].ctl =
			    ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;

		if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
	}
}

/* Start transmits. */
static void
cgem_start_locked(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
		if (sc->txring_queued >=
		    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room?  Ring is full. */
			if (sc->txring_queued >=
			    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
				sc->txfull++;
				break;
			}
		}

		/* Grab next transmit packet. */
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		/* Create and load DMA map. */
		if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
			m_freem(m);
			sc->txdmamapfails++;
			continue;
		}
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many segments!  Defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				sc->txdefragfails++;
				m_freem(m);
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[sc->txring_hd_ptr]);
				sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
				continue;
			}
			m = m2;
			sc->txdefrags++;
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs,
			    &nsegs, BUS_DMA_NOWAIT);
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr]);
			sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
			sc->txdmamapfails++;
			continue;
		}
		sc->txring_m[sc->txring_hd_ptr] = m;

		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr],
		    BUS_DMASYNC_PREWRITE);

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
		    CGEM_NUM_TX_DESCS;

		/*
		 * Fill in the TX descriptors back to front so that USED bit in
		 * first descriptor is cleared last.
		 */
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
			    segs[i].ds_addr;
#ifdef CGEM64
			sc->txring[sc->txring_hd_ptr + i].addrhi =
			    segs[i].ds_addr >> 32;
#endif
			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);

		/* If there is a BPF listener, bounce a copy to him. */
		ETHER_BPF_MTAP(ifp, m);
	}
}
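/*
 * Worked example of the back-to-front descriptor fill above: for a packet
 * mapped into two segments at ring slots 5 and 6, slot 6 is written first
 * (with LAST_BUF set), and slot 5's control word is written last, clearing
 * its USED bit.  Only then can the controller, which starts a frame at the
 * first !USED descriptor, see a fully formed two-descriptor chain.
 */
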
static void
cgem_start(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);

	CGEM_LOCK(sc);
	cgem_start_locked(ifp);
	CGEM_UNLOCK(sc);
}

static void
cgem_poll_hw_stats(struct cgem_softc *sc)
{
	uint32_t n;

	CGEM_ASSERT_LOCKED(sc);

	sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
	sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;

	sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
	sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
	sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
	sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
	sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
	sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
	sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
	sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
	sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
	sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
	sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);

	n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
	sc->stats.tx_single_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
	sc->stats.tx_multi_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
	sc->stats.tx_excsv_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_LATE_COLL);
	sc->stats.tx_late_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);

	sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
	sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);

	sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
	sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;

	sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
	sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
	sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
	sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
	sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
	sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
	sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
	sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
	sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
	sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
	sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
	sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
	sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
	sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
	sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
	sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
	sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
	sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
	sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
	sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
	sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
	sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
}
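/*
 * The additions above accumulate the hardware counters into 64-bit (byte
 * counts) or 32-bit software totals; the accumulation relies on the GEM's
 * statistics registers resetting between polls (they are also cleared
 * explicitly via CGEM_NET_CTRL_CLR_STAT_REGS in cgem_reset()).  cgem_tick()
 * polls once a second, comfortably faster than the fastest-moving 32-bit
 * register can wrap: at gigabit line rate (~125 MB/s) the octet counters
 * take roughly half a minute to overflow.
 */
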
static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_tick(mii);
	}

	/* Poll statistics registers. */
	cgem_poll_hw_stats(sc);

	/* Check for receiver hang. */
	if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
		/*
		 * Reset receiver logic by toggling RX_EN bit.  1usec
		 * delay is necessary especially when operating at 100mbps
		 * and 10mbps speeds.
		 */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
		    ~CGEM_NET_CTRL_RX_EN);
		DELAY(1);
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
	}
	sc->rx_frames_prev = sc->stats.rx_frames;

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	if_t ifp = sc->ifp;
	uint32_t istatus;

	CGEM_LOCK(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	/* Read interrupt status and immediately clear the bits. */
	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus);

	/* Packets received. */
	if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
		cgem_recv(sc);

	/* Free up any completed transmit buffers. */
	cgem_clean_tx(sc);

	/* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		device_printf(sc->dev,
		    "cgem_intr: hresp not okay! rx_status=0x%x\n",
		    RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Receiver overrun. */
	if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
		/* Clear status bit. */
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
		sc->rxoverruns++;
	}

	/* Receiver ran out of bufs. */
	if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
		cgem_fill_rqueue(sc);
		sc->rxnobufs++;
	}

	/* Restart transmitter if needed. */
	if (!if_sendq_empty(ifp))
		cgem_start_locked(ifp);

	CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	/* Determine data bus width from design configuration register. */
	switch (RD4(sc, CGEM_DESIGN_CFG1) &
	    CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK) {
	case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_64;
		break;
	case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_128;
		break;
	default:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_32;
	}

	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_48;
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
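
	/*
	 * Divider note: MDC_CLK_DIV_48 keeps the management clock at or
	 * below the 2.5 MHz ceiling IEEE 802.3 specifies for MDC as long as
	 * the bus clock feeding the divider stays at or below 120 MHz (for
	 * example, a 111 MHz Zynq pclk yields ~2.3 MHz).
	 */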
	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}

/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t dma_cfg;
	u_char *eaddr = if_getlladdr(ifp);

	CGEM_ASSERT_LOCKED(sc);

	/* Program Net Config Register. */
	sc->net_cfg_shadow &= (CGEM_NET_CFG_MDC_CLK_DIV_MASK |
	    CGEM_NET_CFG_DBUS_WIDTH_MASK);
	sc->net_cfg_shadow |= (CGEM_NET_CFG_FCS_REMOVE |
	    CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
	    CGEM_NET_CFG_GIGE_EN | CGEM_NET_CFG_1536RXEN |
	    CGEM_NET_CFG_FULL_DUPLEX | CGEM_NET_CFG_SPEED100);

	/* Enable receive checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
	    CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
	    CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
	    CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
#ifdef CGEM64
	    CGEM_DMA_CFG_ADDR_BUS_64 |
#endif
	    CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

	/* Enable transmit checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t)sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t)sc->txring_physaddr);
#ifdef CGEM64
	WR4(sc, CGEM_RX_QBAR_HI, (uint32_t)(sc->rxring_physaddr >> 32));
	WR4(sc, CGEM_TX_QBAR_HI, (uint32_t)(sc->txring_physaddr >> 32));
#endif

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set receive address in case it changed. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN, CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
	    CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
	    CGEM_INTR_HRESP_NOT_OK);
}

/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
		return;

	cgem_config(sc);
	cgem_fill_rqueue(sc);

	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_mediachg(mii);
	}

	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
	int i;

	CGEM_ASSERT_LOCKED(sc);

	callout_stop(&sc->tick_ch);

	/* Shut down hardware. */
	cgem_reset(sc);

	/* Clear out transmit queue. */
	memset(sc->txring, 0, CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc));
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		if (sc->txring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			sc->txring_m_dmamap[i] = NULL;
			m_freem(sc->txring_m[i]);
			sc->txring_m[i] = NULL;
		}
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	/* Clear out receive queue. */
	memset(sc->rxring, 0, CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc));
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		if (sc->rxring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			sc->rxring_m_dmamap[i] = NULL;

			m_freem(sc->rxring_m[i]);
			sc->rxring_m[i] = NULL;
		}
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Force next statchg or linkchg to program net config register. */
	sc->mii_media_active = 0;
}

static int
cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if (((if_getflags(ifp) ^ sc->if_old_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			cgem_stop(sc);
		}
		sc->if_old_flags = if_getflags(ifp);
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->miibus == NULL)
			return (EINVAL);
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				if_setcapenablebit(ifp, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6, 0);
				if_sethwassistbits(ifp, CGEM_CKSUM_ASSIST, 0);

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				if_setcapenablebit(ifp, 0, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				if_sethwassistbits(ifp, 0, CGEM_CKSUM_ASSIST);

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				if_setcapenablebit(ifp, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6, 0);
				sc->net_cfg_shadow |=
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
				WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
			} else {
				/* Turn off RX checksumming. */
				if_setcapenablebit(ifp, 0, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				sc->net_cfg_shadow &=
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
				WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
			}
		}
		if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
		    (IFCAP_RXCSUM | IFCAP_TXCSUM))
			if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0);
		else
			if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM);
		CGEM_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/* MII bus support routines. */
static int
cgem_ifmedia_upd(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			PHY_RESET(miisc);
		error = mii_mediachg(mii);
	}
	CGEM_UNLOCK(sc);

	return (error);
}

static void
cgem_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	CGEM_UNLOCK(sc);
}

static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries, val;

	WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
	    CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_READ |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy read timeout: %d\n", reg);
			return (-1);
		}
	}

	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

	if (reg == MII_EXTSR)
		/*
		 * MAC does not support half-duplex at gig speeds.
		 * Let mii(4) exclude the capability.
		 */
		val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);

	return (val);
}

static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries;

	WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
	    CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_WRITE |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
	    (data & CGEM_PHY_MAINT_DATA_MASK));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy write timeout: %d\n", reg);
			return (-1);
		}
	}

	return (0);
}

static void
cgem_miibus_statchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

static void
cgem_miibus_linkchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

/*
 * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
 * provide a function to set the cgem's reference clock.
 */
static int
cgem_default_set_ref_clk(int unit, int frequency)
{

	return (0);
}
__weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);
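/*
 * A platform overrides the weak default by supplying its own strong
 * cgem_set_ref_clk() (on Zynq, for instance, the SLCR driver programs the
 * GEM reference clock divisors); boards without such support fall through
 * to the stub above, which simply reports success.
 */
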
/* Call to set reference clock and network config bits according to media. */
static void
cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
{
	int ref_clk_freq;

	CGEM_ASSERT_LOCKED(sc);

	/* Update hardware to reflect media. */
	sc->net_cfg_shadow &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
	    CGEM_NET_CFG_FULL_DUPLEX);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		sc->net_cfg_shadow |= (CGEM_NET_CFG_SPEED100 |
		    CGEM_NET_CFG_GIGE_EN);
		ref_clk_freq = 125000000;
		break;
	case IFM_100_TX:
		sc->net_cfg_shadow |= CGEM_NET_CFG_SPEED100;
		ref_clk_freq = 25000000;
		break;
	default:
		ref_clk_freq = 2500000;
	}

	if ((mii->mii_media_active & IFM_FDX) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_FULL_DUPLEX;

	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

#ifdef EXT_RESOURCES
	if (sc->ref_clk != NULL) {
		CGEM_UNLOCK(sc);
		if (clk_set_freq(sc->ref_clk, ref_clk_freq, 0))
			device_printf(sc->dev, "could not set ref clk to %d\n",
			    ref_clk_freq);
		CGEM_LOCK(sc);
	}
#else
	/* Set the reference clock if necessary. */
	if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq))
		device_printf(sc->dev,
		    "cgem_mediachange: could not set ref clk%d to %d.\n",
		    sc->ref_clk_num, ref_clk_freq);
#endif

	sc->mii_media_active = mii->mii_media_active;
}

static void
cgem_add_sysctls(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW,
	    &sc->rxbufs, 0, "Number receive buffers to provide");

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW,
	    &sc->rxhangwar, 0, "Enable receive hang work-around");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD,
	    &sc->rxoverruns, 0, "Receive overrun events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD,
	    &sc->rxnobufs, 0, "Receive buf queue empty events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD,
	    &sc->rxdmamapfails, 0, "Receive DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD,
	    &sc->txfull, 0, "Transmit ring full events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD,
	    &sc->txdmamapfails, 0, "Transmit DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD,
	    &sc->txdefrags, 0, "Transmit m_defrag() calls");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD,
	    &sc->txdefragfails, 0, "Transmit m_defrag() failures");

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "GEM statistics");
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD,
	    &sc->stats.tx_bytes, "Total bytes transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD,
	    &sc->stats.tx_frames, 0, "Total frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD,
	    &sc->stats.tx_frames_bcast, 0,
	    "Number broadcast frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD,
	    &sc->stats.tx_frames_multi, 0,
	    "Number multicast frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause",
	    CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
	    "Number pause frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD,
	    &sc->stats.tx_frames_64b, 0,
	    "Number frames transmitted of size 64 bytes or less");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD,
	    &sc->stats.tx_frames_65to127b, 0,
	    "Number frames transmitted of size 65-127 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b",
	    CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
	    "Number frames transmitted of size 128-255 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b",
	    CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
	    "Number frames transmitted of size 256-511 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b",
	    CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
	    "Number frames transmitted of size 512-1023 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b",
	    CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
	    "Number frames transmitted of size 1024-1536 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs",
	    CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
	    "Number transmit under-run events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn",
	    CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
	    "Number single-collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn",
	    CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
	    "Number multi-collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn",
	    CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
	    "Number excessive collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn",
	    CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
	    "Number late-collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames",
	    CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
	    "Number deferred transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs",
	    CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
	    "Number carrier sense errors on transmit");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD,
	    &sc->stats.rx_bytes, "Total bytes received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD,
	    &sc->stats.rx_frames, 0, "Total frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast",
	    CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
	    "Number broadcast frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi",
	    CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
	    "Number multicast frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause",
	    CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
	    "Number pause frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b",
	    CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
	    "Number frames received of size 64 bytes or less");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b",
	    CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
	    "Number frames received of size 65-127 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b",
	    CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
	    "Number frames received of size 128-255 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b",
	    CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
	    "Number frames received of size 256-511 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b",
	    CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
	    "Number frames received of size 512-1023 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b",
	    CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
	    "Number frames received of size 1024-1536 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize",
	    CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
	    "Number undersize frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize",
	    CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
	    "Number oversize frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber",
	    CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
	    "Number jabber frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs",
	    CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
	    "Number frames received with FCS errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs",
	    CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
	    "Number frames received with length errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs",
	    CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
	    "Number receive symbol errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs",
	    CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
	    "Number receive alignment errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs",
	    CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
	    "Number frames received when no rx buffer available");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs",
	    CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
	    "Number frames received but not copied due to receive overrun");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
	    "Number frames received with IP header checksum errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
	    "Number frames received with TCP checksum errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
	    "Number frames received with UDP checksum errors");
}
static int
cgem_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");

	return (0);
}

static int
cgem_attach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	if_t ifp = NULL;
	int rid, err;
	u_char eaddr[ETHER_ADDR_LEN];
	int hwtype;
#ifndef EXT_RESOURCES
	phandle_t node;
	pcell_t cell;
#endif

	sc->dev = dev;
	CGEM_LOCK_INIT(sc);

	/* Key off of compatible string and set hardware-specific options. */
	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (hwtype == HWTYPE_ZYNQMP)
		sc->neednullqs = 1;
	if (hwtype == HWTYPE_ZYNQ)
		sc->rxhangwar = 1;

#ifdef EXT_RESOURCES
	if (hwtype == HWTYPE_ZYNQ || hwtype == HWTYPE_ZYNQMP) {
		if (clk_get_by_ofw_name(dev, 0, "tx_clk", &sc->ref_clk) != 0)
			device_printf(dev,
			    "could not retrieve reference clock.\n");
		else if (clk_enable(sc->ref_clk) != 0)
			device_printf(dev, "could not enable clock.\n");
	}
	else if (hwtype == HWTYPE_SIFIVE_FU540) {
		if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->ref_clk) != 0)
			device_printf(dev,
			    "could not retrieve reference clock.\n");
		else if (clk_enable(sc->ref_clk) != 0)
			device_printf(dev, "could not enable clock.\n");
	}
#else
	/* Get reference clock number and base divider from fdt. */
	node = ofw_bus_get_node(dev);
	sc->ref_clk_num = 0;
	if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0)
		sc->ref_clk_num = fdt32_to_cpu(cell);
#endif

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		return (ENOMEM);
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Set up ifnet structure. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure\n");
		cgem_detach(dev);
		return (ENOMEM);
	}
	if_setsoftc(ifp, sc);
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setinitfn(ifp, cgem_init);
	if_setioctlfn(ifp, cgem_ioctl);
	if_setstartfn(ifp, cgem_start);
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0);
	if_setsendqlen(ifp, CGEM_NUM_TX_DESCS);
	if_setsendqready(ifp);

	/* Disable hardware checksumming by default. */
	if_sethwassist(ifp, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp) &
	    ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM));

	sc->if_old_flags = if_getflags(ifp);
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;

	/* Reset hardware. */
	CGEM_LOCK(sc);
	cgem_reset(sc);
	CGEM_UNLOCK(sc);

	/* Attach phy to mii bus. */
	err = mii_attach(dev, &sc->miibus, ifp,
	    cgem_ifmedia_upd, cgem_ifmedia_sts, BMSR_DEFCAPMASK,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err)
		device_printf(dev, "warning: attaching PHYs failed\n");

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks. */
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	ether_ifattach(ifp, eaddr);

	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
	    INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not set interrupt handler.\n");
		ether_ifdetach(ifp);
		cgem_detach(dev);
		return (err);
	}

	cgem_add_sysctls(dev);

	return (0);
}

static int
cgem_detach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int i;

	if (sc == NULL)
		return (ENODEV);

	if (device_is_attached(dev)) {
		CGEM_LOCK(sc);
		cgem_stop(sc);
		CGEM_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		if_setflagbits(sc->ifp, 0, IFF_UP);
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}

	/* Release resources. */
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		if (sc->intrhand)
			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}

	/* Release DMA resources. */
	if (sc->rxring != NULL) {
		if (sc->rxring_physaddr != 0) {
			bus_dmamap_unload(sc->desc_dma_tag,
			    sc->rxring_dma_map);
			sc->rxring_physaddr = 0;
			sc->txring_physaddr = 0;
			sc->null_qs_physaddr = 0;
		}
		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
		    sc->rxring_dma_map);
		sc->rxring = NULL;

		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
			if (sc->rxring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->rxring_m_dmamap[i]);
				sc->rxring_m_dmamap[i] = NULL;
			}
		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
			if (sc->txring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[i]);
				sc->txring_m_dmamap[i] = NULL;
			}
	}
	if (sc->desc_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->desc_dma_tag);
		sc->desc_dma_tag = NULL;
	}
	if (sc->mbuf_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->mbuf_dma_tag);
		sc->mbuf_dma_tag = NULL;
	}

#ifdef EXT_RESOURCES
	if (sc->ref_clk != NULL) {
		clk_release(sc->ref_clk);
		sc->ref_clk = NULL;
	}
#endif

	bus_generic_detach(dev);

	CGEM_LOCK_DESTROY(sc);

	return (0);
}

static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cgem_probe),
	DEVMETHOD(device_attach,	cgem_attach),
	DEVMETHOD(device_detach,	cgem_detach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),
	DEVMETHOD(miibus_statchg,	cgem_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	cgem_miibus_linkchg),

	DEVMETHOD_END
};

static driver_t cgem_driver = {
	IF_CGEM_NAME,
	cgem_methods,
	sizeof(struct cgem_softc),
};

DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);
SIMPLEBUS_PNP_INFO(compat_data);