/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A network interface driver for the Cadence GEM Gigabit Ethernet
 * interface, such as the one used in the Xilinx Zynq-7000 SoC.
 *
 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual
 * (v1.4), November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
 * and register definitions are in appendix B.18.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifdef EXT_RESOURCES
#include <dev/extres/clk/clk.h>
#endif

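/*
 * CGEM64 selects the 64-bit descriptor layout (descriptors carry high
 * address words) on platforms whose bus addresses are wider than 32 bits.
 */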
#if BUS_SPACE_MAXADDR > BUS_SPACE_MAXADDR_32BIT
#define CGEM64
#endif

#include <dev/cadence/if_cgem_hw.h>

#include "miibus_if.h"

#define IF_CGEM_NAME "cgem"

#define CGEM_NUM_RX_DESCS       512     /* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS       512     /* size of transmit descriptor ring */

/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS     256     /* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS         8       /* maximum segs in a tx mbuf dma */

#define CGEM_CKSUM_ASSIST       (CSUM_IP | CSUM_TCP | CSUM_UDP | \
                                 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

#define HWTYPE_GENERIC_GEM      1
#define HWTYPE_ZYNQ             2
#define HWTYPE_ZYNQMP           3
#define HWTYPE_SIFIVE           4

static struct ofw_compat_data compat_data[] = {
        { "cdns,zynq-gem",              HWTYPE_ZYNQ },
        { "cdns,zynqmp-gem",            HWTYPE_ZYNQMP },
        { "sifive,fu540-c000-gem",      HWTYPE_SIFIVE },
        { "sifive,fu740-c000-gem",      HWTYPE_SIFIVE },
        { "cdns,gem",                   HWTYPE_GENERIC_GEM },
        { "cadence,gem",                HWTYPE_GENERIC_GEM },
        { NULL,                         0 }
};

struct cgem_softc {
        if_t                    ifp;
        struct mtx              sc_mtx;
        device_t                dev;
        device_t                miibus;
        u_int                   mii_media_active;       /* last active media */
        int                     if_old_flags;
        struct resource         *mem_res;
        struct resource         *irq_res;
        void                    *intrhand;
        struct callout          tick_ch;
        uint32_t                net_ctl_shadow;
        uint32_t                net_cfg_shadow;
#ifdef EXT_RESOURCES
        clk_t                   ref_clk;
#else
        int                     ref_clk_num;
#endif
        int                     neednullqs;

        bus_dma_tag_t           desc_dma_tag;
        bus_dma_tag_t           mbuf_dma_tag;

        /* receive descriptor ring */
        struct cgem_rx_desc     *rxring;
        bus_addr_t              rxring_physaddr;
        struct mbuf             *rxring_m[CGEM_NUM_RX_DESCS];
        bus_dmamap_t            rxring_m_dmamap[CGEM_NUM_RX_DESCS];
        int                     rxring_hd_ptr;  /* where to put rcv bufs */
        int                     rxring_tl_ptr;  /* where to get receives */
        int                     rxring_queued;  /* how many rcv bufs queued */
        bus_dmamap_t            rxring_dma_map;
        int                     rxbufs;         /* tunable number rcv bufs */
        int                     rxhangwar;      /* rx hang work-around */
        u_int                   rxoverruns;     /* rx overruns */
        u_int                   rxnobufs;       /* rx buf ring empty events */
        u_int                   rxdmamapfails;  /* rx dmamap failures */
        uint32_t                rx_frames_prev;

        /* transmit descriptor ring */
        struct cgem_tx_desc     *txring;
        bus_addr_t              txring_physaddr;
        struct mbuf             *txring_m[CGEM_NUM_TX_DESCS];
        bus_dmamap_t            txring_m_dmamap[CGEM_NUM_TX_DESCS];
        int                     txring_hd_ptr;  /* where to put next xmits */
        int                     txring_tl_ptr;  /* next xmit mbuf to free */
        int                     txring_queued;  /* num xmits segs queued */
        u_int                   txfull;         /* tx ring full events */
        u_int                   txdefrags;      /* tx calls to m_defrag() */
        u_int                   txdefragfails;  /* tx m_defrag() failures */
        u_int                   txdmamapfails;  /* tx dmamap failures */

        /* null descriptor rings */
        void                    *null_qs;
        bus_addr_t              null_qs_physaddr;

        /* hardware provided statistics */
        struct cgem_hw_stats {
                uint64_t                tx_bytes;
                uint32_t                tx_frames;
                uint32_t                tx_frames_bcast;
                uint32_t                tx_frames_multi;
                uint32_t                tx_frames_pause;
                uint32_t                tx_frames_64b;
                uint32_t                tx_frames_65to127b;
                uint32_t                tx_frames_128to255b;
                uint32_t                tx_frames_256to511b;
                uint32_t                tx_frames_512to1023b;
                uint32_t                tx_frames_1024to1536b;
                uint32_t                tx_under_runs;
                uint32_t                tx_single_collisn;
                uint32_t                tx_multi_collisn;
                uint32_t                tx_excsv_collisn;
                uint32_t                tx_late_collisn;
                uint32_t                tx_deferred_frames;
                uint32_t                tx_carrier_sense_errs;

                uint64_t                rx_bytes;
                uint32_t                rx_frames;
                uint32_t                rx_frames_bcast;
                uint32_t                rx_frames_multi;
                uint32_t                rx_frames_pause;
                uint32_t                rx_frames_64b;
                uint32_t                rx_frames_65to127b;
                uint32_t                rx_frames_128to255b;
                uint32_t                rx_frames_256to511b;
                uint32_t                rx_frames_512to1023b;
                uint32_t                rx_frames_1024to1536b;
                uint32_t                rx_frames_undersize;
                uint32_t                rx_frames_oversize;
                uint32_t                rx_frames_jabber;
                uint32_t                rx_frames_fcs_errs;
                uint32_t                rx_frames_length_errs;
                uint32_t                rx_symbol_errs;
                uint32_t                rx_align_errs;
                uint32_t                rx_resource_errs;
                uint32_t                rx_overrun_errs;
                uint32_t                rx_ip_hdr_csum_errs;
                uint32_t                rx_tcp_csum_errs;
                uint32_t                rx_udp_csum_errs;
        } stats;
};

#define RD4(sc, off)            (bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)       (bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
        (bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc)           mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)         mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)      mtx_init(&(sc)->sc_mtx, \
            device_get_nameunit((sc)->dev), MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)   mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)  mtx_assert(&(sc)->sc_mtx, MA_OWNED)

/* Allow platforms to optionally provide a way to set the reference clock. */
int cgem_set_ref_clk(int unit, int frequency);

static devclass_t cgem_devclass;

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void cgem_mediachange(struct cgem_softc *, struct mii_data *);

static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
        int i;
        uint32_t rnd;

        /* See if boot loader gave us a MAC address already. */
        for (i = 0; i < 4; i++) {
                uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
                uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
                if (low != 0 || high != 0) {
                        eaddr[0] = low & 0xff;
                        eaddr[1] = (low >> 8) & 0xff;
                        eaddr[2] = (low >> 16) & 0xff;
                        eaddr[3] = (low >> 24) & 0xff;
                        eaddr[4] = high & 0xff;
                        eaddr[5] = (high >> 8) & 0xff;
                        break;
                }
        }

        /* No MAC from boot loader?  Assign a random one. */
        if (i == 4) {
                rnd = arc4random();

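                /*
                 * Note: 'b' (0x62) has the locally administered bit set
                 * and the multicast bit clear, as a random unicast
                 * address should.
                 */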
                eaddr[0] = 'b';
                eaddr[1] = 's';
                eaddr[2] = 'd';
                eaddr[3] = (rnd >> 16) & 0xff;
                eaddr[4] = (rnd >> 8) & 0xff;
                eaddr[5] = rnd & 0xff;

                device_printf(sc->dev, "no MAC address found, assigning "
                    "random: %02x:%02x:%02x:%02x:%02x:%02x\n", eaddr[0],
                    eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);
        }

        /* Move address to first slot and zero out the rest. */
        WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
            (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
        WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

        for (i = 1; i < 4; i++) {
                WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
                WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
        }
}

/*
 * cgem_mac_hash():  map a 48-bit address to a 6-bit hash.  The 6-bit hash
 * corresponds to a bit in a 64-bit hash register.  Setting that bit in the
 * hash register enables reception of all frames with a destination address
 * that hashes to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 of the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
 */
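/*
 * For example, hash bit 0 is the exclusive-or of destination address
 * bits 0, 6, 12, ..., 42, and hash bit 5 is the exclusive-or of bits
 * 5, 11, ..., 47.
 */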
static int
cgem_mac_hash(u_char eaddr[])
{
        int hash;
        int i, j;

        hash = 0;
        for (i = 0; i < 6; i++)
                for (j = i; j < 48; j += 6)
                        if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
                                hash ^= (1 << i);

        return hash;
}

static u_int
cgem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
        uint32_t *hashes = arg;
        int index;

        index = cgem_mac_hash(LLADDR(sdl));
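        /*
         * hashes[0] is written to CGEM_HASH_TOP (hash bits 63:32) and
         * hashes[1] to CGEM_HASH_BOT (bits 31:0) in cgem_rx_filter() below.
         */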
        if (index > 31)
                hashes[0] |= (1U << (index - 32));
        else
                hashes[1] |= (1U << index);

        return (1);
}

/*
 * After any change in rx flags or multi-cast addresses, set up hash registers
 * and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
        if_t ifp = sc->ifp;
        uint32_t hashes[2] = { 0, 0 };

        sc->net_cfg_shadow &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
            CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL);

        if ((if_getflags(ifp) & IFF_PROMISC) != 0)
                sc->net_cfg_shadow |= CGEM_NET_CFG_COPY_ALL;
        else {
                if ((if_getflags(ifp) & IFF_BROADCAST) == 0)
                        sc->net_cfg_shadow |= CGEM_NET_CFG_NO_BCAST;
                if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
                        hashes[0] = 0xffffffff;
                        hashes[1] = 0xffffffff;
                } else
                        if_foreach_llmaddr(ifp, cgem_hash_maddr, hashes);

                if (hashes[0] != 0 || hashes[1] != 0)
                        sc->net_cfg_shadow |= CGEM_NET_CFG_MULTI_HASH_EN;
        }

        WR4(sc, CGEM_HASH_TOP, hashes[0]);
        WR4(sc, CGEM_HASH_BOT, hashes[1]);
        WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
}

/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

        if (nsegs != 1 || error != 0)
                return;
        *(bus_addr_t *)arg = segs[0].ds_addr;
}

/* Set up null queues for the priority queues we can't actually disable. */
static void
cgem_null_qs(struct cgem_softc *sc)
{
        struct cgem_rx_desc *rx_desc;
        struct cgem_tx_desc *tx_desc;
        uint32_t queue_mask;
        int n;

        /* Read design config register 6 to determine number of queues. */
        queue_mask = (RD4(sc, CGEM_DESIGN_CFG6) &
            CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK) >> 1;
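        /*
         * The shift drops the field's low bit (presumably queue 0, which
         * always exists); each remaining set bit is an extra priority
         * queue that gets pointed at the null rings below.
         */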
        if (queue_mask == 0)
                return;

        /* Create empty RX queue and empty TX buf queues. */
        memset(sc->null_qs, 0, sizeof(struct cgem_rx_desc) +
            sizeof(struct cgem_tx_desc));
        rx_desc = sc->null_qs;
        rx_desc->addr = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP;
        tx_desc = (struct cgem_tx_desc *)(rx_desc + 1);
        tx_desc->ctl = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP;

        /* Point all valid ring base pointers to the null queues. */
        for (n = 1; (queue_mask & 1) != 0; n++, queue_mask >>= 1) {
                WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr);
                WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr +
                    sizeof(struct cgem_rx_desc));
        }
}

/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
        int i, err;
        int desc_rings_size = CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc) +
            CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);

        if (sc->neednullqs)
                desc_rings_size += sizeof(struct cgem_rx_desc) +
                    sizeof(struct cgem_tx_desc);

        sc->txring = NULL;
        sc->rxring = NULL;

        /* Allocate non-cached DMA space for RX and TX descriptors. */
        err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1,
#ifdef CGEM64
            1ULL << 32, /* Do not cross a 4G boundary. */
#else
            0,
#endif
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            desc_rings_size, 1, desc_rings_size, 0,
            busdma_lock_mutex, &sc->sc_mtx, &sc->desc_dma_tag);
        if (err)
                return (err);

        /* Set up a bus_dma_tag for mbufs. */
        err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
            TX_MAX_DMA_SEGS, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx,
            &sc->mbuf_dma_tag);
        if (err)
                return (err);

        /*
         * Allocate DMA memory.  We allocate transmit, receive, and null
         * descriptor queues all at once because the hardware provides only
         * one register for the upper 32 bits of the rx and tx descriptor
         * queues' physical addresses.
         */
        err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring,
            BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO,
            &sc->rxring_dma_map);
        if (err)
                return (err);

        /* Load descriptor DMA memory. */
        err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
            (void *)sc->rxring, desc_rings_size,
            cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT);
        if (err)
                return (err);

        /* Initialize RX descriptors. */
        for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
                sc->rxring[i].addr = CGEM_RXDESC_OWN;
                sc->rxring[i].ctl = 0;
                sc->rxring_m[i] = NULL;
                sc->rxring_m_dmamap[i] = NULL;
        }
        sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

        sc->rxring_hd_ptr = 0;
        sc->rxring_tl_ptr = 0;
        sc->rxring_queued = 0;

        sc->txring = (struct cgem_tx_desc *)(sc->rxring + CGEM_NUM_RX_DESCS);
        sc->txring_physaddr = sc->rxring_physaddr + CGEM_NUM_RX_DESCS *
            sizeof(struct cgem_rx_desc);

        /* Initialize TX descriptor ring. */
        for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
                sc->txring[i].addr = 0;
                sc->txring[i].ctl = CGEM_TXDESC_USED;
                sc->txring_m[i] = NULL;
                sc->txring_m_dmamap[i] = NULL;
        }
        sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

        sc->txring_hd_ptr = 0;
        sc->txring_tl_ptr = 0;
        sc->txring_queued = 0;

        if (sc->neednullqs) {
                sc->null_qs = (void *)(sc->txring + CGEM_NUM_TX_DESCS);
                sc->null_qs_physaddr = sc->txring_physaddr +
                    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);

                cgem_null_qs(sc);
        }

        return (0);
}

/* Fill receive descriptor ring with mbufs. */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
        struct mbuf *m = NULL;
        bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
        int nsegs;
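        /* A cluster mbuf is physically contiguous, so only segs[0] is used. */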

        CGEM_ASSERT_LOCKED(sc);

        while (sc->rxring_queued < sc->rxbufs) {
                /* Get a cluster mbuf. */
                m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
                if (m == NULL)
                        break;

                m->m_len = MCLBYTES;
                m->m_pkthdr.len = MCLBYTES;
                m->m_pkthdr.rcvif = sc->ifp;

                /* Load map and plug in physical address. */
                if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
                    &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) {
                        sc->rxdmamapfails++;
                        m_free(m);
                        break;
                }
                if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
                    sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
                    segs, &nsegs, BUS_DMA_NOWAIT)) {
                        sc->rxdmamapfails++;
                        bus_dmamap_destroy(sc->mbuf_dma_tag,
                            sc->rxring_m_dmamap[sc->rxring_hd_ptr]);
                        sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL;
                        m_free(m);
                        break;
                }
                sc->rxring_m[sc->rxring_hd_ptr] = m;

                /* Sync cache with receive buffer. */
                bus_dmamap_sync(sc->mbuf_dma_tag,
                    sc->rxring_m_dmamap[sc->rxring_hd_ptr],
                    BUS_DMASYNC_PREREAD);

                /* Write rx descriptor and increment head pointer. */
                sc->rxring[sc->rxring_hd_ptr].ctl = 0;
#ifdef CGEM64
                sc->rxring[sc->rxring_hd_ptr].addrhi = segs[0].ds_addr >> 32;
#endif
                if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
                        sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
                            CGEM_RXDESC_WRAP;
                        sc->rxring_hd_ptr = 0;
                } else
                        sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

                sc->rxring_queued++;
        }
}

/* Pull received packets off of receive descriptor ring. */
static void
cgem_recv(struct cgem_softc *sc)
{
        if_t ifp = sc->ifp;
        struct mbuf *m, *m_hd, **m_tl;
        uint32_t ctl;

        CGEM_ASSERT_LOCKED(sc);

        /* Pick up all packets in which the OWN bit is set. */
        m_hd = NULL;
        m_tl = &m_hd;
        while (sc->rxring_queued > 0 &&
            (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
                ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

                /* Grab filled mbuf. */
                m = sc->rxring_m[sc->rxring_tl_ptr];
                sc->rxring_m[sc->rxring_tl_ptr] = NULL;

                /* Sync cache with receive buffer. */
                bus_dmamap_sync(sc->mbuf_dma_tag,
                    sc->rxring_m_dmamap[sc->rxring_tl_ptr],
                    BUS_DMASYNC_POSTREAD);

                /* Unload and destroy dmamap. */
                bus_dmamap_unload(sc->mbuf_dma_tag,
                    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
                bus_dmamap_destroy(sc->mbuf_dma_tag,
                    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
                sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL;

                /* Increment tail pointer. */
                if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
                        sc->rxring_tl_ptr = 0;
                sc->rxring_queued--;

                /*
                 * Check FCS and make sure entire packet landed in one mbuf
                 * cluster (which is much bigger than the largest ethernet
                 * packet).
                 */
                if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
                    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
                    (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
                        /* discard. */
                        m_free(m);
                        if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                        continue;
                }

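                /*
                 * The controller deposited the frame ETHER_ALIGN bytes into
                 * the buffer (CGEM_NET_CFG_RX_BUF_OFFSET in cgem_config()),
                 * so skipping that offset leaves the IP header 32-bit
                 * aligned.
                 */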
                /* Ready it to hand off to upper layers. */
                m->m_data += ETHER_ALIGN;
                m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
                m->m_pkthdr.rcvif = ifp;
                m->m_pkthdr.len = m->m_len;

                /*
                 * Are we using hardware checksumming?  Check the status in the
                 * receive descriptor.
                 */
                if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
                        /* TCP or UDP checks out, IP checks out too. */
                        if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
                            CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
                            (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
                            CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
                                m->m_pkthdr.csum_flags |=
                                    CSUM_IP_CHECKED | CSUM_IP_VALID |
                                    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                                m->m_pkthdr.csum_data = 0xffff;
                        } else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
                            CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
                                /* Only IP checks out. */
                                m->m_pkthdr.csum_flags |=
                                    CSUM_IP_CHECKED | CSUM_IP_VALID;
                                m->m_pkthdr.csum_data = 0xffff;
                        }
                }

                /* Queue it up for delivery below. */
                *m_tl = m;
                m_tl = &m->m_next;
        }

        /* Replenish receive buffers. */
        cgem_fill_rqueue(sc);

        /* Unlock and send up packets. */
        CGEM_UNLOCK(sc);
        while (m_hd != NULL) {
                m = m_hd;
                m_hd = m_hd->m_next;
                m->m_next = NULL;
                if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
                if_input(ifp, m);
        }
        CGEM_LOCK(sc);
}

/* Find completed transmits and free their mbufs. */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
        struct mbuf *m;
        uint32_t ctl;

        CGEM_ASSERT_LOCKED(sc);

        /* free up finished transmits. */
        while (sc->txring_queued > 0 &&
            ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
            CGEM_TXDESC_USED) != 0) {
                /* Sync cache. */
                bus_dmamap_sync(sc->mbuf_dma_tag,
                    sc->txring_m_dmamap[sc->txring_tl_ptr],
                    BUS_DMASYNC_POSTWRITE);

                /* Unload and destroy DMA map. */
                bus_dmamap_unload(sc->mbuf_dma_tag,
                    sc->txring_m_dmamap[sc->txring_tl_ptr]);
                bus_dmamap_destroy(sc->mbuf_dma_tag,
                    sc->txring_m_dmamap[sc->txring_tl_ptr]);
                sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL;

                /* Free up the mbuf. */
                m = sc->txring_m[sc->txring_tl_ptr];
                sc->txring_m[sc->txring_tl_ptr] = NULL;
                m_freem(m);

                /* Check the status. */
                if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
                        /* Serious bus error.  Log to console. */
#ifdef CGEM64
                        device_printf(sc->dev,
                            "cgem_clean_tx: AHB error, addr=0x%x%08x\n",
                            sc->txring[sc->txring_tl_ptr].addrhi,
                            sc->txring[sc->txring_tl_ptr].addr);
#else
                        device_printf(sc->dev,
                            "cgem_clean_tx: AHB error, addr=0x%x\n",
                            sc->txring[sc->txring_tl_ptr].addr);
#endif
                } else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
                    CGEM_TXDESC_LATE_COLL)) != 0) {
                        if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
                } else
                        if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);

                /*
                 * If the packet spanned more than one tx descriptor, skip
                 * descriptors until we find the end so that only
                 * start-of-frame descriptors are processed.
                 */
                while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
                        if ((ctl & CGEM_TXDESC_WRAP) != 0)
                                sc->txring_tl_ptr = 0;
                        else
                                sc->txring_tl_ptr++;
                        sc->txring_queued--;

                        ctl = sc->txring[sc->txring_tl_ptr].ctl;

                        sc->txring[sc->txring_tl_ptr].ctl =
                            ctl | CGEM_TXDESC_USED;
                }

                /* Next descriptor. */
                if ((ctl & CGEM_TXDESC_WRAP) != 0)
                        sc->txring_tl_ptr = 0;
                else
                        sc->txring_tl_ptr++;
                sc->txring_queued--;

                if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
        }
}

/* Start transmits. */
static void
cgem_start_locked(if_t ifp)
{
        struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
        struct mbuf *m;
        bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
        uint32_t ctl;
        int i, nsegs, wrap, err;

        CGEM_ASSERT_LOCKED(sc);

        if ((if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0)
                return;

        for (;;) {
                /* Check that there is room in the descriptor ring. */
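                /*
                 * The 2x headroom covers a maximally fragmented packet plus
                 * the descriptors a ring wrap may skip (see the wrap logic
                 * below).
                 */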
                if (sc->txring_queued >=
                    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
                        /* Try to make room. */
                        cgem_clean_tx(sc);

                        /* Still no room? */
                        if (sc->txring_queued >=
                            CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
                                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
                                sc->txfull++;
                                break;
                        }
                }

                /* Grab next transmit packet. */
                m = if_dequeue(ifp);
                if (m == NULL)
                        break;

                /* Create and load DMA map. */
                if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
                        &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
                        m_freem(m);
                        sc->txdmamapfails++;
                        continue;
                }
                err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
                    sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs, &nsegs,
                    BUS_DMA_NOWAIT);
                if (err == EFBIG) {
                        /* Too many segments!  defrag and try again. */
                        struct mbuf *m2 = m_defrag(m, M_NOWAIT);

                        if (m2 == NULL) {
                                sc->txdefragfails++;
                                m_freem(m);
                                bus_dmamap_destroy(sc->mbuf_dma_tag,
                                    sc->txring_m_dmamap[sc->txring_hd_ptr]);
                                sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
                                continue;
                        }
                        m = m2;
                        err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
                            sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs,
                            &nsegs, BUS_DMA_NOWAIT);
                        sc->txdefrags++;
                }
                if (err) {
                        /* Give up. */
                        m_freem(m);
                        bus_dmamap_destroy(sc->mbuf_dma_tag,
                            sc->txring_m_dmamap[sc->txring_hd_ptr]);
                        sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
                        sc->txdmamapfails++;
                        continue;
                }
                sc->txring_m[sc->txring_hd_ptr] = m;

                /* Sync tx buffer with cache. */
                bus_dmamap_sync(sc->mbuf_dma_tag,
                    sc->txring_m_dmamap[sc->txring_hd_ptr],
                    BUS_DMASYNC_PREWRITE);

                /* Set wrap flag if next packet might run off end of ring. */
                wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
                    CGEM_NUM_TX_DESCS;

                /*
                 * Fill in the TX descriptors back to front so that USED bit in
                 * first descriptor is cleared last.
                 */
                for (i = nsegs - 1; i >= 0; i--) {
                        /* Descriptor address. */
                        sc->txring[sc->txring_hd_ptr + i].addr =
                            segs[i].ds_addr;
#ifdef CGEM64
                        sc->txring[sc->txring_hd_ptr + i].addrhi =
                            segs[i].ds_addr >> 32;
#endif
                        /* Descriptor control word. */
                        ctl = segs[i].ds_len;
                        if (i == nsegs - 1) {
                                ctl |= CGEM_TXDESC_LAST_BUF;
                                if (wrap)
                                        ctl |= CGEM_TXDESC_WRAP;
                        }
                        sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

                        if (i != 0)
                                sc->txring_m[sc->txring_hd_ptr + i] = NULL;
                }

                if (wrap)
                        sc->txring_hd_ptr = 0;
                else
                        sc->txring_hd_ptr += nsegs;
                sc->txring_queued += nsegs;

                /* Kick the transmitter. */
                WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
                    CGEM_NET_CTRL_START_TX);

                /* If there is a BPF listener, bounce a copy to it. */
                ETHER_BPF_MTAP(ifp, m);
        }
}

static void
cgem_start(if_t ifp)
{
        struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);

        CGEM_LOCK(sc);
        cgem_start_locked(ifp);
        CGEM_UNLOCK(sc);
}

static void
cgem_poll_hw_stats(struct cgem_softc *sc)
{
        uint32_t n;

        CGEM_ASSERT_LOCKED(sc);

        sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
        sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;

        sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
        sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
        sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
        sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
        sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
        sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
        sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
        sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
        sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
        sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
        sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);

        n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
        sc->stats.tx_single_collisn += n;
        if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
        n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
        sc->stats.tx_multi_collisn += n;
        if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
        n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
        sc->stats.tx_excsv_collisn += n;
        if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
        n = RD4(sc, CGEM_LATE_COLL);
        sc->stats.tx_late_collisn += n;
        if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);

        sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
        sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);

        sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
        sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;

        sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
        sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
        sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
        sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
        sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
        sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
        sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
        sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
        sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
        sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
        sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
        sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
        sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
        sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
        sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
        sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
        sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
        sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
        sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
        sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
        sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
        sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
}

static void
cgem_tick(void *arg)
{
        struct cgem_softc *sc = (struct cgem_softc *)arg;
        struct mii_data *mii;

        CGEM_ASSERT_LOCKED(sc);

        /* Poll the phy. */
        if (sc->miibus != NULL) {
                mii = device_get_softc(sc->miibus);
                mii_tick(mii);
        }

        /* Poll statistics registers. */
        cgem_poll_hw_stats(sc);

        /* Check for receiver hang. */
        if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
                /*
                 * Reset receiver logic by toggling the RX_EN bit.  A 1 usec
                 * delay is necessary, especially when operating at 100 Mbps
                 * and 10 Mbps.
                 */
                WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
                    ~CGEM_NET_CTRL_RX_EN);
                DELAY(1);
                WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
        }
        sc->rx_frames_prev = sc->stats.rx_frames;

        /* Next callout in one second. */
        callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
        struct cgem_softc *sc = (struct cgem_softc *)arg;
        if_t ifp = sc->ifp;
        uint32_t istatus;

        CGEM_LOCK(sc);

        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
                CGEM_UNLOCK(sc);
                return;
        }

        /* Read interrupt status and immediately clear the bits. */
        istatus = RD4(sc, CGEM_INTR_STAT);
        WR4(sc, CGEM_INTR_STAT, istatus);

        /* Packets received. */
        if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
                cgem_recv(sc);

        /* Free up any completed transmit buffers. */
        cgem_clean_tx(sc);

        /* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
        if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
                device_printf(sc->dev,
                    "cgem_intr: hresp not okay! rx_status=0x%x\n",
                    RD4(sc, CGEM_RX_STAT));
                WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
        }

        /* Receiver overrun. */
        if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
                /* Clear status bit. */
                WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
                sc->rxoverruns++;
        }

        /* Receiver ran out of bufs. */
        if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
                WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
                    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
                cgem_fill_rqueue(sc);
                sc->rxnobufs++;
        }

        /* Restart transmitter if needed. */
        if (!if_sendq_empty(ifp))
                cgem_start_locked(ifp);

        CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

        CGEM_ASSERT_LOCKED(sc);

        /* Determine data bus width from design configuration register. */
        switch (RD4(sc, CGEM_DESIGN_CFG1) &
            CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK) {
        case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64:
                sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_64;
                break;
        case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128:
                sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_128;
                break;
        default:
                sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_32;
        }

        WR4(sc, CGEM_NET_CTRL, 0);
        WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
        WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
        WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
        WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
        WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
        WR4(sc, CGEM_HASH_BOT, 0);
        WR4(sc, CGEM_HASH_TOP, 0);
        WR4(sc, CGEM_TX_QBAR, 0);       /* manual says do this. */
        WR4(sc, CGEM_RX_QBAR, 0);

        /* Get management port running even if interface is down. */
        sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_48;
        WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

        sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
        WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}

/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
        if_t ifp = sc->ifp;
        uint32_t dma_cfg;
        u_char *eaddr = if_getlladdr(ifp);

        CGEM_ASSERT_LOCKED(sc);

        /* Program Net Config Register. */
        sc->net_cfg_shadow &= (CGEM_NET_CFG_MDC_CLK_DIV_MASK |
            CGEM_NET_CFG_DBUS_WIDTH_MASK);
        sc->net_cfg_shadow |= (CGEM_NET_CFG_FCS_REMOVE |
            CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
            CGEM_NET_CFG_GIGE_EN | CGEM_NET_CFG_1536RXEN |
            CGEM_NET_CFG_FULL_DUPLEX | CGEM_NET_CFG_SPEED100);

        /* Enable receive checksum offloading? */
        if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
                sc->net_cfg_shadow |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

        WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

        /* Program DMA Config Register. */
        dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
            CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
            CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
            CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
#ifdef CGEM64
            CGEM_DMA_CFG_ADDR_BUS_64 |
#endif
            CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

        /* Enable transmit checksum offloading? */
        if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
                dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

        WR4(sc, CGEM_DMA_CFG, dma_cfg);

        /* Write the rx and tx descriptor ring addresses to the QBAR regs. */
        WR4(sc, CGEM_RX_QBAR, (uint32_t)sc->rxring_physaddr);
        WR4(sc, CGEM_TX_QBAR, (uint32_t)sc->txring_physaddr);
#ifdef CGEM64
        WR4(sc, CGEM_RX_QBAR_HI, (uint32_t)(sc->rxring_physaddr >> 32));
        WR4(sc, CGEM_TX_QBAR_HI, (uint32_t)(sc->txring_physaddr >> 32));
#endif

        /* Enable rx and tx. */
        sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
        WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

        /* Set receive address in case it changed. */
        WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
            (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
        WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

        /* Set up interrupts. */
        WR4(sc, CGEM_INTR_EN, CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
            CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
            CGEM_INTR_HRESP_NOT_OK);
}

/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
        struct mii_data *mii;

        CGEM_ASSERT_LOCKED(sc);

        if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
                return;

        cgem_config(sc);
        cgem_fill_rqueue(sc);

        if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

        if (sc->miibus != NULL) {
                mii = device_get_softc(sc->miibus);
                mii_mediachg(mii);
        }

        callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

static void
cgem_init(void *arg)
{
        struct cgem_softc *sc = (struct cgem_softc *)arg;

        CGEM_LOCK(sc);
        cgem_init_locked(sc);
        CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
        int i;

        CGEM_ASSERT_LOCKED(sc);

        callout_stop(&sc->tick_ch);

        /* Shut down hardware. */
        cgem_reset(sc);

        /* Clear out transmit queue. */
        memset(sc->txring, 0, CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc));
        for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
                sc->txring[i].ctl = CGEM_TXDESC_USED;
                if (sc->txring_m[i]) {
                        /* Unload and destroy dmamap. */
                        bus_dmamap_unload(sc->mbuf_dma_tag,
                            sc->txring_m_dmamap[i]);
                        bus_dmamap_destroy(sc->mbuf_dma_tag,
                            sc->txring_m_dmamap[i]);
                        sc->txring_m_dmamap[i] = NULL;
                        m_freem(sc->txring_m[i]);
                        sc->txring_m[i] = NULL;
                }
        }
        sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

        sc->txring_hd_ptr = 0;
        sc->txring_tl_ptr = 0;
        sc->txring_queued = 0;

        /* Clear out receive queue. */
        memset(sc->rxring, 0, CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc));
        for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
                sc->rxring[i].addr = CGEM_RXDESC_OWN;
                if (sc->rxring_m[i]) {
                        /* Unload and destroy dmamap. */
                        bus_dmamap_unload(sc->mbuf_dma_tag,
                            sc->rxring_m_dmamap[i]);
                        bus_dmamap_destroy(sc->mbuf_dma_tag,
                            sc->rxring_m_dmamap[i]);
                        sc->rxring_m_dmamap[i] = NULL;

                        m_freem(sc->rxring_m[i]);
                        sc->rxring_m[i] = NULL;
                }
        }
        sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

        sc->rxring_hd_ptr = 0;
        sc->rxring_tl_ptr = 0;
        sc->rxring_queued = 0;

        /* Force next statchg or linkchg to program net config register. */
        sc->mii_media_active = 0;
}

static int
cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
        struct cgem_softc *sc = if_getsoftc(ifp);
        struct ifreq *ifr = (struct ifreq *)data;
        struct mii_data *mii;
        int error = 0, mask;

        switch (cmd) {
        case SIOCSIFFLAGS:
                CGEM_LOCK(sc);
                if ((if_getflags(ifp) & IFF_UP) != 0) {
                        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                                if (((if_getflags(ifp) ^ sc->if_old_flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
                                        cgem_rx_filter(sc);
                                }
                        } else {
                                cgem_init_locked(sc);
                        }
                } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                        if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
                        cgem_stop(sc);
                }
                sc->if_old_flags = if_getflags(ifp);
                CGEM_UNLOCK(sc);
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                /* Set up multi-cast filters. */
                if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                        CGEM_LOCK(sc);
                        cgem_rx_filter(sc);
                        CGEM_UNLOCK(sc);
                }
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                if (sc->miibus == NULL)
                        return (ENXIO);
                mii = device_get_softc(sc->miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
                break;

        case SIOCSIFCAP:
                CGEM_LOCK(sc);
                mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;

                if ((mask & IFCAP_TXCSUM) != 0) {
                        if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
                                /* Turn on TX checksumming. */
                                if_setcapenablebit(ifp, IFCAP_TXCSUM |
                                    IFCAP_TXCSUM_IPV6, 0);
                                if_sethwassistbits(ifp, CGEM_CKSUM_ASSIST, 0);

                                WR4(sc, CGEM_DMA_CFG,
                                    RD4(sc, CGEM_DMA_CFG) |
                                    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
                        } else {
                                /* Turn off TX checksumming. */
                                if_setcapenablebit(ifp, 0, IFCAP_TXCSUM |
                                    IFCAP_TXCSUM_IPV6);
                                if_sethwassistbits(ifp, 0, CGEM_CKSUM_ASSIST);

                                WR4(sc, CGEM_DMA_CFG,
                                    RD4(sc, CGEM_DMA_CFG) &
                                    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
                        }
                }
                if ((mask & IFCAP_RXCSUM) != 0) {
                        if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
                                /* Turn on RX checksumming. */
                                if_setcapenablebit(ifp, IFCAP_RXCSUM |
                                    IFCAP_RXCSUM_IPV6, 0);
                                sc->net_cfg_shadow |=
                                    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
                                WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
                        } else {
                                /* Turn off RX checksumming. */
                                if_setcapenablebit(ifp, 0, IFCAP_RXCSUM |
                                    IFCAP_RXCSUM_IPV6);
                                sc->net_cfg_shadow &=
                                    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
                                WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
                        }
                }
                if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
                    (IFCAP_RXCSUM | IFCAP_TXCSUM))
                        if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0);
                else
                        if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM);

                CGEM_UNLOCK(sc);
                break;
        default:
                error = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (error);
}

/* MII bus support routines. */
static int
cgem_ifmedia_upd(if_t ifp)
{
        struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
        struct mii_data *mii;
        struct mii_softc *miisc;
        int error = 0;

        mii = device_get_softc(sc->miibus);
        CGEM_LOCK(sc);
        if ((if_getflags(ifp) & IFF_UP) != 0) {
                LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
                        PHY_RESET(miisc);
                error = mii_mediachg(mii);
        }
        CGEM_UNLOCK(sc);

        return (error);
}

static void
cgem_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
        struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
        struct mii_data *mii;

        mii = device_get_softc(sc->miibus);
        CGEM_LOCK(sc);
        mii_pollstat(mii);
        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;
        CGEM_UNLOCK(sc);
}

static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
        struct cgem_softc *sc = device_get_softc(dev);
        int tries, val;

1377         WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
1378             CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_READ |
1379             (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
1380             (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));
1381
1382         /* Wait for completion. */
1383         tries = 0;
1384         while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
1385                 DELAY(5);
1386                 if (++tries > 200) {
1387                         device_printf(dev, "phy read timeout: %d\n", reg);
1388                         return (-1);
1389                 }
1390         }
1391
1392         val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;
1393
1394         if (reg == MII_EXTSR)
1395                 /*
1396                  * MAC does not support half-duplex at gig speeds.
1397                  * Let mii(4) exclude the capability.
1398                  */
1399                 val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);
1400
1401         return (val);
1402 }
1403
1404 static int
1405 cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
1406 {
1407         struct cgem_softc *sc = device_get_softc(dev);
1408         int tries;
1409
1410         WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
1411             CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_WRITE |
1412             (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
1413             (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
1414             (data & CGEM_PHY_MAINT_DATA_MASK));
1415
1416         /* Wait for completion. */
1417         tries = 0;
1418         while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
1419                 DELAY(5);
1420                 if (++tries > 200) {
1421                         device_printf(dev, "phy write timeout: %d\n", reg);
1422                         return (-1);
1423                 }
1424         }
1425
1426         return (0);
1427 }
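
/*
 * The two MDIO access routines above share the same completion poll.
 * A hypothetical helper (an illustrative sketch, not part of the
 * driver) that factors out the pattern using this file's RD4() and
 * DELAY() macros:
 */
static int
cgem_phy_wait_idle(struct cgem_softc *sc)
{
        int tries = 0;

        /* Poll the idle bit; give up after ~1ms (200 * 5us). */
        while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
                DELAY(5);
                if (++tries > 200)
                        return (ETIMEDOUT);
        }
        return (0);
}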
1428
1429 static void
1430 cgem_miibus_statchg(device_t dev)
1431 {
1432         struct cgem_softc *sc = device_get_softc(dev);
1433         struct mii_data *mii = device_get_softc(sc->miibus);
1434
1435         CGEM_ASSERT_LOCKED(sc);
1436
1437         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1438             (IFM_ACTIVE | IFM_AVALID) &&
1439             sc->mii_media_active != mii->mii_media_active)
1440                 cgem_mediachange(sc, mii);
1441 }
1442
1443 static void
1444 cgem_miibus_linkchg(device_t dev)
1445 {
1446         struct cgem_softc *sc = device_get_softc(dev);
1447         struct mii_data *mii = device_get_softc(sc->miibus);
1448
1449         CGEM_ASSERT_LOCKED(sc);
1450
1451         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1452             (IFM_ACTIVE | IFM_AVALID) &&
1453             sc->mii_media_active != mii->mii_media_active)
1454                 cgem_mediachange(sc, mii);
1455 }
1456
1457 /*
1458  * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
1459  * provide a function to set the cgem's reference clock.
1460  */
1461 static int __used
1462 cgem_default_set_ref_clk(int unit, int frequency)
1463 {
1464
1465         return (0);
1466 }
1467 __weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);
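
/*
 * A platform overrides the weak default by compiling a strong definition
 * of cgem_set_ref_clk() into its own platform code.  A sketch of such an
 * override (the SoC clock hook plat_gem_clk_set_freq() below is an
 * invented placeholder, not a real API):
 *
 *      int
 *      cgem_set_ref_clk(int unit, int frequency)
 *      {
 *
 *              return (plat_gem_clk_set_freq(unit, frequency));
 *      }
 */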
1468
1469 /* Set the reference clock and network config bits according to media. */
1470 static void
1471 cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
1472 {
1473         int ref_clk_freq;
1474
1475         CGEM_ASSERT_LOCKED(sc);
1476
1477         /* Update hardware to reflect media. */
1478         sc->net_cfg_shadow &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
1479             CGEM_NET_CFG_FULL_DUPLEX);
1480
1481         switch (IFM_SUBTYPE(mii->mii_media_active)) {
1482         case IFM_1000_T:
1483                 sc->net_cfg_shadow |= (CGEM_NET_CFG_SPEED100 |
1484                     CGEM_NET_CFG_GIGE_EN);
1485                 ref_clk_freq = 125000000;
1486                 break;
1487         case IFM_100_TX:
1488                 sc->net_cfg_shadow |= CGEM_NET_CFG_SPEED100;
1489                 ref_clk_freq = 25000000;
1490                 break;
1491         default:
1492                 ref_clk_freq = 2500000;
1493         }
1494
1495         if ((mii->mii_media_active & IFM_FDX) != 0)
1496                 sc->net_cfg_shadow |= CGEM_NET_CFG_FULL_DUPLEX;
1497
1498         WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
1499
1500 #ifdef EXT_RESOURCES
1501         if (sc->ref_clk != NULL) {
1502                 CGEM_UNLOCK(sc);
1503                 if (clk_set_freq(sc->ref_clk, ref_clk_freq, 0))
1504                         device_printf(sc->dev, "could not set ref clk to %d\n",
1505                             ref_clk_freq);
1506                 CGEM_LOCK(sc);
1507         }
1508 #else
1509         /* Set the reference clock if necessary. */
1510         if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq))
1511                 device_printf(sc->dev,
1512                     "cgem_mediachange: could not set ref clk%d to %d.\n",
1513                     sc->ref_clk_num, ref_clk_freq);
1514 #endif
1515
1516         sc->mii_media_active = mii->mii_media_active;
1517 }
1518
1519 static void
1520 cgem_add_sysctls(device_t dev)
1521 {
1522         struct cgem_softc *sc = device_get_softc(dev);
1523         struct sysctl_ctx_list *ctx;
1524         struct sysctl_oid_list *child;
1525         struct sysctl_oid *tree;
1526
1527         ctx = device_get_sysctl_ctx(dev);
1528         child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
1529
1530         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW,
1531             &sc->rxbufs, 0, "Number receive buffers to provide");
1532
1533         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW,
1534             &sc->rxhangwar, 0, "Enable receive hang work-around");
1535
1536         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD,
1537             &sc->rxoverruns, 0, "Receive overrun events");
1538
1539         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD,
1540             &sc->rxnobufs, 0, "Receive buf queue empty events");
1541
1542         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD,
1543             &sc->rxdmamapfails, 0, "Receive DMA map failures");
1544
1545         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD,
1546             &sc->txfull, 0, "Transmit ring full events");
1547
1548         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD,
1549             &sc->txdmamapfails, 0, "Transmit DMA map failures");
1550
1551         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD,
1552             &sc->txdefrags, 0, "Transmit m_defrag() calls");
1553
1554         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD,
1555             &sc->txdefragfails, 0, "Transmit m_defrag() failures");
1556
1557         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
1558             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "GEM statistics");
1559         child = SYSCTL_CHILDREN(tree);
1560
1561         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD,
1562             &sc->stats.tx_bytes, "Total bytes transmitted");
1563
1564         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD,
1565             &sc->stats.tx_frames, 0, "Total frames transmitted");
1566
1567         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD,
1568             &sc->stats.tx_frames_bcast, 0,
1569             "Number broadcast frames transmitted");
1570
1571         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD,
1572             &sc->stats.tx_frames_multi, 0,
1573             "Number multicast frames transmitted");
1574
1575         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause",
1576             CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
1577             "Number pause frames transmitted");
1578
1579         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD,
1580             &sc->stats.tx_frames_64b, 0,
1581             "Number frames transmitted of size 64 bytes or less");
1582
1583         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD,
1584             &sc->stats.tx_frames_65to127b, 0,
1585             "Number frames transmitted of size 65-127 bytes");
1586
1587         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b",
1588             CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
1589             "Number frames transmitted of size 128-255 bytes");
1590
1591         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b",
1592             CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
1593             "Number frames transmitted of size 256-511 bytes");
1594
1595         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b",
1596             CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
1597             "Number frames transmitted of size 512-1023 bytes");
1598
1599         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b",
1600             CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
1601             "Number frames transmitted of size 1024-1536 bytes");
1602
1603         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs",
1604             CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
1605             "Number transmit under-run events");
1606
1607         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn",
1608             CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
1609             "Number single-collision transmit frames");
1610
1611         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn",
1612             CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
1613             "Number multi-collision transmit frames");
1614
1615         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn",
1616             CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
1617             "Number excessive collision transmit frames");
1618
1619         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn",
1620             CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
1621             "Number late-collision transmit frames");
1622
1623         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames",
1624             CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
1625             "Number deferred transmit frames");
1626
1627         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs",
1628             CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
1629             "Number carrier sense errors on transmit");
1630
1631         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD,
1632             &sc->stats.rx_bytes, "Total bytes received");
1633
1634         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD,
1635             &sc->stats.rx_frames, 0, "Total frames received");
1636
1637         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast",
1638             CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
1639             "Number broadcast frames received");
1640
1641         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi",
1642             CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
1643             "Number multicast frames received");
1644
1645         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause",
1646             CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
1647             "Number pause frames received");
1648
1649         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b",
1650             CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
1651             "Number frames received of size 64 bytes or less");
1652
1653         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b",
1654             CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
1655             "Number frames received of size 65-127 bytes");
1656
1657         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b",
1658             CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
1659             "Number frames received of size 128-255 bytes");
1660
1661         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b",
1662             CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
1663             "Number frames received of size 256-511 bytes");
1664
1665         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b",
1666             CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
1667             "Number frames received of size 512-1023 bytes");
1668
1669         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b",
1670             CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
1671             "Number frames received of size 1024-1536 bytes");
1672
1673         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize",
1674             CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
1675             "Number undersize frames received");
1676
1677         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize",
1678             CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
1679             "Number oversize frames received");
1680
1681         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber",
1682             CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
1683             "Number jabber frames received");
1684
1685         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs",
1686             CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
1687             "Number frames received with FCS errors");
1688
1689         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs",
1690             CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
1691             "Number frames received with length errors");
1692
1693         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs",
1694             CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
1695             "Number receive symbol errors");
1696
1697         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs",
1698             CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
1699             "Number receive alignment errors");
1700
1701         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs",
1702             CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
1703             "Number frames received when no rx buffer available");
1704
1705         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs",
1706             CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
1707             "Number frames received but not copied due to receive overrun");
1708
1709         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs",
1710             CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
1711             "Number frames received with IP header checksum errors");
1712
1713         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs",
1714             CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
1715             "Number frames received with TCP checksum errors");
1716
1717         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs",
1718             CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
1719             "Number frames received with UDP checksum errors");
1720 }
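
/*
 * Userland illustration: the statistics registered above appear under
 * the dev.cgem.<unit>.stats sysctl node.  A minimal sysctlbyname(3)
 * sketch, assuming unit 0:
 *
 *      #include <sys/types.h>
 *      #include <sys/sysctl.h>
 *      #include <err.h>
 *      #include <stdint.h>
 *      #include <stdio.h>
 *
 *      int
 *      main(void)
 *      {
 *              uint64_t rx_bytes;
 *              size_t len = sizeof(rx_bytes);
 *
 *              if (sysctlbyname("dev.cgem.0.stats.rx_bytes", &rx_bytes,
 *                  &len, NULL, 0) != 0)
 *                      err(1, "sysctlbyname");
 *              printf("rx_bytes: %ju\n", (uintmax_t)rx_bytes);
 *              return (0);
 *      }
 */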
1721
1722 static int
1723 cgem_probe(device_t dev)
1724 {
1725
1726         if (!ofw_bus_status_okay(dev))
1727                 return (ENXIO);
1728
1729         if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
1730                 return (ENXIO);
1731
1732         device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
1733         return (0);
1734 }
1735
1736 static int
1737 cgem_attach(device_t dev)
1738 {
1739         struct cgem_softc *sc = device_get_softc(dev);
1740         if_t ifp = NULL;
1741         int rid, err;
1742         u_char eaddr[ETHER_ADDR_LEN];
1743         int hwtype;
1744 #ifndef EXT_RESOURCES
1745         phandle_t node;
1746         pcell_t cell;
1747 #endif
1748
1749         sc->dev = dev;
1750         CGEM_LOCK_INIT(sc);
1751
1752         /* Key off of compatible string and set hardware-specific options. */
1753         hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
1754         if (hwtype == HWTYPE_ZYNQMP)
1755                 sc->neednullqs = 1;
1756         if (hwtype == HWTYPE_ZYNQ)
1757                 sc->rxhangwar = 1;
1758
1759 #ifdef EXT_RESOURCES
1760         if (hwtype == HWTYPE_ZYNQ || hwtype == HWTYPE_ZYNQMP) {
1761                 if (clk_get_by_ofw_name(dev, 0, "tx_clk", &sc->ref_clk) != 0)
1762                         device_printf(dev,
1763                             "could not retrieve reference clock.\n");
1764                 else if (clk_enable(sc->ref_clk) != 0)
1765                         device_printf(dev, "could not enable clock.\n");
1766         } else if (hwtype == HWTYPE_SIFIVE) {
1767                 if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->ref_clk) != 0)
1768                         device_printf(dev,
1769                             "could not retrieve reference clock.\n");
1770                 else if (clk_enable(sc->ref_clk) != 0)
1771                         device_printf(dev, "could not enable clock.\n");
1772         }
1773 #else
1774         /* Get reference clock number and base divider from fdt. */
1775         node = ofw_bus_get_node(dev);
1776         sc->ref_clk_num = 0;
1777         if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0)
1778                 sc->ref_clk_num = fdt32_to_cpu(cell);
1779 #endif
1780
1781         /* Get memory resource. */
1782         rid = 0;
1783         sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1784             RF_ACTIVE);
1785         if (sc->mem_res == NULL) {
1786                 device_printf(dev, "could not allocate memory resources.\n");
1787                 return (ENOMEM);
1788         }
1789
1790         /* Get IRQ resource. */
1791         rid = 0;
1792         sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1793             RF_ACTIVE);
1794         if (sc->irq_res == NULL) {
1795                 device_printf(dev, "could not allocate interrupt resource.\n");
1796                 cgem_detach(dev);
1797                 return (ENOMEM);
1798         }
1799
1800         /* Set up ifnet structure. */
1801         ifp = sc->ifp = if_alloc(IFT_ETHER);
1802         if (ifp == NULL) {
1803                 device_printf(dev, "could not allocate ifnet structure.\n");
1804                 cgem_detach(dev);
1805                 return (ENOMEM);
1806         }
1807         if_setsoftc(ifp, sc);
1808         if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
1809         if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
1810         if_setinitfn(ifp, cgem_init);
1811         if_setioctlfn(ifp, cgem_ioctl);
1812         if_setstartfn(ifp, cgem_start);
1813         if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
1814             IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0);
1815         if_setsendqlen(ifp, CGEM_NUM_TX_DESCS);
1816         if_setsendqready(ifp);
1817
1818         /* Disable hardware checksumming by default. */
1819         if_sethwassist(ifp, 0);
1820         if_setcapenable(ifp, if_getcapabilities(ifp) &
1821             ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM));
1822
1823         sc->if_old_flags = if_getflags(ifp);
1824         sc->rxbufs = DEFAULT_NUM_RX_BUFS;
1825
1826         /* Reset hardware. */
1827         CGEM_LOCK(sc);
1828         cgem_reset(sc);
1829         CGEM_UNLOCK(sc);
1830
1831         /* Attach phy to mii bus. */
1832         err = mii_attach(dev, &sc->miibus, ifp,
1833             cgem_ifmedia_upd, cgem_ifmedia_sts, BMSR_DEFCAPMASK,
1834             MII_PHY_ANY, MII_OFFSET_ANY, 0);
1835         if (err)
1836                 device_printf(dev, "warning: attaching PHYs failed\n");
1837
1838         /* Set up TX and RX descriptor area. */
1839         err = cgem_setup_descs(sc);
1840         if (err) {
1841                 device_printf(dev, "could not set up dma mem for descs.\n");
1842                 cgem_detach(dev);
1843                 return (ENOMEM);
1844         }
1845
1846         /* Get a MAC address. */
1847         cgem_get_mac(sc, eaddr);
1848
1849         /* Start ticks. */
1850         callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
1851
1852         ether_ifattach(ifp, eaddr);
1853
1854         err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
1855             INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
1856         if (err) {
1857                 device_printf(dev, "could not set up interrupt handler.\n");
1858                 ether_ifdetach(ifp);
1859                 cgem_detach(dev);
1860                 return (err);
1861         }
1862
1863         cgem_add_sysctls(dev);
1864
1865         return (0);
1866 }
1867
1868 static int
1869 cgem_detach(device_t dev)
1870 {
1871         struct cgem_softc *sc = device_get_softc(dev);
1872         int i;
1873
1874         if (sc == NULL)
1875                 return (ENODEV);
1876
1877         if (device_is_attached(dev)) {
1878                 CGEM_LOCK(sc);
1879                 cgem_stop(sc);
1880                 CGEM_UNLOCK(sc);
1881                 callout_drain(&sc->tick_ch);
1882                 if_setflagbits(sc->ifp, 0, IFF_UP);
1883                 ether_ifdetach(sc->ifp);
1884         }
1885
1886         if (sc->miibus != NULL) {
1887                 device_delete_child(dev, sc->miibus);
1888                 sc->miibus = NULL;
1889         }
1890
1891         /* Release resources. */
1892         if (sc->mem_res != NULL) {
1893                 bus_release_resource(dev, SYS_RES_MEMORY,
1894                     rman_get_rid(sc->mem_res), sc->mem_res);
1895                 sc->mem_res = NULL;
1896         }
1897         if (sc->irq_res != NULL) {
1898                 if (sc->intrhand)
1899                         bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
1900                 bus_release_resource(dev, SYS_RES_IRQ,
1901                     rman_get_rid(sc->irq_res), sc->irq_res);
1902                 sc->irq_res = NULL;
1903         }
1904
1905         /* Release DMA resources. */
1906         if (sc->rxring != NULL) {
1907                 if (sc->rxring_physaddr != 0) {
1908                         bus_dmamap_unload(sc->desc_dma_tag,
1909                             sc->rxring_dma_map);
1910                         sc->rxring_physaddr = 0;
1911                         sc->txring_physaddr = 0;
1912                         sc->null_qs_physaddr = 0;
1913                 }
1914                 bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
1915                     sc->rxring_dma_map);
1916                 sc->rxring = NULL;
1917                 sc->txring = NULL;
1918                 sc->null_qs = NULL;
1919
1920                 for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
1921                         if (sc->rxring_m_dmamap[i] != NULL) {
1922                                 bus_dmamap_destroy(sc->mbuf_dma_tag,
1923                                     sc->rxring_m_dmamap[i]);
1924                                 sc->rxring_m_dmamap[i] = NULL;
1925                         }
1926                 for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
1927                         if (sc->txring_m_dmamap[i] != NULL) {
1928                                 bus_dmamap_destroy(sc->mbuf_dma_tag,
1929                                     sc->txring_m_dmamap[i]);
1930                                 sc->txring_m_dmamap[i] = NULL;
1931                         }
1932         }
1933         if (sc->desc_dma_tag != NULL) {
1934                 bus_dma_tag_destroy(sc->desc_dma_tag);
1935                 sc->desc_dma_tag = NULL;
1936         }
1937         if (sc->mbuf_dma_tag != NULL) {
1938                 bus_dma_tag_destroy(sc->mbuf_dma_tag);
1939                 sc->mbuf_dma_tag = NULL;
1940         }
1941
1942 #ifdef EXT_RESOURCES
1943         if (sc->ref_clk != NULL) {
1944                 clk_release(sc->ref_clk);
1945                 sc->ref_clk = NULL;
1946         }
1947 #endif
1948
1949         bus_generic_detach(dev);
1950
1951         CGEM_LOCK_DESTROY(sc);
1952
1953         return (0);
1954 }
1955
1956 static device_method_t cgem_methods[] = {
1957         /* Device interface */
1958         DEVMETHOD(device_probe,         cgem_probe),
1959         DEVMETHOD(device_attach,        cgem_attach),
1960         DEVMETHOD(device_detach,        cgem_detach),
1961
1962         /* MII interface */
1963         DEVMETHOD(miibus_readreg,       cgem_miibus_readreg),
1964         DEVMETHOD(miibus_writereg,      cgem_miibus_writereg),
1965         DEVMETHOD(miibus_statchg,       cgem_miibus_statchg),
1966         DEVMETHOD(miibus_linkchg,       cgem_miibus_linkchg),
1967
1968         DEVMETHOD_END
1969 };
1970
1971 static driver_t cgem_driver = {
1972         "cgem",
1973         cgem_methods,
1974         sizeof(struct cgem_softc),
1975 };
1976
1977 DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
1978 DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
1979 MODULE_DEPEND(cgem, miibus, 1, 1, 1);
1980 MODULE_DEPEND(cgem, ether, 1, 1, 1);
1981 SIMPLEBUS_PNP_INFO(compat_data);