/*-
 * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A network interface driver for the Cadence GEM Gigabit Ethernet
 * controller, such as the one used in the Xilinx Zynq-7000 SoC.
 *
 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
 * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
 * and register definitions are in appendix B.18.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/cadence/if_cgem_hw.h>

#include "miibus_if.h"

#define IF_CGEM_NAME "cgem"

#define CGEM_NUM_RX_DESCS       512     /* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS       512     /* size of transmit descriptor ring */

#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
                                CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))

/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS     256     /* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS         8       /* maximum segs in a tx mbuf dma */

#define CGEM_CKSUM_ASSIST       (CSUM_IP | CSUM_TCP | CSUM_UDP | \
                                 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

struct cgem_softc {
        if_t                    ifp;
        struct mtx              sc_mtx;
        device_t                dev;
        device_t                miibus;
        u_int                   mii_media_active;       /* last active media */
        int                     if_old_flags;
        struct resource         *mem_res;
        struct resource         *irq_res;
        void                    *intrhand;
        struct callout          tick_ch;
        uint32_t                net_ctl_shadow;
        int                     ref_clk_num;
        u_char                  eaddr[6];

        bus_dma_tag_t           desc_dma_tag;
        bus_dma_tag_t           mbuf_dma_tag;

        /* receive descriptor ring */
        struct cgem_rx_desc     *rxring;
        bus_addr_t              rxring_physaddr;
        struct mbuf             *rxring_m[CGEM_NUM_RX_DESCS];
        bus_dmamap_t            rxring_m_dmamap[CGEM_NUM_RX_DESCS];
        int                     rxring_hd_ptr;  /* where to put rcv bufs */
        int                     rxring_tl_ptr;  /* where to get receives */
        int                     rxring_queued;  /* how many rcv bufs queued */
        bus_dmamap_t            rxring_dma_map;
        int                     rxbufs;         /* tunable number rcv bufs */
        int                     rxhangwar;      /* rx hang work-around */
        u_int                   rxoverruns;     /* rx overruns */
        u_int                   rxnobufs;       /* rx buf ring empty events */
        u_int                   rxdmamapfails;  /* rx dmamap failures */
        uint32_t                rx_frames_prev;

        /* transmit descriptor ring */
        struct cgem_tx_desc     *txring;
        bus_addr_t              txring_physaddr;
        struct mbuf             *txring_m[CGEM_NUM_TX_DESCS];
        bus_dmamap_t            txring_m_dmamap[CGEM_NUM_TX_DESCS];
        int                     txring_hd_ptr;  /* where to put next xmits */
        int                     txring_tl_ptr;  /* next xmit mbuf to free */
        int                     txring_queued;  /* num xmits segs queued */
        bus_dmamap_t            txring_dma_map;
        u_int                   txfull;         /* tx ring full events */
        u_int                   txdefrags;      /* tx calls to m_defrag() */
        u_int                   txdefragfails;  /* tx m_defrag() failures */
        u_int                   txdmamapfails;  /* tx dmamap failures */

        /* hardware provided statistics */
        struct cgem_hw_stats {
                uint64_t                tx_bytes;
                uint32_t                tx_frames;
                uint32_t                tx_frames_bcast;
                uint32_t                tx_frames_multi;
                uint32_t                tx_frames_pause;
                uint32_t                tx_frames_64b;
                uint32_t                tx_frames_65to127b;
                uint32_t                tx_frames_128to255b;
                uint32_t                tx_frames_256to511b;
                uint32_t                tx_frames_512to1023b;
                uint32_t                tx_frames_1024to1536b;
                uint32_t                tx_under_runs;
                uint32_t                tx_single_collisn;
                uint32_t                tx_multi_collisn;
                uint32_t                tx_excsv_collisn;
                uint32_t                tx_late_collisn;
                uint32_t                tx_deferred_frames;
                uint32_t                tx_carrier_sense_errs;

                uint64_t                rx_bytes;
                uint32_t                rx_frames;
                uint32_t                rx_frames_bcast;
                uint32_t                rx_frames_multi;
                uint32_t                rx_frames_pause;
                uint32_t                rx_frames_64b;
                uint32_t                rx_frames_65to127b;
                uint32_t                rx_frames_128to255b;
                uint32_t                rx_frames_256to511b;
                uint32_t                rx_frames_512to1023b;
                uint32_t                rx_frames_1024to1536b;
                uint32_t                rx_frames_undersize;
                uint32_t                rx_frames_oversize;
                uint32_t                rx_frames_jabber;
                uint32_t                rx_frames_fcs_errs;
                uint32_t                rx_frames_length_errs;
                uint32_t                rx_symbol_errs;
                uint32_t                rx_align_errs;
                uint32_t                rx_resource_errs;
                uint32_t                rx_overrun_errs;
                uint32_t                rx_ip_hdr_csum_errs;
                uint32_t                rx_tcp_csum_errs;
                uint32_t                rx_udp_csum_errs;
        } stats;
};

#define RD4(sc, off)            (bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)       (bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
        (bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc)           mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)         mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)      \
        mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
                 MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)   mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)  mtx_assert(&(sc)->sc_mtx, MA_OWNED)

/* Allow platforms to optionally provide a way to set the reference clock. */
int cgem_set_ref_clk(int unit, int frequency);

static devclass_t cgem_devclass;

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void cgem_mediachange(struct cgem_softc *, struct mii_data *);

static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
        int i;
        uint32_t rnd;

        /* See if boot loader gave us a MAC address already. */
        for (i = 0; i < 4; i++) {
                uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
                uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
                if (low != 0 || high != 0) {
                        eaddr[0] = low & 0xff;
                        eaddr[1] = (low >> 8) & 0xff;
                        eaddr[2] = (low >> 16) & 0xff;
                        eaddr[3] = (low >> 24) & 0xff;
                        eaddr[4] = high & 0xff;
                        eaddr[5] = (high >> 8) & 0xff;
                        break;
                }
        }

        /* No MAC from boot loader?  Assign a random one. */
        if (i == 4) {
                rnd = arc4random();

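                /*
                 * Note: 'b' is 0x62, which has the locally-administered
                 * bit (0x02) set and the multicast bit (0x01) clear, so
                 * the "bsd"-prefixed address below is a valid locally
                 * administered unicast address.
                 */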
                eaddr[0] = 'b';
                eaddr[1] = 's';
                eaddr[2] = 'd';
                eaddr[3] = (rnd >> 16) & 0xff;
                eaddr[4] = (rnd >> 8) & 0xff;
                eaddr[5] = rnd & 0xff;

                device_printf(sc->dev, "no mac address found, assigning "
                              "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
                              eaddr[0], eaddr[1], eaddr[2],
                              eaddr[3], eaddr[4], eaddr[5]);
        }

        /* Move address to first slot and zero out the rest. */
        WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
            (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
        WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

        for (i = 1; i < 4; i++) {
                WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
                WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
        }
}

/* cgem_mac_hash():  map 48-bit address to a 6-bit hash.
 * The 6-bit hash corresponds to a bit in a 64-bit hash
 * register.  Setting that bit in the hash register enables
 * reception of all frames with a destination address that hashes
 * to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 of the Zynq-7000 Tech
 * Reference Manual.  Hash bit i (for i in 0-5) is the exclusive-or of
 * destination address bits i, i+6, i+12, ..., i+42.
 */
static int
cgem_mac_hash(u_char eaddr[])
{
        int hash;
        int i, j;

        hash = 0;
        for (i = 0; i < 6; i++)
                for (j = i; j < 48; j += 6)
                        if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
                                hash ^= (1 << i);

        return (hash);
}
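
/*
 * Worked example: for the broadcast address ff:ff:ff:ff:ff:ff, each of
 * the six hash bits is the exclusive-or of eight 1 bits, which is 0, so
 * cgem_mac_hash() returns 0 and the address maps to bit 0 of the hash
 * register.
 */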

/* After any change in rx flags or multi-cast addresses, set up
 * hash registers and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
        if_t ifp = sc->ifp;
        u_char *mta;

        int index, i, mcnt;
        uint32_t hash_hi, hash_lo;
        uint32_t net_cfg;

        hash_hi = 0;
        hash_lo = 0;

        net_cfg = RD4(sc, CGEM_NET_CFG);

        net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
                     CGEM_NET_CFG_NO_BCAST |
                     CGEM_NET_CFG_COPY_ALL);

        if ((if_getflags(ifp) & IFF_PROMISC) != 0)
                net_cfg |= CGEM_NET_CFG_COPY_ALL;
        else {
                if ((if_getflags(ifp) & IFF_BROADCAST) == 0)
                        net_cfg |= CGEM_NET_CFG_NO_BCAST;
                if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
                        hash_hi = 0xffffffff;
                        hash_lo = 0xffffffff;
                } else {
                        mcnt = if_multiaddr_count(ifp, -1);
                        mta = malloc(ETHER_ADDR_LEN * mcnt, M_DEVBUF,
                                     M_NOWAIT);
                        if (mta == NULL) {
                                device_printf(sc->dev,
                                      "failed to allocate temp mcast list\n");
                                return;
                        }
                        if_multiaddr_array(ifp, mta, &mcnt, mcnt);
                        for (i = 0; i < mcnt; i++) {
                                index = cgem_mac_hash(
                                        LLADDR((struct sockaddr_dl *)
                                               (mta + (i * ETHER_ADDR_LEN))));
                                if (index > 31)
                                        hash_hi |= (1 << (index - 32));
                                else
                                        hash_lo |= (1 << index);
                        }
                        free(mta, M_DEVBUF);
                }

                if (hash_hi != 0 || hash_lo != 0)
                        net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN;
        }

        WR4(sc, CGEM_HASH_TOP, hash_hi);
        WR4(sc, CGEM_HASH_BOT, hash_lo);
        WR4(sc, CGEM_NET_CFG, net_cfg);
}

/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

        if (nsegs != 1 || error != 0)
                return;
        *(bus_addr_t *)arg = segs[0].ds_addr;
}

/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
        int i, err;

        sc->txring = NULL;
        sc->rxring = NULL;

        /* Allocate non-cached DMA space for RX and TX descriptors. */
        /*
         * Tag parameters: alignment 1, no boundary, descriptors kept
         * below 4GB (the QBAR registers hold 32-bit addresses), one
         * contiguous segment of at most MAX_DESC_RING_SIZE bytes, and
         * busdma_lock_mutex with sc_mtx as the lock for deferred loads.
         */
        err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
                                 BUS_SPACE_MAXADDR_32BIT,
                                 BUS_SPACE_MAXADDR,
                                 NULL, NULL,
                                 MAX_DESC_RING_SIZE,
                                 1,
                                 MAX_DESC_RING_SIZE,
                                 0,
                                 busdma_lock_mutex,
                                 &sc->sc_mtx,
                                 &sc->desc_dma_tag);
        if (err)
                return (err);

        /*
         * Set up a bus_dma_tag for mbufs: up to TX_MAX_DMA_SEGS segments
         * per mapping, each segment at most MCLBYTES.
         */
        err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
                                 BUS_SPACE_MAXADDR_32BIT,
                                 BUS_SPACE_MAXADDR,
                                 NULL, NULL,
                                 MCLBYTES,
                                 TX_MAX_DMA_SEGS,
                                 MCLBYTES,
                                 0,
                                 busdma_lock_mutex,
                                 &sc->sc_mtx,
                                 &sc->mbuf_dma_tag);
        if (err)
                return (err);

        /* Allocate DMA memory in non-cacheable space. */
        err = bus_dmamem_alloc(sc->desc_dma_tag,
                               (void **)&sc->rxring,
                               BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
                               &sc->rxring_dma_map);
        if (err)
                return (err);

        /* Load descriptor DMA memory. */
        err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
                              (void *)sc->rxring,
                              CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
                              cgem_getaddr, &sc->rxring_physaddr,
                              BUS_DMA_NOWAIT);
        if (err)
                return (err);

        /* Initialize RX descriptors. */
        for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
                sc->rxring[i].addr = CGEM_RXDESC_OWN;
                sc->rxring[i].ctl = 0;
                sc->rxring_m[i] = NULL;
                sc->rxring_m_dmamap[i] = NULL;
        }
        sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

        sc->rxring_hd_ptr = 0;
        sc->rxring_tl_ptr = 0;
        sc->rxring_queued = 0;

        /* Allocate DMA memory for TX descriptors in non-cacheable space. */
        err = bus_dmamem_alloc(sc->desc_dma_tag,
                               (void **)&sc->txring,
                               BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
                               &sc->txring_dma_map);
        if (err)
                return (err);

        /* Load TX descriptor DMA memory. */
        err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
                              (void *)sc->txring,
                              CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
                              cgem_getaddr, &sc->txring_physaddr,
                              BUS_DMA_NOWAIT);
        if (err)
                return (err);

        /* Initialize TX descriptor ring. */
        for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
                sc->txring[i].addr = 0;
                sc->txring[i].ctl = CGEM_TXDESC_USED;
                sc->txring_m[i] = NULL;
                sc->txring_m_dmamap[i] = NULL;
        }
        sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

        sc->txring_hd_ptr = 0;
        sc->txring_tl_ptr = 0;
        sc->txring_queued = 0;

        return (0);
}

/* Fill receive descriptor ring with mbufs. */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
        struct mbuf *m = NULL;
        bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
        int nsegs;

        CGEM_ASSERT_LOCKED(sc);

        while (sc->rxring_queued < sc->rxbufs) {
                /* Get a cluster mbuf. */
                m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
                if (m == NULL)
                        break;

                m->m_len = MCLBYTES;
                m->m_pkthdr.len = MCLBYTES;
                m->m_pkthdr.rcvif = sc->ifp;

                /* Load map and plug in physical address. */
                if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
                              &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) {
                        sc->rxdmamapfails++;
                        m_free(m);
                        break;
                }
                if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
                              sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
                              segs, &nsegs, BUS_DMA_NOWAIT)) {
                        sc->rxdmamapfails++;
                        bus_dmamap_destroy(sc->mbuf_dma_tag,
                                   sc->rxring_m_dmamap[sc->rxring_hd_ptr]);
                        sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL;
                        m_free(m);
                        break;
                }
                sc->rxring_m[sc->rxring_hd_ptr] = m;

                /* Sync cache with receive buffer. */
                bus_dmamap_sync(sc->mbuf_dma_tag,
                                sc->rxring_m_dmamap[sc->rxring_hd_ptr],
                                BUS_DMASYNC_PREREAD);

                /* Write rx descriptor and increment head pointer. */
                sc->rxring[sc->rxring_hd_ptr].ctl = 0;
                if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
                        sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
                                CGEM_RXDESC_WRAP;
                        sc->rxring_hd_ptr = 0;
                } else
                        sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

                sc->rxring_queued++;
        }
}

/* Pull received packets off of receive descriptor ring. */
static void
cgem_recv(struct cgem_softc *sc)
{
        if_t ifp = sc->ifp;
        struct mbuf *m, *m_hd, **m_tl;
        uint32_t ctl;

        CGEM_ASSERT_LOCKED(sc);

        /* Pick up all packets in which the OWN bit is set. */
        m_hd = NULL;
        m_tl = &m_hd;
        while (sc->rxring_queued > 0 &&
               (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {

                ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

                /* Grab filled mbuf. */
                m = sc->rxring_m[sc->rxring_tl_ptr];
                sc->rxring_m[sc->rxring_tl_ptr] = NULL;

                /* Sync cache with receive buffer. */
                bus_dmamap_sync(sc->mbuf_dma_tag,
                                sc->rxring_m_dmamap[sc->rxring_tl_ptr],
                                BUS_DMASYNC_POSTREAD);

                /* Unload and destroy dmamap. */
                bus_dmamap_unload(sc->mbuf_dma_tag,
                        sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
                bus_dmamap_destroy(sc->mbuf_dma_tag,
                                   sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
                sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL;

                /* Increment tail pointer. */
                if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
                        sc->rxring_tl_ptr = 0;
                sc->rxring_queued--;

                /* Check FCS and make sure entire packet landed in one mbuf
                 * cluster (which is much bigger than the largest ethernet
                 * packet).
                 */
                if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
                    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
                           (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
                        /* discard. */
                        m_free(m);
                        if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                        continue;
                }

                /* Ready it to hand off to upper layers. */
                m->m_data += ETHER_ALIGN;
                m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
                m->m_pkthdr.rcvif = ifp;
                m->m_pkthdr.len = m->m_len;

                /* Are we using hardware checksumming?  Check the
                 * status in the receive descriptor.
                 */
                if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
                        /* TCP or UDP checks out, IP checks out too. */
                        if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
                            CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
                            (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
                            CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
                                m->m_pkthdr.csum_flags |=
                                        CSUM_IP_CHECKED | CSUM_IP_VALID |
                                        CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                                m->m_pkthdr.csum_data = 0xffff;
                        } else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
                                   CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
                                /* Only IP checks out. */
                                m->m_pkthdr.csum_flags |=
                                        CSUM_IP_CHECKED | CSUM_IP_VALID;
                                m->m_pkthdr.csum_data = 0xffff;
                        }
                }

                /* Queue it up for delivery below. */
                *m_tl = m;
                m_tl = &m->m_next;
        }

        /* Replenish receive buffers. */
        cgem_fill_rqueue(sc);

        /* Unlock and send up packets. */
        CGEM_UNLOCK(sc);
        while (m_hd != NULL) {
                m = m_hd;
                m_hd = m_hd->m_next;
                m->m_next = NULL;
                if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
                if_input(ifp, m);
        }
        CGEM_LOCK(sc);
}

/* Find completed transmits and free their mbufs. */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
        struct mbuf *m;
        uint32_t ctl;

        CGEM_ASSERT_LOCKED(sc);

        /* free up finished transmits. */
        while (sc->txring_queued > 0 &&
               ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
                CGEM_TXDESC_USED) != 0) {

                /* Sync cache. */
                bus_dmamap_sync(sc->mbuf_dma_tag,
                                sc->txring_m_dmamap[sc->txring_tl_ptr],
                                BUS_DMASYNC_POSTWRITE);

                /* Unload and destroy DMA map. */
                bus_dmamap_unload(sc->mbuf_dma_tag,
                                  sc->txring_m_dmamap[sc->txring_tl_ptr]);
                bus_dmamap_destroy(sc->mbuf_dma_tag,
                                   sc->txring_m_dmamap[sc->txring_tl_ptr]);
                sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL;

                /* Free up the mbuf. */
                m = sc->txring_m[sc->txring_tl_ptr];
                sc->txring_m[sc->txring_tl_ptr] = NULL;
                m_freem(m);

                /* Check the status. */
                if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
                        /* Serious bus error.  Log to console. */
                        device_printf(sc->dev, "cgem_clean_tx: Whoa! "
                                   "AHB error, addr=0x%x\n",
                                   sc->txring[sc->txring_tl_ptr].addr);
                } else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
                                   CGEM_TXDESC_LATE_COLL)) != 0) {
                        if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
                } else
                        if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);

                /* If the packet spanned more than one tx descriptor,
                 * skip descriptors until we find the end so that only
                 * start-of-frame descriptors are processed.
                 */
                while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
                        if ((ctl & CGEM_TXDESC_WRAP) != 0)
                                sc->txring_tl_ptr = 0;
                        else
                                sc->txring_tl_ptr++;
                        sc->txring_queued--;

                        ctl = sc->txring[sc->txring_tl_ptr].ctl;

                        sc->txring[sc->txring_tl_ptr].ctl =
                                ctl | CGEM_TXDESC_USED;
                }

                /* Next descriptor. */
                if ((ctl & CGEM_TXDESC_WRAP) != 0)
                        sc->txring_tl_ptr = 0;
                else
                        sc->txring_tl_ptr++;
                sc->txring_queued--;

                if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
        }
}

/* Start transmits. */
static void
cgem_start_locked(if_t ifp)
{
        struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
        struct mbuf *m;
        bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
        uint32_t ctl;
        int i, nsegs, wrap, err;

        CGEM_ASSERT_LOCKED(sc);

        if ((if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0)
                return;

        for (;;) {
                /* Check that there is room in the descriptor ring. */
                if (sc->txring_queued >=
                    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {

                        /* Try to make room. */
                        cgem_clean_tx(sc);

                        /* Still no room? */
                        if (sc->txring_queued >=
                            CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
                                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
                                sc->txfull++;
                                break;
                        }
                }

                /* Grab next transmit packet. */
                m = if_dequeue(ifp);
                if (m == NULL)
                        break;

                /* Create and load DMA map. */
                if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
                              &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
                        m_freem(m);
                        sc->txdmamapfails++;
                        continue;
                }
                err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
                                      sc->txring_m_dmamap[sc->txring_hd_ptr],
                                      m, segs, &nsegs, BUS_DMA_NOWAIT);
                if (err == EFBIG) {
                        /* Too many segments!  Defrag and try again. */
                        struct mbuf *m2 = m_defrag(m, M_NOWAIT);

                        if (m2 == NULL) {
                                sc->txdefragfails++;
                                m_freem(m);
                                bus_dmamap_destroy(sc->mbuf_dma_tag,
                                   sc->txring_m_dmamap[sc->txring_hd_ptr]);
                                sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
                                continue;
                        }
                        m = m2;
                        err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
                                      sc->txring_m_dmamap[sc->txring_hd_ptr],
                                      m, segs, &nsegs, BUS_DMA_NOWAIT);
                        sc->txdefrags++;
                }
                if (err) {
                        /* Give up. */
                        m_freem(m);
                        bus_dmamap_destroy(sc->mbuf_dma_tag,
                                   sc->txring_m_dmamap[sc->txring_hd_ptr]);
                        sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
                        sc->txdmamapfails++;
                        continue;
                }
                sc->txring_m[sc->txring_hd_ptr] = m;

                /* Sync tx buffer with cache. */
                bus_dmamap_sync(sc->mbuf_dma_tag,
                                sc->txring_m_dmamap[sc->txring_hd_ptr],
                                BUS_DMASYNC_PREWRITE);

                /* Set wrap flag if next packet might run off end of ring. */
                wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
                        CGEM_NUM_TX_DESCS;

                /* Fill in the TX descriptors back to front so that USED
                 * bit in first descriptor is cleared last.
                 */
                for (i = nsegs - 1; i >= 0; i--) {
                        /* Descriptor address. */
                        sc->txring[sc->txring_hd_ptr + i].addr =
                                segs[i].ds_addr;

                        /* Descriptor control word. */
                        ctl = segs[i].ds_len;
                        if (i == nsegs - 1) {
                                ctl |= CGEM_TXDESC_LAST_BUF;
                                if (wrap)
                                        ctl |= CGEM_TXDESC_WRAP;
                        }
                        sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

                        if (i != 0)
                                sc->txring_m[sc->txring_hd_ptr + i] = NULL;
                }

                if (wrap)
                        sc->txring_hd_ptr = 0;
                else
                        sc->txring_hd_ptr += nsegs;
                sc->txring_queued += nsegs;

                /* Kick the transmitter. */
                WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
                    CGEM_NET_CTRL_START_TX);

                /* If there is a BPF listener, bounce a copy to him. */
                ETHER_BPF_MTAP(ifp, m);
        }
}

static void
cgem_start(if_t ifp)
{
        struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);

        CGEM_LOCK(sc);
        cgem_start_locked(ifp);
        CGEM_UNLOCK(sc);
}

static void
cgem_poll_hw_stats(struct cgem_softc *sc)
{
        uint32_t n;

        CGEM_ASSERT_LOCKED(sc);

        sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
        sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;

        sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
        sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
        sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
        sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
        sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
        sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
        sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
        sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
        sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
        sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
        sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);

        n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
        sc->stats.tx_single_collisn += n;
        if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
        n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
        sc->stats.tx_multi_collisn += n;
        if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
        n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
        sc->stats.tx_excsv_collisn += n;
        if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
        n = RD4(sc, CGEM_LATE_COLL);
        sc->stats.tx_late_collisn += n;
        if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);

        sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
        sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);

        sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
        sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;

        sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
        sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
        sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
        sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
        sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
        sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
        sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
        sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
        sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
        sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
        sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
        sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
        sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
        sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
        sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
        sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
        sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
        sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
        sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
        sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
        sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
        sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
}

static void
cgem_tick(void *arg)
{
        struct cgem_softc *sc = (struct cgem_softc *)arg;
        struct mii_data *mii;

        CGEM_ASSERT_LOCKED(sc);

        /* Poll the phy. */
        if (sc->miibus != NULL) {
                mii = device_get_softc(sc->miibus);
                mii_tick(mii);
        }

        /* Poll statistics registers. */
        cgem_poll_hw_stats(sc);

        /* Check for receiver hang. */
        if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
                /*
                 * Reset receiver logic by toggling RX_EN bit.  1usec
                 * delay is necessary especially when operating at 100mbps
                 * and 10mbps speeds.
                 */
                WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
                    ~CGEM_NET_CTRL_RX_EN);
                DELAY(1);
                WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
        }
        sc->rx_frames_prev = sc->stats.rx_frames;

        /* Next callout in one second. */
        callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
        struct cgem_softc *sc = (struct cgem_softc *)arg;
        if_t ifp = sc->ifp;
        uint32_t istatus;

        CGEM_LOCK(sc);

        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
                CGEM_UNLOCK(sc);
                return;
        }

        /* Read interrupt status and immediately clear the bits. */
        istatus = RD4(sc, CGEM_INTR_STAT);
        WR4(sc, CGEM_INTR_STAT, istatus);

        /* Packets received. */
        if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
                cgem_recv(sc);

        /* Free up any completed transmit buffers. */
        cgem_clean_tx(sc);

        /* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
        if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
                device_printf(sc->dev, "cgem_intr: hresp not okay! "
                              "rx_status=0x%x\n", RD4(sc, CGEM_RX_STAT));
                WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
        }

        /* Receiver overrun. */
        if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
                /* Clear status bit. */
                WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
                sc->rxoverruns++;
        }

        /* Receiver ran out of bufs. */
        if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
                WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
                    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
                cgem_fill_rqueue(sc);
                sc->rxnobufs++;
        }

        /* Restart transmitter if needed. */
        if (!if_sendq_empty(ifp))
                cgem_start_locked(ifp);

        CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

        CGEM_ASSERT_LOCKED(sc);

        WR4(sc, CGEM_NET_CTRL, 0);
        WR4(sc, CGEM_NET_CFG, 0);
        WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
        WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
        WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
        WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
        WR4(sc, CGEM_HASH_BOT, 0);
        WR4(sc, CGEM_HASH_TOP, 0);
        WR4(sc, CGEM_TX_QBAR, 0);       /* manual says do this. */
        WR4(sc, CGEM_RX_QBAR, 0);

        /* Get management port running even if interface is down. */
        WR4(sc, CGEM_NET_CFG,
            CGEM_NET_CFG_DBUS_WIDTH_32 |
            CGEM_NET_CFG_MDC_CLK_DIV_64);

        sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
        WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}

/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
        if_t ifp = sc->ifp;
        uint32_t net_cfg;
        uint32_t dma_cfg;
        u_char *eaddr = if_getlladdr(ifp);

        CGEM_ASSERT_LOCKED(sc);

        /* Program Net Config Register. */
        net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
                CGEM_NET_CFG_MDC_CLK_DIV_64 |
                CGEM_NET_CFG_FCS_REMOVE |
                CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
                CGEM_NET_CFG_GIGE_EN |
                CGEM_NET_CFG_1536RXEN |
                CGEM_NET_CFG_FULL_DUPLEX |
                CGEM_NET_CFG_SPEED100;

        /* Enable receive checksum offloading? */
        if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
                net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

        WR4(sc, CGEM_NET_CFG, net_cfg);

        /* Program DMA Config Register. */
        dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
                CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
                CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
                CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
                CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

        /* Enable transmit checksum offloading? */
        if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
                dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

        WR4(sc, CGEM_DMA_CFG, dma_cfg);

        /* Write the rx and tx descriptor ring addresses to the QBAR regs. */
        WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
        WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);

        /* Enable rx and tx. */
        sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
        WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

        /* Set receive address in case it changed. */
        WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
            (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
        WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

        /* Set up interrupts. */
        WR4(sc, CGEM_INTR_EN,
            CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
            CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
            CGEM_INTR_HRESP_NOT_OK);
}

/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
        struct mii_data *mii;

        CGEM_ASSERT_LOCKED(sc);

        if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
                return;

        cgem_config(sc);
        cgem_fill_rqueue(sc);

        if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

        mii = device_get_softc(sc->miibus);
        mii_mediachg(mii);

        callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

static void
cgem_init(void *arg)
{
        struct cgem_softc *sc = (struct cgem_softc *)arg;

        CGEM_LOCK(sc);
        cgem_init_locked(sc);
        CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
        int i;

        CGEM_ASSERT_LOCKED(sc);

        callout_stop(&sc->tick_ch);

        /* Shut down hardware. */
        cgem_reset(sc);

        /* Clear out transmit queue. */
        for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
                sc->txring[i].ctl = CGEM_TXDESC_USED;
                sc->txring[i].addr = 0;
                if (sc->txring_m[i]) {
                        /* Unload and destroy dmamap. */
                        bus_dmamap_unload(sc->mbuf_dma_tag,
                                          sc->txring_m_dmamap[i]);
                        bus_dmamap_destroy(sc->mbuf_dma_tag,
                                           sc->txring_m_dmamap[i]);
                        sc->txring_m_dmamap[i] = NULL;
                        m_freem(sc->txring_m[i]);
                        sc->txring_m[i] = NULL;
                }
        }
        sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

        sc->txring_hd_ptr = 0;
        sc->txring_tl_ptr = 0;
        sc->txring_queued = 0;

        /* Clear out receive queue. */
        for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
                sc->rxring[i].addr = CGEM_RXDESC_OWN;
                sc->rxring[i].ctl = 0;
                if (sc->rxring_m[i]) {
                        /* Unload and destroy dmamap. */
                        bus_dmamap_unload(sc->mbuf_dma_tag,
                                  sc->rxring_m_dmamap[i]);
                        bus_dmamap_destroy(sc->mbuf_dma_tag,
                                   sc->rxring_m_dmamap[i]);
                        sc->rxring_m_dmamap[i] = NULL;

                        m_freem(sc->rxring_m[i]);
                        sc->rxring_m[i] = NULL;
                }
        }
        sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

        sc->rxring_hd_ptr = 0;
        sc->rxring_tl_ptr = 0;
        sc->rxring_queued = 0;

        /* Force next statchg or linkchg to program net config register. */
        sc->mii_media_active = 0;
}

static int
cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
        struct cgem_softc *sc = if_getsoftc(ifp);
        struct ifreq *ifr = (struct ifreq *)data;
        struct mii_data *mii;
        int error = 0, mask;

        switch (cmd) {
        case SIOCSIFFLAGS:
                CGEM_LOCK(sc);
                if ((if_getflags(ifp) & IFF_UP) != 0) {
                        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                                if (((if_getflags(ifp) ^ sc->if_old_flags) &
                                     (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
                                        cgem_rx_filter(sc);
                                }
                        } else {
                                cgem_init_locked(sc);
                        }
                } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                        if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
                        cgem_stop(sc);
                }
                sc->if_old_flags = if_getflags(ifp);
                CGEM_UNLOCK(sc);
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                /* Set up multi-cast filters. */
                if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                        CGEM_LOCK(sc);
                        cgem_rx_filter(sc);
                        CGEM_UNLOCK(sc);
                }
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                mii = device_get_softc(sc->miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
                break;

        case SIOCSIFCAP:
                CGEM_LOCK(sc);
                mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;

                if ((mask & IFCAP_TXCSUM) != 0) {
                        if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
                                /* Turn on TX checksumming. */
                                if_setcapenablebit(ifp, IFCAP_TXCSUM |
                                                   IFCAP_TXCSUM_IPV6, 0);
                                if_sethwassistbits(ifp, CGEM_CKSUM_ASSIST, 0);

                                WR4(sc, CGEM_DMA_CFG,
                                    RD4(sc, CGEM_DMA_CFG) |
                                     CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
                        } else {
                                /* Turn off TX checksumming. */
                                if_setcapenablebit(ifp, 0, IFCAP_TXCSUM |
                                                   IFCAP_TXCSUM_IPV6);
                                if_sethwassistbits(ifp, 0, CGEM_CKSUM_ASSIST);

                                WR4(sc, CGEM_DMA_CFG,
                                    RD4(sc, CGEM_DMA_CFG) &
                                     ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
                        }
                }
                if ((mask & IFCAP_RXCSUM) != 0) {
                        if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
                                /* Turn on RX checksumming. */
                                if_setcapenablebit(ifp, IFCAP_RXCSUM |
                                                   IFCAP_RXCSUM_IPV6, 0);
                                WR4(sc, CGEM_NET_CFG,
                                    RD4(sc, CGEM_NET_CFG) |
                                     CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
                        } else {
                                /* Turn off RX checksumming. */
                                if_setcapenablebit(ifp, 0, IFCAP_RXCSUM |
                                                   IFCAP_RXCSUM_IPV6);
                                WR4(sc, CGEM_NET_CFG,
                                    RD4(sc, CGEM_NET_CFG) &
                                     ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
                        }
                }
                if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
                    (IFCAP_RXCSUM | IFCAP_TXCSUM))
                        if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0);
                else
                        if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM);

                CGEM_UNLOCK(sc);
                break;
        default:
                error = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (error);
}

/* MII bus support routines. */
static void
cgem_child_detached(device_t dev, device_t child)
{
        struct cgem_softc *sc = device_get_softc(dev);

        if (child == sc->miibus)
                sc->miibus = NULL;
}

static int
cgem_ifmedia_upd(if_t ifp)
{
        struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
        struct mii_data *mii;
        struct mii_softc *miisc;
        int error = 0;

        mii = device_get_softc(sc->miibus);
        CGEM_LOCK(sc);
        if ((if_getflags(ifp) & IFF_UP) != 0) {
                LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
                        PHY_RESET(miisc);
                error = mii_mediachg(mii);
        }
        CGEM_UNLOCK(sc);

        return (error);
}

static void
cgem_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
        struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
        struct mii_data *mii;

        mii = device_get_softc(sc->miibus);
        CGEM_LOCK(sc);
        mii_pollstat(mii);
        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;
        CGEM_UNLOCK(sc);
}

static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
        struct cgem_softc *sc = device_get_softc(dev);
        int tries, val;

        WR4(sc, CGEM_PHY_MAINT,
            CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
            CGEM_PHY_MAINT_OP_READ |
            (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
            (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

        /* Wait for completion. */
        tries = 0;
        while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
                DELAY(5);
                if (++tries > 200) {
                        device_printf(dev, "phy read timeout: %d\n", reg);
                        return (-1);
                }
        }

        val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

        if (reg == MII_EXTSR)
                /*
                 * MAC does not support half-duplex at gig speeds.
                 * Let mii(4) exclude the capability.
                 */
                val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);

        return (val);
}

static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
        struct cgem_softc *sc = device_get_softc(dev);
        int tries;

        WR4(sc, CGEM_PHY_MAINT,
            CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
            CGEM_PHY_MAINT_OP_WRITE |
            (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
            (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
            (data & CGEM_PHY_MAINT_DATA_MASK));

        /* Wait for completion. */
        tries = 0;
        while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
                DELAY(5);
                if (++tries > 200) {
                        device_printf(dev, "phy write timeout: %d\n", reg);
                        return (-1);
                }
        }

        return (0);
}
1368
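/*
 * The statchg and linkchg MII callbacks below are identical: either
 * event funnels into cgem_mediachange() so the MAC's speed, duplex,
 * and reference clock track the PHY's resolved media.
 */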
1369 static void
1370 cgem_miibus_statchg(device_t dev)
1371 {
1372         struct cgem_softc *sc = device_get_softc(dev);
1373         struct mii_data *mii = device_get_softc(sc->miibus);
1374
1375         CGEM_ASSERT_LOCKED(sc);
1376
1377         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1378             (IFM_ACTIVE | IFM_AVALID) &&
1379             sc->mii_media_active != mii->mii_media_active)
1380                 cgem_mediachange(sc, mii);
1381 }
1382
1383 static void
1384 cgem_miibus_linkchg(device_t dev)
1385 {
1386         struct cgem_softc *sc = device_get_softc(dev);
1387         struct mii_data *mii = device_get_softc(sc->miibus);
1388
1389         CGEM_ASSERT_LOCKED(sc);
1390
1391         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1392             (IFM_ACTIVE | IFM_AVALID) &&
1393             sc->mii_media_active != mii->mii_media_active)
1394                 cgem_mediachange(sc, mii);
1395 }
1396
1397 /*
1398  * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
1399  * provide a function to set the cgem's reference clock.
1400  */
1401 static int __used
1402 cgem_default_set_ref_clk(int unit, int frequency)
1403 {
1404
1405         return (0);
1406 }
1407 __weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);
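
/*
 * A platform overrides the weak default by supplying a strong symbol of
 * the same name.  Illustrative sketch only -- cgem_set_gem_clk() is a
 * hypothetical helper; on Zynq the real override lives with the SLCR
 * clock-control code:
 *
 *      int
 *      cgem_set_ref_clk(int unit, int frequency)
 *      {
 *
 *              return (cgem_set_gem_clk(unit, frequency));
 *      }
 */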
1408
1409 /* Call to set reference clock and network config bits according to media. */
1410 static void
1411 cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
1412 {
1413         uint32_t net_cfg;
1414         int ref_clk_freq;
1415
1416         CGEM_ASSERT_LOCKED(sc);
1417
1418         /* Update hardware to reflect media. */
1419         net_cfg = RD4(sc, CGEM_NET_CFG);
1420         net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
1421                      CGEM_NET_CFG_FULL_DUPLEX);
1422
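        /*
         * Per MII/GMII clocking rules, the TX reference clock must run
         * at 125 MHz for 1000Mb/s, 25 MHz for 100Mb/s, and 2.5 MHz for
         * 10Mb/s operation.
         */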
1423         switch (IFM_SUBTYPE(mii->mii_media_active)) {
1424         case IFM_1000_T:
1425                 net_cfg |= (CGEM_NET_CFG_SPEED100 |
1426                             CGEM_NET_CFG_GIGE_EN);
1427                 ref_clk_freq = 125000000;
1428                 break;
1429         case IFM_100_TX:
1430                 net_cfg |= CGEM_NET_CFG_SPEED100;
1431                 ref_clk_freq = 25000000;
1432                 break;
1433         default:
1434                 ref_clk_freq = 2500000;
1435         }
1436
1437         if ((mii->mii_media_active & IFM_FDX) != 0)
1438                 net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;
1439
1440         WR4(sc, CGEM_NET_CFG, net_cfg);
1441
1442         /* Set the reference clock if necessary. */
1443         if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq))
1444                 device_printf(sc->dev, "cgem_mediachange: "
1445                               "could not set ref clk%d to %d.\n",
1446                               sc->ref_clk_num, ref_clk_freq);
1447
1448         sc->mii_media_active = mii->mii_media_active;
1449 }
1450
1451 static void
1452 cgem_add_sysctls(device_t dev)
1453 {
1454         struct cgem_softc *sc = device_get_softc(dev);
1455         struct sysctl_ctx_list *ctx;
1456         struct sysctl_oid_list *child;
1457         struct sysctl_oid *tree;
1458
1459         ctx = device_get_sysctl_ctx(dev);
1460         child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
1461
1462         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW,
1463                        &sc->rxbufs, 0,
1464                        "Number receive buffers to provide");
1465
1466         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW,
1467                        &sc->rxhangwar, 0,
1468                        "Enable receive hang work-around");
1469
1470         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD,
1471                         &sc->rxoverruns, 0,
1472                         "Receive overrun events");
1473
1474         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD,
1475                         &sc->rxnobufs, 0,
1476                         "Receive buf queue empty events");
1477
1478         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD,
1479                         &sc->rxdmamapfails, 0,
1480                         "Receive DMA map failures");
1481
1482         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD,
1483                         &sc->txfull, 0,
1484                         "Transmit ring full events");
1485
1486         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD,
1487                         &sc->txdmamapfails, 0,
1488                         "Transmit DMA map failures");
1489
1490         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD,
1491                         &sc->txdefrags, 0,
1492                         "Transmit m_defrag() calls");
1493
1494         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD,
1495                         &sc->txdefragfails, 0,
1496                         "Transmit m_defrag() failures");
1497
1498         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
1499                                NULL, "GEM statistics");
1500         child = SYSCTL_CHILDREN(tree);
1501
1502         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD,
1503                          &sc->stats.tx_bytes, "Total bytes transmitted");
1504
1505         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD,
1506                         &sc->stats.tx_frames, 0, "Total frames transmitted");
1507         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD,
1508                         &sc->stats.tx_frames_bcast, 0,
1509                         "Number broadcast frames transmitted");
1510         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD,
1511                         &sc->stats.tx_frames_multi, 0,
1512                         "Number multicast frames transmitted");
1513         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause",
1514                         CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
1515                         "Number pause frames transmitted");
1516         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD,
1517                         &sc->stats.tx_frames_64b, 0,
1518                         "Number frames transmitted of size 64 bytes or less");
1519         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD,
1520                         &sc->stats.tx_frames_65to127b, 0,
1521                         "Number frames transmitted of size 65-127 bytes");
1522         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b",
1523                         CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
1524                         "Number frames transmitted of size 128-255 bytes");
1525         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b",
1526                         CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
1527                         "Number frames transmitted of size 256-511 bytes");
1528         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b",
1529                         CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
1530                         "Number frames transmitted of size 512-1023 bytes");
1531         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b",
1532                         CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
1533                         "Number frames transmitted of size 1024-1536 bytes");
1534         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs",
1535                         CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
1536                         "Number transmit under-run events");
1537         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn",
1538                         CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
1539                         "Number single-collision transmit frames");
1540         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn",
1541                         CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
1542                         "Number multi-collision transmit frames");
1543         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn",
1544                         CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
1545                         "Number excessive collision transmit frames");
1546         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn",
1547                         CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
1548                         "Number late-collision transmit frames");
1549         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames",
1550                         CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
1551                         "Number deferred transmit frames");
1552         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs",
1553                         CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
1554                         "Number carrier sense errors on transmit");
1555
1556         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD,
1557                          &sc->stats.rx_bytes, "Total bytes received");
1558
1559         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD,
1560                         &sc->stats.rx_frames, 0, "Total frames received");
1561         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast",
1562                         CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
1563                         "Number broadcast frames received");
1564         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi",
1565                         CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
1566                         "Number multicast frames received");
1567         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause",
1568                         CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
1569                         "Number pause frames received");
1570         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b",
1571                         CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
1572                         "Number frames received of size 64 bytes or less");
1573         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b",
1574                         CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
1575                         "Number frames received of size 65-127 bytes");
1576         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b",
1577                         CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
1578                         "Number frames received of size 128-255 bytes");
1579         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b",
1580                         CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
1581                         "Number frames received of size 256-511 bytes");
1582         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b",
1583                         CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
1584                         "Number frames received of size 512-1023 bytes");
1585         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b",
1586                         CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
1587                         "Number frames received of size 1024-1536 bytes");
1588         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize",
1589                         CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
1590                         "Number undersize frames received");
1591         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize",
1592                         CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
1593                         "Number oversize frames received");
1594         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber",
1595                         CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
1596                         "Number jabber frames received");
1597         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs",
1598                         CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
1599                         "Number frames received with FCS errors");
1600         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs",
1601                         CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
1602                         "Number frames received with length errors");
1603         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs",
1604                         CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
1605                         "Number receive symbol errors");
1606         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs",
1607                         CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
1608                         "Number receive alignment errors");
1609         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs",
1610                         CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
1611                         "Number frames received when no rx buffer available");
1612         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs",
1613                         CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
1614                         "Number frames received but not copied due to "
1615                         "receive overrun");
1616         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs",
1617                         CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
1618                         "Number frames received with IP header checksum "
1619                         "errors");
1620         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs",
1621                         CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
1622                         "Number frames received with TCP checksum errors");
1623         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs",
1624                         CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
1625                         "Number frames received with UDP checksum errors");
1626 }
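
/*
 * The knobs and counters above appear under the device's sysctl tree,
 * e.g. (paths assuming unit 0):
 *
 *      sysctl dev.cgem.0.stats
 *      sysctl dev.cgem.0.rxbufs=128
 */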
1627
1628
1629 static int
1630 cgem_probe(device_t dev)
1631 {
1632
1633         if (!ofw_bus_status_okay(dev))
1634                 return (ENXIO);
1635
1636         if (!ofw_bus_is_compatible(dev, "cadence,gem"))
1637                 return (ENXIO);
1638
1639         device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
1640         return (0);
1641 }
1642
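/*
 * Attach: allocate the memory and interrupt resources, create the
 * ifnet, attach the PHY via mii(4), set up the descriptor rings, and
 * hook the interrupt handler last, once the softc is fully initialized.
 */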
1643 static int
1644 cgem_attach(device_t dev)
1645 {
1646         struct cgem_softc *sc = device_get_softc(dev);
1647         if_t ifp = NULL;
1648         phandle_t node;
1649         pcell_t cell;
1650         int rid, err;
1651         u_char eaddr[ETHER_ADDR_LEN];
1652
1653         sc->dev = dev;
1654         CGEM_LOCK_INIT(sc);
1655
1656         /* Get reference clock number from fdt. */
1657         node = ofw_bus_get_node(dev);
1658         sc->ref_clk_num = 0;
1659         if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0)
1660                 sc->ref_clk_num = fdt32_to_cpu(cell);
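
        /*
         * Illustrative FDT node (the "cadence,gem" compatible string
         * and "ref-clock-num" property are what the driver keys on;
         * the node address, reg, and interrupt values here are
         * hypothetical):
         *
         *      ethernet@e000b000 {
         *              compatible = "cadence,gem";
         *              reg = <0xe000b000 0x1000>;
         *              interrupts = <54>;
         *              ref-clock-num = <0>;
         *      };
         */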
1661
1662         /* Get memory resource. */
1663         rid = 0;
1664         sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1665                                              RF_ACTIVE);
1666         if (sc->mem_res == NULL) {
1667                 device_printf(dev, "could not allocate memory resources.\n");
1668                 return (ENOMEM);
1669         }
1670
1671         /* Get IRQ resource. */
1672         rid = 0;
1673         sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1674                                              RF_ACTIVE);
1675         if (sc->irq_res == NULL) {
1676                 device_printf(dev, "could not allocate interrupt resource.\n");
1677                 cgem_detach(dev);
1678                 return (ENOMEM);
1679         }
1680
1681         /* Set up ifnet structure. */
1682         ifp = sc->ifp = if_alloc(IFT_ETHER);
1683         if (ifp == NULL) {
1684                 device_printf(dev, "could not allocate ifnet structure\n");
1685                 cgem_detach(dev);
1686                 return (ENOMEM);
1687         }
1688         if_setsoftc(ifp, sc);
1689         if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
1690         if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
1691         if_setinitfn(ifp, cgem_init);
1692         if_setioctlfn(ifp, cgem_ioctl);
1693         if_setstartfn(ifp, cgem_start);
1694         if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
1695                               IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0);
1696         if_setsendqlen(ifp, CGEM_NUM_TX_DESCS);
1697         if_setsendqready(ifp);
1698
1699         /* Disable hardware checksumming by default. */
1700         if_sethwassist(ifp, 0);
1701         if_setcapenable(ifp, if_getcapabilities(ifp) &
1702                 ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM));
1703
1704         sc->if_old_flags = if_getflags(ifp);
1705         sc->rxbufs = DEFAULT_NUM_RX_BUFS;
1706         sc->rxhangwar = 1;
1707
1708         /* Reset hardware. */
1709         CGEM_LOCK(sc);
1710         cgem_reset(sc);
1711         CGEM_UNLOCK(sc);
1712
1713         /* Attach phy to mii bus. */
1714         err = mii_attach(dev, &sc->miibus, ifp,
1715                          cgem_ifmedia_upd, cgem_ifmedia_sts,
1716                          BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
1717         if (err) {
1718                 device_printf(dev, "attaching PHYs failed\n");
1719                 cgem_detach(dev);
1720                 return (err);
1721         }
1722
1723         /* Set up TX and RX descriptor area. */
1724         err = cgem_setup_descs(sc);
1725         if (err) {
1726                 device_printf(dev, "could not set up dma mem for descs.\n");
1727                 cgem_detach(dev);
1728                 return (ENOMEM);
1729         }
1730
1731         /* Get a MAC address. */
1732         cgem_get_mac(sc, eaddr);
1733
1734         /* Start ticks. */
1735         callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
1736
1737         ether_ifattach(ifp, eaddr);
1738
1739         err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
1740                              INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
1741         if (err) {
1742                 device_printf(dev, "could not set interrupt handler.\n");
1743                 ether_ifdetach(ifp);
1744                 cgem_detach(dev);
1745                 return (err);
1746         }
1747
1748         cgem_add_sysctls(dev);
1749
1750         return (0);
1751 }
1752
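/*
 * Detach: stop the hardware and drain the tick callout before tearing
 * down the ifnet, then release interrupt, bus, and DMA resources in
 * roughly the reverse order of attach.
 */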
1753 static int
1754 cgem_detach(device_t dev)
1755 {
1756         struct cgem_softc *sc = device_get_softc(dev);
1757         int i;
1758
1759         if (sc == NULL)
1760                 return (ENODEV);
1761
1762         if (device_is_attached(dev)) {
1763                 CGEM_LOCK(sc);
1764                 cgem_stop(sc);
1765                 CGEM_UNLOCK(sc);
1766                 callout_drain(&sc->tick_ch);
1767                 if_setflagbits(sc->ifp, 0, IFF_UP);
1768                 ether_ifdetach(sc->ifp);
1769         }
1770
1771         if (sc->miibus != NULL) {
1772                 device_delete_child(dev, sc->miibus);
1773                 sc->miibus = NULL;
1774         }
1775
1776         /* Release resources. */
1777         if (sc->mem_res != NULL) {
1778                 bus_release_resource(dev, SYS_RES_MEMORY,
1779                                      rman_get_rid(sc->mem_res), sc->mem_res);
1780                 sc->mem_res = NULL;
1781         }
1782         if (sc->irq_res != NULL) {
1783                 if (sc->intrhand)
1784                         bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
1785                 bus_release_resource(dev, SYS_RES_IRQ,
1786                                      rman_get_rid(sc->irq_res), sc->irq_res);
1787                 sc->irq_res = NULL;
1788         }
1789
1790         /* Release DMA resources. */
1791         if (sc->rxring != NULL) {
1792                 if (sc->rxring_physaddr != 0) {
1793                         bus_dmamap_unload(sc->desc_dma_tag,
1794                                           sc->rxring_dma_map);
1795                         sc->rxring_physaddr = 0;
1796                 }
1797                 bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
1798                                 sc->rxring_dma_map);
1799                 sc->rxring = NULL;
1800                 for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
1801                         if (sc->rxring_m_dmamap[i] != NULL) {
1802                                 bus_dmamap_destroy(sc->mbuf_dma_tag,
1803                                                    sc->rxring_m_dmamap[i]);
1804                                 sc->rxring_m_dmamap[i] = NULL;
1805                         }
1806         }
1807         if (sc->txring != NULL) {
1808                 if (sc->txring_physaddr != 0) {
1809                         bus_dmamap_unload(sc->desc_dma_tag,
1810                                           sc->txring_dma_map);
1811                         sc->txring_physaddr = 0;
1812                 }
1813                 bus_dmamem_free(sc->desc_dma_tag, sc->txring,
1814                                 sc->txring_dma_map);
1815                 sc->txring = NULL;
1816                 for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
1817                         if (sc->txring_m_dmamap[i] != NULL) {
1818                                 bus_dmamap_destroy(sc->mbuf_dma_tag,
1819                                                    sc->txring_m_dmamap[i]);
1820                                 sc->txring_m_dmamap[i] = NULL;
1821                         }
1822         }
1823         if (sc->desc_dma_tag != NULL) {
1824                 bus_dma_tag_destroy(sc->desc_dma_tag);
1825                 sc->desc_dma_tag = NULL;
1826         }
1827         if (sc->mbuf_dma_tag != NULL) {
1828                 bus_dma_tag_destroy(sc->mbuf_dma_tag);
1829                 sc->mbuf_dma_tag = NULL;
1830         }
1831
1832         bus_generic_detach(dev);
1833
1834         CGEM_LOCK_DESTROY(sc);
1835
1836         return (0);
1837 }
1838
1839 static device_method_t cgem_methods[] = {
1840         /* Device interface */
1841         DEVMETHOD(device_probe,         cgem_probe),
1842         DEVMETHOD(device_attach,        cgem_attach),
1843         DEVMETHOD(device_detach,        cgem_detach),
1844
1845         /* Bus interface */
1846         DEVMETHOD(bus_child_detached,   cgem_child_detached),
1847
1848         /* MII interface */
1849         DEVMETHOD(miibus_readreg,       cgem_miibus_readreg),
1850         DEVMETHOD(miibus_writereg,      cgem_miibus_writereg),
1851         DEVMETHOD(miibus_statchg,       cgem_miibus_statchg),
1852         DEVMETHOD(miibus_linkchg,       cgem_miibus_linkchg),
1853
1854         DEVMETHOD_END
1855 };
1856
1857 static driver_t cgem_driver = {
1858         "cgem",
1859         cgem_methods,
1860         sizeof(struct cgem_softc),
1861 };
1862
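/*
 * The device attaches under simplebus (the FDT pseudo-bus); the module
 * dependencies below make the loader resolve miibus and ether before
 * cgem initializes.
 */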
1863 DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
1864 DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
1865 MODULE_DEPEND(cgem, miibus, 1, 1, 1);
1866 MODULE_DEPEND(cgem, ether, 1, 1, 1);