/*-
 * Copyright (c) 2012-2013 Thomas Skibo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A network interface driver for the Cadence GEM Gigabit Ethernet
 * controller, such as the one used in the Xilinx Zynq-7000 SoC.
 *
 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
 * (v1.4) November 16, 2012.  Xilinx doc UG585.  The GEM is covered in
 * Ch. 16 and register definitions are in appendix B.18.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/cadence/if_cgem_hw.h>

#include "miibus_if.h"

#define IF_CGEM_NAME "cgem"

#define CGEM_NUM_RX_DESCS       256     /* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS       256     /* size of transmit descriptor ring */

#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
                                CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))

/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS     64      /* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS         4       /* maximum segs in a tx mbuf dma */

#define CGEM_CKSUM_ASSIST       (CSUM_IP | CSUM_TCP | CSUM_UDP | \
                                 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

struct cgem_softc {
        struct ifnet            *ifp;
        struct mtx              sc_mtx;
        device_t                dev;
        device_t                miibus;
        int                     if_old_flags;
        struct resource         *mem_res;
        struct resource         *irq_res;
        void                    *intrhand;
        struct callout          tick_ch;
        uint32_t                net_ctl_shadow;
        u_char                  eaddr[6];

        bus_dma_tag_t           desc_dma_tag;
        bus_dma_tag_t           mbuf_dma_tag;

        /* receive descriptor ring */
        struct cgem_rx_desc     *rxring;
        bus_addr_t              rxring_physaddr;
        struct mbuf             *rxring_m[CGEM_NUM_RX_DESCS];
        bus_dmamap_t            rxring_m_dmamap[CGEM_NUM_RX_DESCS];
        int                     rxring_hd_ptr;  /* where to put rcv bufs */
        int                     rxring_tl_ptr;  /* where to get receives */
        int                     rxring_queued;  /* how many rcv bufs queued */
        bus_dmamap_t            rxring_dma_map;
        int                     rxbufs;         /* tunable number rcv bufs */
        int                     rxoverruns;     /* rx ring overruns */

        /* transmit descriptor ring */
        struct cgem_tx_desc     *txring;
        bus_addr_t              txring_physaddr;
        struct mbuf             *txring_m[CGEM_NUM_TX_DESCS];
        bus_dmamap_t            txring_m_dmamap[CGEM_NUM_TX_DESCS];
        int                     txring_hd_ptr;  /* where to put next xmits */
        int                     txring_tl_ptr;  /* next xmit mbuf to free */
        int                     txring_queued;  /* num xmit segs queued */
        bus_dmamap_t            txring_dma_map;
};

#define RD4(sc, off)            (bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)       (bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
        (bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc)           mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)         mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)      \
        mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
                 MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)   mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)  mtx_assert(&(sc)->sc_mtx, MA_OWNED)

static devclass_t cgem_devclass;

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
        int i;
        uint32_t rnd;

        /* See if boot loader gave us a MAC address already. */
        for (i = 0; i < 4; i++) {
                uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
                uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
                if (low != 0 || high != 0) {
                        eaddr[0] = low & 0xff;
                        eaddr[1] = (low >> 8) & 0xff;
                        eaddr[2] = (low >> 16) & 0xff;
                        eaddr[3] = (low >> 24) & 0xff;
                        eaddr[4] = high & 0xff;
                        eaddr[5] = (high >> 8) & 0xff;
                        break;
                }
        }

        /* No MAC from boot loader?  Assign a random one. */
        if (i == 4) {
                rnd = arc4random();

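                /* Note: 'b' is 0x62, which has the locally-administered
                 * bit (0x02) set and the multicast bit (0x01) clear, so
                 * the resulting address is a valid unicast MAC.
                 */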
                eaddr[0] = 'b';
                eaddr[1] = 's';
                eaddr[2] = 'd';
                eaddr[3] = (rnd >> 16) & 0xff;
                eaddr[4] = (rnd >> 8) & 0xff;
                eaddr[5] = rnd & 0xff;

                device_printf(sc->dev, "no mac address found, assigning "
                              "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
                              eaddr[0], eaddr[1], eaddr[2],
                              eaddr[3], eaddr[4], eaddr[5]);

                WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
                    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
                WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
        }
}

/* cgem_mac_hash():  map 48-bit address to a 6-bit hash.
 * The 6-bit hash corresponds to a bit in a 64-bit hash
 * register.  Setting that bit in the hash register enables
 * reception of all frames with a destination address that hashes
 * to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
 */
static int
cgem_mac_hash(u_char eaddr[])
{
        int hash;
        int i, j;

        hash = 0;
        for (i = 0; i < 6; i++)
                for (j = i; j < 48; j += 6)
                        if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
                                hash ^= (1 << i);

        return (hash);
}
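
/* Illustrative sketch, not part of the driver: a userland-style check
 * of the hash function above.  Each hash bit XORs eight address bits
 * (bits i, i+6, ..., i+42), so the all-ones broadcast address XORs
 * eight 1s per bit and hashes to 0.  Kept under #if 0 so it is never
 * compiled into the kernel.
 */
#if 0
#include <stdio.h>

int
main(void)
{
        u_char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

        printf("hash = 0x%02x\n", cgem_mac_hash(bcast));  /* prints 0x00 */
        return (0);
}
#endif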

/* After any change in rx flags or multi-cast addresses, set up
 * hash registers and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
        struct ifnet *ifp = sc->ifp;
        struct ifmultiaddr *ifma;
        int index;
        uint32_t hash_hi, hash_lo;
        uint32_t net_cfg;

        hash_hi = 0;
        hash_lo = 0;

        net_cfg = RD4(sc, CGEM_NET_CFG);

        net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
                     CGEM_NET_CFG_NO_BCAST |
                     CGEM_NET_CFG_COPY_ALL);

        if ((ifp->if_flags & IFF_PROMISC) != 0)
                net_cfg |= CGEM_NET_CFG_COPY_ALL;
        else {
                if ((ifp->if_flags & IFF_BROADCAST) == 0)
                        net_cfg |= CGEM_NET_CFG_NO_BCAST;
                if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
                        hash_hi = 0xffffffff;
                        hash_lo = 0xffffffff;
                } else {
                        if_maddr_rlock(ifp);
                        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                                if (ifma->ifma_addr->sa_family != AF_LINK)
                                        continue;
                                index = cgem_mac_hash(
                                        LLADDR((struct sockaddr_dl *)
                                               ifma->ifma_addr));
                                if (index > 31)
                                        hash_hi |= (1 << (index - 32));
                                else
                                        hash_lo |= (1 << index);
                        }
                        if_maddr_runlock(ifp);
                }

                if (hash_hi != 0 || hash_lo != 0)
                        net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN;
        }

        WR4(sc, CGEM_HASH_TOP, hash_hi);
        WR4(sc, CGEM_HASH_BOT, hash_lo);
        WR4(sc, CGEM_NET_CFG, net_cfg);
}
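
/* Illustrative sketch, not part of the driver: how a 6-bit hash value
 * selects a bit across the two 32-bit hash registers written above.
 * The index 37 is an assumed example value.
 */
#if 0
static void
cgem_hash_example(void)
{
        uint32_t hash_hi = 0, hash_lo = 0;
        int index = 37;                 /* assumed example hash value */

        if (index > 31)
                hash_hi |= (1 << (index - 32)); /* bit 5 of CGEM_HASH_TOP */
        else
                hash_lo |= (1 << index);        /* bit index of CGEM_HASH_BOT */
}
#endif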

/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

        if (nsegs != 1 || error != 0)
                return;
        *(bus_addr_t *)arg = segs[0].ds_addr;
}

/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
        int i, err;

        sc->txring = NULL;
        sc->rxring = NULL;

        /* Create a DMA tag for the RX and TX descriptor rings; the rings
         * themselves are allocated from non-cached memory below.
         */
        err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
                                 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                                 BUS_SPACE_MAXADDR,     /* highaddr */
                                 NULL, NULL,            /* filter, filterarg */
                                 MAX_DESC_RING_SIZE,    /* maxsize */
                                 1,                     /* nsegments */
                                 MAX_DESC_RING_SIZE,    /* maxsegsize */
                                 0,                     /* flags */
                                 busdma_lock_mutex,     /* lockfunc */
                                 &sc->sc_mtx,           /* lockarg */
                                 &sc->desc_dma_tag);
        if (err)
                return (err);

        /* Set up a bus_dma_tag for mbufs. */
        err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
                                 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                                 BUS_SPACE_MAXADDR,     /* highaddr */
                                 NULL, NULL,            /* filter, filterarg */
                                 MCLBYTES,              /* maxsize */
                                 TX_MAX_DMA_SEGS,       /* nsegments */
                                 MCLBYTES,              /* maxsegsize */
                                 0,                     /* flags */
                                 busdma_lock_mutex,     /* lockfunc */
                                 &sc->sc_mtx,           /* lockarg */
                                 &sc->mbuf_dma_tag);
        if (err)
                return (err);

        /* Allocate DMA memory in non-cacheable space. */
        err = bus_dmamem_alloc(sc->desc_dma_tag,
                               (void **)&sc->rxring,
                               BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
                               &sc->rxring_dma_map);
        if (err)
                return (err);

        /* Load descriptor DMA memory. */
        err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
                              (void *)sc->rxring,
                              CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
                              cgem_getaddr, &sc->rxring_physaddr,
                              BUS_DMA_NOWAIT);
        if (err)
                return (err);

        /* Initialize RX descriptors. */
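        /* Per the Zynq-7000 TRM, a set OWN bit in a receive descriptor
         * means software owns it; the controller only writes a frame into
         * a descriptor whose OWN bit is clear.  Starting with OWN set
         * keeps the ring inert until cgem_fill_rqueue() plugs in buffer
         * addresses.
         */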
        for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
                sc->rxring[i].addr = CGEM_RXDESC_OWN;
                sc->rxring[i].ctl = 0;
                sc->rxring_m[i] = NULL;
                err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
                                        &sc->rxring_m_dmamap[i]);
                if (err)
                        return (err);
        }
        sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

        sc->rxring_hd_ptr = 0;
        sc->rxring_tl_ptr = 0;
        sc->rxring_queued = 0;

        /* Allocate DMA memory for TX descriptors in non-cacheable space. */
        err = bus_dmamem_alloc(sc->desc_dma_tag,
                               (void **)&sc->txring,
                               BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
                               &sc->txring_dma_map);
        if (err)
                return (err);

        /* Load TX descriptor DMA memory. */
        err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
                              (void *)sc->txring,
                              CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
                              cgem_getaddr, &sc->txring_physaddr,
                              BUS_DMA_NOWAIT);
        if (err)
                return (err);

        /* Initialize TX descriptor ring. */
        for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
                sc->txring[i].addr = 0;
                sc->txring[i].ctl = CGEM_TXDESC_USED;
                sc->txring_m[i] = NULL;
                err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
                                        &sc->txring_m_dmamap[i]);
                if (err)
                        return (err);
        }
        sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

        sc->txring_hd_ptr = 0;
        sc->txring_tl_ptr = 0;
        sc->txring_queued = 0;

        return (0);
}

/* Fill receive descriptor ring with mbufs. */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
        struct mbuf *m = NULL;
        bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
        int nsegs;

        CGEM_ASSERT_LOCKED(sc);

        while (sc->rxring_queued < sc->rxbufs) {
                /* Get a cluster mbuf. */
                m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
                if (m == NULL)
                        break;

                m->m_len = MCLBYTES;
                m->m_pkthdr.len = MCLBYTES;
                m->m_pkthdr.rcvif = sc->ifp;

                /* Load map and plug in physical address. */
                if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
                              sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
                              segs, &nsegs, BUS_DMA_NOWAIT)) {
                        /* XXX: warn? */
                        m_free(m);
                        break;
                }
                sc->rxring_m[sc->rxring_hd_ptr] = m;

                /* Sync cache with receive buffer. */
                bus_dmamap_sync(sc->mbuf_dma_tag,
                                sc->rxring_m_dmamap[sc->rxring_hd_ptr],
                                BUS_DMASYNC_PREREAD);

                /* Write rx descriptor and increment head pointer. */
                sc->rxring[sc->rxring_hd_ptr].ctl = 0;
                if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
                        sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
                                CGEM_RXDESC_WRAP;
                        sc->rxring_hd_ptr = 0;
                } else
                        sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

                sc->rxring_queued++;
        }
}

/* Pull received packets off of receive descriptor ring. */
static void
cgem_recv(struct cgem_softc *sc)
{
        struct ifnet *ifp = sc->ifp;
        struct mbuf *m;
        uint32_t ctl;

        CGEM_ASSERT_LOCKED(sc);

        /* Pick up all packets in which the OWN bit is set. */
        while (sc->rxring_queued > 0 &&
               (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {

                ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

                /* Grab filled mbuf. */
                m = sc->rxring_m[sc->rxring_tl_ptr];
                sc->rxring_m[sc->rxring_tl_ptr] = NULL;

                /* Sync cache with receive buffer. */
                bus_dmamap_sync(sc->mbuf_dma_tag,
                                sc->rxring_m_dmamap[sc->rxring_tl_ptr],
                                BUS_DMASYNC_POSTREAD);

                /* Unload dmamap. */
                bus_dmamap_unload(sc->mbuf_dma_tag,
                        sc->rxring_m_dmamap[sc->rxring_tl_ptr]);

                /* Increment tail pointer. */
                if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
                        sc->rxring_tl_ptr = 0;
                sc->rxring_queued--;

                /* Check FCS and make sure entire packet landed in one mbuf
                 * cluster (which is much bigger than the largest ethernet
                 * packet).
                 */
                if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
                    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
                           (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
                        /* discard. */
                        m_free(m);
                        ifp->if_ierrors++;
                        continue;
                }

                /* Hand it off to upper layers.  The controller deposits
                 * frames ETHER_ALIGN bytes into the buffer (see the
                 * CGEM_NET_CFG_RX_BUF_OFFSET setting in cgem_config()),
                 * so skip past that padding here.
                 */
                m->m_data += ETHER_ALIGN;
                m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
                m->m_pkthdr.rcvif = ifp;
                m->m_pkthdr.len = m->m_len;

                /* Are we using hardware checksumming?  Check the
                 * status in the receive descriptor.
                 */
                if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
                        /* TCP or UDP checks out, IP checks out too. */
                        if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
                            CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
                            (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
                            CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
                                m->m_pkthdr.csum_flags |=
                                        CSUM_IP_CHECKED | CSUM_IP_VALID |
                                        CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                                m->m_pkthdr.csum_data = 0xffff;
                        } else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
                                   CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
                                /* Only IP checks out. */
                                m->m_pkthdr.csum_flags |=
                                        CSUM_IP_CHECKED | CSUM_IP_VALID;
                                m->m_pkthdr.csum_data = 0xffff;
                        }
                }

                ifp->if_ipackets++;
                CGEM_UNLOCK(sc);
                (*ifp->if_input)(ifp, m);
                CGEM_LOCK(sc);
        }
}

/* Find completed transmits and free their mbufs. */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
        struct mbuf *m;
        uint32_t ctl;

        CGEM_ASSERT_LOCKED(sc);

        /* free up finished transmits. */
        while (sc->txring_queued > 0 &&
               ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
                CGEM_TXDESC_USED) != 0) {

                /* Sync cache.  nop? */
                bus_dmamap_sync(sc->mbuf_dma_tag,
                                sc->txring_m_dmamap[sc->txring_tl_ptr],
                                BUS_DMASYNC_POSTWRITE);

                /* Unload DMA map. */
                bus_dmamap_unload(sc->mbuf_dma_tag,
                                  sc->txring_m_dmamap[sc->txring_tl_ptr]);

                /* Free up the mbuf. */
                m = sc->txring_m[sc->txring_tl_ptr];
                sc->txring_m[sc->txring_tl_ptr] = NULL;
                m_freem(m);

                /* Check the status. */
                if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
                        /* Serious bus error.  Log to console. */
                        device_printf(sc->dev, "cgem_clean_tx: Whoa! "
                                   "AHB error, addr=0x%x\n",
                                   sc->txring[sc->txring_tl_ptr].addr);
                } else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
                                   CGEM_TXDESC_LATE_COLL)) != 0) {
                        sc->ifp->if_oerrors++;
                } else
                        sc->ifp->if_opackets++;

                /* If the packet spanned more than one tx descriptor,
                 * skip descriptors until we find the end so that only
                 * start-of-frame descriptors are processed.
                 */
                while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
                        if ((ctl & CGEM_TXDESC_WRAP) != 0)
                                sc->txring_tl_ptr = 0;
                        else
                                sc->txring_tl_ptr++;
                        sc->txring_queued--;

                        ctl = sc->txring[sc->txring_tl_ptr].ctl;

                        sc->txring[sc->txring_tl_ptr].ctl =
                                ctl | CGEM_TXDESC_USED;
                }

                /* Next descriptor. */
                if ((ctl & CGEM_TXDESC_WRAP) != 0)
                        sc->txring_tl_ptr = 0;
                else
                        sc->txring_tl_ptr++;
                sc->txring_queued--;
        }
}

/* Start transmits. */
static void
cgem_start_locked(struct ifnet *ifp)
{
        struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
        struct mbuf *m;
        bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
        uint32_t ctl;
        int i, nsegs, wrap, err;

        CGEM_ASSERT_LOCKED(sc);

        if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0)
                return;

        for (;;) {
                /* Check that there is room in the descriptor ring:
                 * keep at least TX_MAX_DMA_SEGS + 1 slots free so a
                 * maximally fragmented mbuf chain always fits.
                 */
                if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
                    TX_MAX_DMA_SEGS - 1) {

                        /* Try to make room. */
                        cgem_clean_tx(sc);

                        /* Still no room? */
                        if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
                            TX_MAX_DMA_SEGS - 1) {
                                ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                                break;
                        }
                }

                /* Grab next transmit packet. */
                IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
                if (m == NULL)
                        break;

                /* Load DMA map. */
                err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
                                      sc->txring_m_dmamap[sc->txring_hd_ptr],
                                      m, segs, &nsegs, BUS_DMA_NOWAIT);
                if (err == EFBIG) {
                        /* Too many segments!  Defrag and try again. */
                        struct mbuf *m2 = m_defrag(m, M_NOWAIT);

                        if (m2 == NULL) {
                                m_freem(m);
                                continue;
                        }
                        m = m2;
                        err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
                                      sc->txring_m_dmamap[sc->txring_hd_ptr],
                                      m, segs, &nsegs, BUS_DMA_NOWAIT);
                }
                if (err) {
                        /* Give up. */
                        m_freem(m);
                        continue;
                }
                sc->txring_m[sc->txring_hd_ptr] = m;

                /* Sync tx buffer with cache. */
                bus_dmamap_sync(sc->mbuf_dma_tag,
                                sc->txring_m_dmamap[sc->txring_hd_ptr],
                                BUS_DMASYNC_PREWRITE);

                /* Set wrap flag if next packet might run off end of ring. */
                wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
                        CGEM_NUM_TX_DESCS;

                /* Fill in the TX descriptors back to front so that USED
                 * bit in first descriptor is cleared last.
                 */
                for (i = nsegs - 1; i >= 0; i--) {
                        /* Descriptor address. */
                        sc->txring[sc->txring_hd_ptr + i].addr =
                                segs[i].ds_addr;

                        /* Descriptor control word. */
                        ctl = segs[i].ds_len;
                        if (i == nsegs - 1) {
                                ctl |= CGEM_TXDESC_LAST_BUF;
                                if (wrap)
                                        ctl |= CGEM_TXDESC_WRAP;
                        }
                        sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

                        if (i != 0)
                                sc->txring_m[sc->txring_hd_ptr + i] = NULL;
                }

                if (wrap)
                        sc->txring_hd_ptr = 0;
                else
                        sc->txring_hd_ptr += nsegs;
                sc->txring_queued += nsegs;

                /* Kick the transmitter. */
                WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
                    CGEM_NET_CTRL_START_TX);
        }
}
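
/* A note on the wrap logic above: with CGEM_NUM_TX_DESCS = 256 and
 * TX_MAX_DMA_SEGS = 4, a packet whose first segment lands at slot 253
 * sets CGEM_TXDESC_WRAP on its last descriptor because 253 + nsegs + 4
 * can reach past slot 255; the next packet then starts at slot 0, so a
 * packet always has TX_MAX_DMA_SEGS contiguous descriptors available.
 */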

static void
cgem_start(struct ifnet *ifp)
{
        struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;

        CGEM_LOCK(sc);
        cgem_start_locked(ifp);
        CGEM_UNLOCK(sc);
}

/* Respond to changes in media. */
static void
cgem_media_update(struct cgem_softc *sc, int active)
{
        uint32_t net_cfg;

        CGEM_ASSERT_LOCKED(sc);

        /* Update hardware to reflect phy status. */
        net_cfg = RD4(sc, CGEM_NET_CFG);
        net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
                     CGEM_NET_CFG_FULL_DUPLEX);

        if (IFM_SUBTYPE(active) == IFM_1000_T)
                net_cfg |= (CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN);
        else if (IFM_SUBTYPE(active) == IFM_100_TX)
                net_cfg |= CGEM_NET_CFG_SPEED100;

        if ((active & IFM_FDX) != 0)
                net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;
        WR4(sc, CGEM_NET_CFG, net_cfg);
}

static void
cgem_tick(void *arg)
{
        struct cgem_softc *sc = (struct cgem_softc *)arg;
        struct mii_data *mii;
        int active;

        CGEM_ASSERT_LOCKED(sc);

        /* Poll the phy. */
        if (sc->miibus != NULL) {
                mii = device_get_softc(sc->miibus);
                active = mii->mii_media_active;
                mii_tick(mii);
                if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
                    (IFM_ACTIVE | IFM_AVALID) &&
                    active != mii->mii_media_active)
                        cgem_media_update(sc, mii->mii_media_active);
        }

        /* Next callout in one second. */
        callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
        struct cgem_softc *sc = (struct cgem_softc *)arg;
        uint32_t istatus;

        CGEM_LOCK(sc);

        if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
                CGEM_UNLOCK(sc);
                return;
        }

        istatus = RD4(sc, CGEM_INTR_STAT);
        WR4(sc, CGEM_INTR_STAT, istatus &
            (CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
             CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK));

        /* HRESP not OK: something is seriously wrong with DMA.
         * Try to clear the condition.
         */
        if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
                printf("cgem_intr: hresp not okay! rx_status=0x%x\n",
                       RD4(sc, CGEM_RX_STAT));
                WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
        }

        /* Transmitter has idled.  Free up any spent transmit buffers. */
        if ((istatus & CGEM_INTR_TX_USED_READ) != 0)
                cgem_clean_tx(sc);

        /* Packets received or overflow. */
        if ((istatus & (CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN)) != 0) {
                cgem_recv(sc);
                cgem_fill_rqueue(sc);
                if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
                        /* Clear rx status register. */
                        sc->rxoverruns++;
                        WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
                }
        }

        CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

        CGEM_ASSERT_LOCKED(sc);

        WR4(sc, CGEM_NET_CTRL, 0);
        WR4(sc, CGEM_NET_CFG, 0);
        WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
        WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
        WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
        WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
        WR4(sc, CGEM_HASH_BOT, 0);
        WR4(sc, CGEM_HASH_TOP, 0);
        WR4(sc, CGEM_TX_QBAR, 0);       /* manual says do this. */
        WR4(sc, CGEM_RX_QBAR, 0);

        /* Get management port running even if interface is down. */
        WR4(sc, CGEM_NET_CFG,
            CGEM_NET_CFG_DBUS_WIDTH_32 |
            CGEM_NET_CFG_MDC_CLK_DIV_64);

        sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
        WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}

/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
        uint32_t net_cfg;
        uint32_t dma_cfg;

        CGEM_ASSERT_LOCKED(sc);

        /* Program Net Config Register. */
        net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
                CGEM_NET_CFG_MDC_CLK_DIV_64 |
                CGEM_NET_CFG_FCS_REMOVE |
                CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
                CGEM_NET_CFG_GIGE_EN |
                CGEM_NET_CFG_FULL_DUPLEX |
                CGEM_NET_CFG_SPEED100;

        /* Enable receive checksum offloading? */
        if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0)
                net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

        WR4(sc, CGEM_NET_CFG, net_cfg);

        /* Program DMA Config Register. */
        dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
                CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
                CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
                CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16;

        /* Enable transmit checksum offloading? */
        if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0)
                dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

        WR4(sc, CGEM_DMA_CFG, dma_cfg);

        /* Write the rx and tx descriptor ring addresses to the QBAR regs. */
        WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
        WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);

        /* Enable rx and tx. */
        sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
        WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

        /* Set up interrupts. */
        WR4(sc, CGEM_INTR_EN,
            CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
            CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK);
}

/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
        struct mii_data *mii;

        CGEM_ASSERT_LOCKED(sc);

        if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                return;

        cgem_config(sc);
        cgem_fill_rqueue(sc);

        sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
        sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        mii = device_get_softc(sc->miibus);
        mii_pollstat(mii);
        cgem_media_update(sc, mii->mii_media_active);
        cgem_start_locked(sc->ifp);

        callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

static void
cgem_init(void *arg)
{
        struct cgem_softc *sc = (struct cgem_softc *)arg;

        CGEM_LOCK(sc);
        cgem_init_locked(sc);
        CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
        int i;

        CGEM_ASSERT_LOCKED(sc);

        callout_stop(&sc->tick_ch);

        /* Shut down hardware. */
        cgem_reset(sc);

        /* Clear out transmit queue. */
        for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
                sc->txring[i].ctl = CGEM_TXDESC_USED;
                sc->txring[i].addr = 0;
                if (sc->txring_m[i]) {
                        bus_dmamap_unload(sc->mbuf_dma_tag,
                                          sc->txring_m_dmamap[i]);
                        m_freem(sc->txring_m[i]);
                        sc->txring_m[i] = NULL;
                }
        }
        sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

        sc->txring_hd_ptr = 0;
        sc->txring_tl_ptr = 0;
        sc->txring_queued = 0;

        /* Clear out receive queue. */
        for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
                sc->rxring[i].addr = CGEM_RXDESC_OWN;
                sc->rxring[i].ctl = 0;
                if (sc->rxring_m[i]) {
                        /* Unload dmamap. */
                        bus_dmamap_unload(sc->mbuf_dma_tag,
                                          sc->rxring_m_dmamap[i]);

                        m_freem(sc->rxring_m[i]);
                        sc->rxring_m[i] = NULL;
                }
        }
        sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

        sc->rxring_hd_ptr = 0;
        sc->rxring_tl_ptr = 0;
        sc->rxring_queued = 0;
}

static int
cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct cgem_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        struct mii_data *mii;
        int error = 0, mask;

        switch (cmd) {
        case SIOCSIFFLAGS:
                CGEM_LOCK(sc);
                if ((ifp->if_flags & IFF_UP) != 0) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                                if (((ifp->if_flags ^ sc->if_old_flags) &
                                     (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
                                        cgem_rx_filter(sc);
                                }
                        } else {
                                cgem_init_locked(sc);
                        }
                } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                        cgem_stop(sc);
                }
                sc->if_old_flags = ifp->if_flags;
                CGEM_UNLOCK(sc);
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                /* Set up multi-cast filters. */
                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                        CGEM_LOCK(sc);
                        cgem_rx_filter(sc);
                        CGEM_UNLOCK(sc);
                }
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                mii = device_get_softc(sc->miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
                break;

        case SIOCSIFCAP:
                CGEM_LOCK(sc);
                mask = ifp->if_capenable ^ ifr->ifr_reqcap;

                if ((mask & IFCAP_TXCSUM) != 0) {
                        if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
                                /* Turn on TX checksumming. */
                                ifp->if_capenable |= (IFCAP_TXCSUM |
                                                      IFCAP_TXCSUM_IPV6);
                                ifp->if_hwassist |= CGEM_CKSUM_ASSIST;

                                WR4(sc, CGEM_DMA_CFG,
                                    RD4(sc, CGEM_DMA_CFG) |
                                     CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
                        } else {
                                /* Turn off TX checksumming. */
                                ifp->if_capenable &= ~(IFCAP_TXCSUM |
                                                       IFCAP_TXCSUM_IPV6);
                                ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST;

                                WR4(sc, CGEM_DMA_CFG,
                                    RD4(sc, CGEM_DMA_CFG) &
                                     ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
                        }
                }
                if ((mask & IFCAP_RXCSUM) != 0) {
                        if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
                                /* Turn on RX checksumming. */
                                ifp->if_capenable |= (IFCAP_RXCSUM |
                                                      IFCAP_RXCSUM_IPV6);
                                WR4(sc, CGEM_NET_CFG,
                                    RD4(sc, CGEM_NET_CFG) |
                                     CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
                        } else {
                                /* Turn off RX checksumming. */
                                ifp->if_capenable &= ~(IFCAP_RXCSUM |
                                                       IFCAP_RXCSUM_IPV6);
                                WR4(sc, CGEM_NET_CFG,
                                    RD4(sc, CGEM_NET_CFG) &
                                     ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
                        }
                }

                CGEM_UNLOCK(sc);
                break;
        default:
                error = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (error);
}

/* MII bus support routines. */
static void
cgem_child_detached(device_t dev, device_t child)
{
        struct cgem_softc *sc = device_get_softc(dev);

        if (child == sc->miibus)
                sc->miibus = NULL;
}

static int
cgem_ifmedia_upd(struct ifnet *ifp)
{
        struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
        struct mii_data *mii;

        mii = device_get_softc(sc->miibus);
        CGEM_LOCK(sc);
        mii_mediachg(mii);
        CGEM_UNLOCK(sc);
        return (0);
}

static void
cgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
        struct mii_data *mii;

        mii = device_get_softc(sc->miibus);
        CGEM_LOCK(sc);
        mii_pollstat(mii);
        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;
        CGEM_UNLOCK(sc);
}

static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
        struct cgem_softc *sc = device_get_softc(dev);
        int tries, val;

        WR4(sc, CGEM_PHY_MAINT,
            CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
            CGEM_PHY_MAINT_OP_READ |
            (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
            (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

        /* Wait for completion. */
        tries = 0;
        while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
                DELAY(5);
                if (++tries > 200) {
                        device_printf(dev, "phy read timeout: %d\n", reg);
                        return (-1);
                }
        }

        val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

        return (val);
}

static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
        struct cgem_softc *sc = device_get_softc(dev);
        int tries;

        WR4(sc, CGEM_PHY_MAINT,
            CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
            CGEM_PHY_MAINT_OP_WRITE |
            (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
            (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
            (data & CGEM_PHY_MAINT_DATA_MASK));

        /* Wait for completion. */
        tries = 0;
        while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
                DELAY(5);
                if (++tries > 200) {
                        device_printf(dev, "phy write timeout: %d\n", reg);
                        return (-1);
                }
        }

        return (0);
}
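
/* Illustrative sketch, not part of the driver: a caller polling the
 * PHY's basic mode status register through the maintenance interface
 * above.  MII_BMSR and BMSR_LINK come from <dev/mii/mii.h>; the PHY
 * address 0 is an assumption for the example.  Kept under #if 0 so it
 * is never compiled in.
 */
#if 0
static void
cgem_phy_example(device_t dev)
{
        int bmsr;

        bmsr = cgem_miibus_readreg(dev, 0, MII_BMSR);
        if (bmsr != -1)
                device_printf(dev, "BMSR = 0x%04x, link %s\n", bmsr,
                    (bmsr & BMSR_LINK) != 0 ? "up" : "down");
}
#endif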

static int
cgem_probe(device_t dev)
{

        if (!ofw_bus_is_compatible(dev, "cadence,gem"))
                return (ENXIO);

        device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
        return (0);
}

static int
cgem_attach(device_t dev)
{
        struct cgem_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = NULL;
        int rid, err;
        u_char eaddr[ETHER_ADDR_LEN];

        sc->dev = dev;
        CGEM_LOCK_INIT(sc);

        /* Get memory resource. */
        rid = 0;
        sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
                                             RF_ACTIVE);
        if (sc->mem_res == NULL) {
                device_printf(dev, "could not allocate memory resources.\n");
                return (ENOMEM);
        }

        /* Get IRQ resource. */
        rid = 0;
        sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                                             RF_ACTIVE);
        if (sc->irq_res == NULL) {
                device_printf(dev, "could not allocate interrupt resource.\n");
                cgem_detach(dev);
                return (ENOMEM);
        }

        ifp = sc->ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "could not allocate ifnet structure\n");
                cgem_detach(dev);
                return (ENOMEM);
        }

        CGEM_LOCK(sc);

        /* Reset hardware. */
        cgem_reset(sc);

        /* Attach phy to mii bus. */
        err = mii_attach(dev, &sc->miibus, ifp,
                         cgem_ifmedia_upd, cgem_ifmedia_sts,
                         BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
        if (err) {
                CGEM_UNLOCK(sc);
                device_printf(dev, "attaching PHYs failed\n");
                cgem_detach(dev);
                return (err);
        }

        /* Set up TX and RX descriptor area. */
        err = cgem_setup_descs(sc);
        if (err) {
                CGEM_UNLOCK(sc);
                device_printf(dev, "could not set up dma mem for descs.\n");
                cgem_detach(dev);
                return (ENOMEM);
        }

        /* Get a MAC address. */
        cgem_get_mac(sc, eaddr);

        /* Start ticks. */
        callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

        /* Set up ifnet structure. */
        ifp->if_softc = sc;
        if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_start = cgem_start;
        ifp->if_ioctl = cgem_ioctl;
        ifp->if_init = cgem_init;
        ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
        /* XXX: disable hw checksumming for now. */
        ifp->if_hwassist = 0;
        ifp->if_capenable = ifp->if_capabilities &
                ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
        IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
        ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
        IFQ_SET_READY(&ifp->if_snd);

        sc->if_old_flags = ifp->if_flags;
        sc->rxbufs = DEFAULT_NUM_RX_BUFS;

        ether_ifattach(ifp, eaddr);

        err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
                             INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
        if (err) {
                CGEM_UNLOCK(sc);
                device_printf(dev, "could not set interrupt handler.\n");
                ether_ifdetach(ifp);
                cgem_detach(dev);
                return (err);
        }

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
                       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                       OID_AUTO, "rxbufs", CTLFLAG_RW,
                       &sc->rxbufs, 0,
                       "Number of receive buffers to provide");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
                       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                       OID_AUTO, "_rxoverruns", CTLFLAG_RD,
                       &sc->rxoverruns, 0,
                       "Receive ring overrun events");

        CGEM_UNLOCK(sc);

        return (0);
}

static int
cgem_detach(device_t dev)
{
        struct cgem_softc *sc = device_get_softc(dev);
        int i;

        if (sc == NULL)
                return (ENODEV);

        if (device_is_attached(dev)) {
                CGEM_LOCK(sc);
                cgem_stop(sc);
                CGEM_UNLOCK(sc);
                callout_drain(&sc->tick_ch);
                sc->ifp->if_flags &= ~IFF_UP;
                ether_ifdetach(sc->ifp);
        }

        if (sc->miibus != NULL) {
                device_delete_child(dev, sc->miibus);
                sc->miibus = NULL;
        }

        /* Release resources. */
        if (sc->mem_res != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY,
                                     rman_get_rid(sc->mem_res), sc->mem_res);
                sc->mem_res = NULL;
        }
        if (sc->irq_res != NULL) {
                if (sc->intrhand)
                        bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
                bus_release_resource(dev, SYS_RES_IRQ,
                                     rman_get_rid(sc->irq_res), sc->irq_res);
                sc->irq_res = NULL;
        }

        /* Release DMA resources. */
        if (sc->rxring_dma_map != NULL) {
                bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
                                sc->rxring_dma_map);
                sc->rxring_dma_map = NULL;
                for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
                        if (sc->rxring_m_dmamap[i] != NULL) {
                                bus_dmamap_destroy(sc->mbuf_dma_tag,
                                                   sc->rxring_m_dmamap[i]);
                                sc->rxring_m_dmamap[i] = NULL;
                        }
        }
        if (sc->txring_dma_map != NULL) {
                bus_dmamem_free(sc->desc_dma_tag, sc->txring,
                                sc->txring_dma_map);
                sc->txring_dma_map = NULL;
                for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
                        if (sc->txring_m_dmamap[i] != NULL) {
                                bus_dmamap_destroy(sc->mbuf_dma_tag,
                                                   sc->txring_m_dmamap[i]);
                                sc->txring_m_dmamap[i] = NULL;
                        }
        }
        if (sc->desc_dma_tag != NULL) {
                bus_dma_tag_destroy(sc->desc_dma_tag);
                sc->desc_dma_tag = NULL;
        }
        if (sc->mbuf_dma_tag != NULL) {
                bus_dma_tag_destroy(sc->mbuf_dma_tag);
                sc->mbuf_dma_tag = NULL;
        }

        bus_generic_detach(dev);

        CGEM_LOCK_DESTROY(sc);

        return (0);
}

static device_method_t cgem_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         cgem_probe),
        DEVMETHOD(device_attach,        cgem_attach),
        DEVMETHOD(device_detach,        cgem_detach),

        /* Bus interface */
        DEVMETHOD(bus_child_detached,   cgem_child_detached),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       cgem_miibus_readreg),
        DEVMETHOD(miibus_writereg,      cgem_miibus_writereg),

        DEVMETHOD_END
};

static driver_t cgem_driver = {
        "cgem",
        cgem_methods,
        sizeof(struct cgem_softc),
};

DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);