/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers.
 */

#if 0 /* XXX: In case of emergency, re-enable this. */
#define	GEM_RINT_TIMEOUT
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>
CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);

#define	GEM_TRIES	10000
/*
 * The hardware supports basic TCP/UDP checksum offloading.  However,
 * the hardware doesn't compensate the checksum of UDP datagrams,
 * which can yield 0x0.  As a safeguard, UDP checksum offloading is
 * disabled by default.  It can be reactivated by setting the special
 * link option link0 with ifconfig(8).
 */
#define	GEM_CSUM_FEATURES	(CSUM_TCP)
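/*
 * Illustrative usage only (assuming the first instance is gem0):
 *
 *	ifconfig gem0 link0	# also offload UDP transmit checksums
 *	ifconfig gem0 -link0	# back to the TCP-only default
 */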
static int	gem_add_rxbuf(struct gem_softc *sc, int idx);
static int	gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r,
		    uint32_t clr, uint32_t set);
static void	gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
		    int nsegs, int error);
static int	gem_disable_rx(struct gem_softc *sc);
static int	gem_disable_tx(struct gem_softc *sc);
static void	gem_eint(struct gem_softc *sc, u_int status);
static void	gem_init(void *xsc);
static void	gem_init_locked(struct gem_softc *sc);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int	gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
static int	gem_meminit(struct gem_softc *sc);
static void	gem_mifinit(struct gem_softc *sc);
static void	gem_reset(struct gem_softc *sc);
static int	gem_reset_rx(struct gem_softc *sc);
static void	gem_reset_rxdma(struct gem_softc *sc);
static int	gem_reset_tx(struct gem_softc *sc);
static u_int	gem_ringsize(u_int sz);
static void	gem_rint(struct gem_softc *sc);
#ifdef GEM_RINT_TIMEOUT
static void	gem_rint_timeout(void *arg);
#endif
static inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
static void	gem_rxdrain(struct gem_softc *sc);
static void	gem_setladrf(struct gem_softc *sc);
static void	gem_start(struct ifnet *ifp);
static void	gem_start_locked(struct ifnet *ifp);
static void	gem_stop(struct ifnet *ifp, int disable);
static void	gem_tick(void *arg);
static void	gem_tint(struct gem_softc *sc);
static inline void gem_txkick(struct gem_softc *sc);
static int	gem_watchdog(struct gem_softc *sc);
devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#define	KTR_GEM		KTR_CT2

#define	GEM_BANK1_BITWAIT(sc, r, clr, set)				\
	gem_bitwait((sc), GEM_RES_BANK1, (r), (clr), (set))
#define	GEM_BANK2_BITWAIT(sc, r, clr, set)				\
	gem_bitwait((sc), GEM_RES_BANK2, (r), (clr), (set))
int
gem_attach(struct gem_softc *sc)
{
	struct gem_txsoft *txs;
	struct ifnet *ifp;
	int error, i;
	uint32_t v;

	device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags);

	/* Set up ifnet structure. */
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	sc->sc_csum_features = GEM_CSUM_FEATURES;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_init = gem_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
	ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
	IFQ_SET_READY(&ifp->if_snd);

	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif

	/* Make sure the chip is stopped. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
	    NULL, &sc->sc_pdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0,
	    NULL, NULL, &sc->sc_cdmatag);
	/*
	 * Allocate the control data structures, create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_cddmamap)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);

	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);
	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create TX DMA map %d, error = %d\n",
			    i, error);
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create RX DMA map %d, error = %d\n",
			    i, error);
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* Bad things will happen when touching this register on ERI. */
	if (sc->sc_variant != GEM_SUN_ERI)
		GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
		    GEM_MII_DATAPATH_MII);
	/*
	 * Look for an external PHY.
	 */
	v = GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG);
	if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
		v |= GEM_MIF_CONFIG_PHY_SEL;
		GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
		switch (sc->sc_variant) {
		default:
			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
		}
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 */
	if (error != 0 && (v & GEM_MIF_CONFIG_MDI0) != 0) {
		v &= ~GEM_MIF_CONFIG_PHY_SEL;
		GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
		switch (sc->sc_variant) {
		case GEM_APPLE_K2_GMAC:
			sc->sc_phyad = GEM_PHYAD_INTERNAL;
			break;
		default:
			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
		}
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	/*
	 * Try the external PCS SERDES if we didn't find any PHYs.
	 */
	if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
		GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
		    GEM_MII_DATAPATH_SERDES);
		GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
		GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
		sc->sc_flags |= GEM_SERDES;
		sc->sc_phyad = GEM_PHYAD_EXTERNAL;
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	device_printf(sc->sc_dev, "PHY probe failed: %d\n", error);
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Get RX FIFO size. */
	sc->sc_rxfifosize = 64 *
	    GEM_BANK1_READ_4(sc, GEM_RX_FIFO_SIZE);

	/* Get TX FIFO size. */
	v = GEM_BANK1_READ_4(sc, GEM_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    sc->sc_rxfifosize / 1024, v / 16);
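	/*
	 * Illustrative arithmetic: both FIFO size registers count in
	 * 64-byte units, so a raw RX value of 256 yields 256 * 64 =
	 * 16384 bytes (printed as 16kB), and the TX value is converted
	 * straight to kB by v / 16 (i.e. v * 64 / 1024).
	 */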
	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_hwassist |= sc->sc_csum_features;
	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;

	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	for (i = 0; i < GEM_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);

	return (error);
}
void
gem_detach(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	callout_drain(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_drain(&sc->sc_rx_ch);
#endif
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	for (i = 0; i < GEM_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}
void
gem_suspend(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	GEM_UNLOCK(sc);
}

void
gem_resume(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	/*
	 * On resume all registers have to be initialized again like
	 * with a cold start.
	 */
	sc->sc_flags &= ~GEM_INITED;
	if (ifp->if_flags & IFF_UP)
		gem_init_locked(sc);
	GEM_UNLOCK(sc);
}
static inline void
gem_rxcksum(struct mbuf *m, uint64_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	uint16_t *opts;
	int32_t hlen, len, pktlen;
	uint32_t temp32;
	uint16_t cksum;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* Cannot handle fragmented packet. */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((uint8_t *)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}

	cksum = ~(flags & GEM_RD_CHECKSUM);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	opts = (uint16_t *)(ip + 1);
	for (; len > 0; len -= sizeof(uint16_t), opts++) {
		temp32 = cksum - *opts;
		temp32 = (temp32 >> 16) + (temp32 & 65535);
		cksum = temp32 & 65535;
	}

	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;
}
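/*
 * Worked example of the IP-option fixup above (illustrative only): with
 * cksum = 0x0001 and *opts = 0x0003, temp32 becomes 0xfffffffe; the fold
 * yields 0xffff + 0xfffe = 0x1fffd, truncated to 0xfffd, which is exactly
 * 0x0001 minus 0x0003 in ones' complement arithmetic
 * (0x0001 + ~0x0003 = 0x0001 + 0xfffc = 0xfffd).
 */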
static void
gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct gem_softc *sc = xsc;

	if (error != 0)
		return;
	if (nsegs != 1)
		panic("%s: bad control buffer segment count", __func__);
	sc->sc_cddma = segs[0].ds_addr;
}
static void
gem_tick(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Unload collision and error counters.
	 */
	ifp->if_collisions +=
	    GEM_BANK1_READ_4(sc, GEM_MAC_NORM_COLL_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_FIRST_COLL_CNT);
	v = GEM_BANK1_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_LATE_COLL_CNT);
	ifp->if_collisions += v;
	ifp->if_oerrors += v;
	ifp->if_ierrors +=
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CODE_VIOL);

	/*
	 * Then clear the hardware counters.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);

	mii_tick(sc->sc_mii);

	if (gem_watchdog(sc) == EJUSTRETURN)
		return;

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}
static int
gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, uint32_t clr,
    uint32_t set)
{
	int i;
	uint32_t reg;

	for (i = GEM_TRIES; i--; DELAY(100)) {
		reg = GEM_BANKN_READ_M(bank, 4, sc, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}
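/*
 * Worked example (illustrative): with GEM_TRIES at 10000 and a 100us
 * DELAY() per iteration, gem_bitwait() gives a register bit roughly one
 * second to change state before callers report errors such as
 * "cannot reset device".
 */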
static void
gem_reset(struct gem_softc *sc)
{

	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);

	/* Do a full reset. */
	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
}
static void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}
static void
gem_stop(struct ifnet *ifp, int disable)
{
	struct gem_softc *sc = ifp->if_softc;
	struct gem_txsoft *txs;

	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);

	callout_stop(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~GEM_LINK;
	sc->sc_wdog_timer = 0;
}
static int
gem_reset_rx(struct gem_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable RX DMA\n");

	/* Finally, reset the ERX. */
	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX);
	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX,
	    0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}
/*
 * Reset the receiver DMA engine.
 *
 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
 * etc. in order to reset the receiver DMA engine only and not do a full
 * reset which amongst others also downs the link and clears the FIFOs.
 */
static void
gem_reset_rxdma(struct gem_softc *sc)
{
	int i;

	if (gem_reset_rx(sc) != 0)
		return (gem_init_locked(sc));
	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
			GEM_UPDATE_RXDESC(sc, i);
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* NOTE: we use only 32-bit DMA addresses here. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
	    gem_ringsize(GEM_NRXDESC /* XXX */) |
	    ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT) |
	    (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT));
	/* Adjusting for the SBus clock probably isn't worth the fuss. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
	    ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) <<
	    GEM_RX_BLANKING_TIME_SHIFT) | 6);
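	/*
	 * Illustrative arithmetic, assuming the blanking counter runs
	 * off the bus clock: the time field is 6 on a 33MHz bus and is
	 * doubled to 12 when GEM_PCI66 is set, keeping the interrupt
	 * blanking interval roughly constant in wall-clock terms.
	 */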
	GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) | GEM_MAC_RX_ENABLE);
}
static int
gem_reset_tx(struct gem_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable TX DMA\n");

	/* Finally, reset the ETX. */
	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_TX);
	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX,
	    0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}
static int
gem_disable_rx(struct gem_softc *sc)
{

	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE,
	    0));
}
static int
gem_disable_tx(struct gem_softc *sc)
{

	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE);
	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE,
	    0));
}
static int
gem_meminit(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int error, i;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	sc->sc_txfree = GEM_MAXTXFREE;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev,
				    "unable to allocate or map RX buffer %d, "
				    "error = %d\n", i, error);
				/*
				 * XXX we should attempt to run with fewer
				 * receive buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}

	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
static u_int
gem_ringsize(u_int sz)
{

	switch (sz) {
	case 32:
		return (GEM_RING_SZ_32);
	case 64:
		return (GEM_RING_SZ_64);
	case 128:
		return (GEM_RING_SZ_128);
	case 256:
		return (GEM_RING_SZ_256);
	case 512:
		return (GEM_RING_SZ_512);
	case 1024:
		return (GEM_RING_SZ_1024);
	case 2048:
		return (GEM_RING_SZ_2048);
	case 4096:
		return (GEM_RING_SZ_4096);
	case 8192:
		return (GEM_RING_SZ_8192);
	default:
		printf("%s: invalid ring size %u\n", __func__, sz);
		return (GEM_RING_SZ_32);
	}
}
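/*
 * Illustrative use: the result is OR'ed into the DMA configuration
 * registers, e.g. gem_ringsize(GEM_NRXDESC) encodes the RX descriptor
 * ring size for GEM_RX_CONFIG; assuming GEM_NRXDESC is defined as 256
 * in if_gemvar.h, this would return GEM_RING_SZ_256.
 */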
static void
gem_init(void *xsc)
{
	struct gem_softc *sc = xsc;

	GEM_LOCK(sc);
	gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init_locked(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
	    __func__);
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel. */
	gem_stop(ifp, 0);
	gem_reset(sc);
	CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
	    __func__);

	/* Re-initialize the MIF. */
	gem_mifinit(sc);

	/* step 3.  Setup data structures in host memory. */
	if (gem_meminit(sc) != 0)
		return;

	/* step 4.  TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5.  RX MAC registers & counters */

	/* step 6 & 7.  Program Descriptor Ring Base Addresses. */
	/* NOTE: we use only 32-bit DMA addresses here. */
	GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0);
	GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);

	/* step 8.  Global Configuration & Interrupt Mask */

	/*
	 * Set the internal arbitration to "infinite" bursts of the
	 * maximum length of 31 * 64 bytes so DMA transfers aren't
	 * split up in cache line size chunks.  This greatly improves
	 * performance.
	 * Enable silicon bug workarounds for the Apple variants.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_CONFIG,
	    GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
	    ((sc->sc_flags & GEM_PCI) != 0 ? GEM_CONFIG_BURST_INF :
	    GEM_CONFIG_BURST_64) | (GEM_IS_APPLE(sc) ?
	    GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));

	GEM_BANK1_WRITE_4(sc, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
	    GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
	    GEM_INTR_BERR
#ifdef GEM_DEBUG
	    | GEM_INTR_PCS | GEM_INTR_MIF
#endif
	    ));
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_MASK,
	    GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
	    GEM_MAC_TX_PEAK_EXP);
#ifdef GEM_DEBUG
	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
	    ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
#else
	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
	    GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
#endif

	/* step 9.  ETX Configuration: use mostly default values. */

	v = gem_ringsize(GEM_NTXDESC);
	/* Set TX FIFO threshold and enable DMA. */
	v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x4ff) << 10) &
	    GEM_TX_CONFIG_TXFIFO_TH;
	GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);

	/* step 10.  ERX Configuration */

	/* Encode Receive Descriptor ring size. */
	v = gem_ringsize(GEM_NRXDESC /* XXX */);
	/* RX TCP/UDP checksum offset */
	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT);
	/* Set RX FIFO threshold, set first byte offset and enable DMA. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
	    v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) |
	    GEM_RX_CONFIG_RXDMA_EN);

	/* Adjusting for the SBus clock probably isn't worth the fuss. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
	    ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) <<
	    GEM_RX_BLANKING_TIME_SHIFT) | 6);

	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
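	/*
	 * Worked example (illustrative): the threshold fields count in
	 * 64-byte units, so with a 16kB RX FIFO (sc_rxfifosize = 16384)
	 * the OFF field becomes 3 * 16384 / 256 = 192 (12kB, 3/4 full)
	 * and the ON field 16384 / 256 = 64 (4kB, 1/4 full).
	 */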
	/* step 11.  Configure Media. */

	/* step 12.  RX_MAC Configuration Register */
	v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot configure RX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);

	/* step 13.  TX_MAC Configuration Register */
	v = GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG);
	v |= GEM_MAC_TX_ENABLE;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot configure TX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, v);

	/* step 14.  Issue Transmit Pending command. */

	/* step 15.  Give the receiver a swift kick. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii_mediachg(sc->sc_mii);

	/* Start the one second timer. */
	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}
static int
gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t txsegs[GEM_NTXSEGS];
	struct gem_txsoft *txs;
	struct ip *ip;
	struct mbuf *m;
	uint64_t cflags, flags;
	int error, nexttx, nsegs, offset, seg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}

	cflags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
		if (M_WRITABLE(*m_head) == 0) {
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			*m_head = m;
			if (m == NULL)
				return (ENOBUFS);
		}
		offset = sizeof(struct ether_header);
		m = m_pullup(*m_head, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, caddr_t) + offset);
		offset += (ip->ip_hl << 2);
		cflags = offset << GEM_TD_CXSUM_STARTSHFT |
		    ((offset + m->m_pkthdr.csum_data) <<
		    GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE;
	}
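	/*
	 * Worked example (illustrative): for a TCP segment without IP
	 * options, offset is 14 (Ethernet header) plus 20 (IP header)
	 * = 34, and the stack sets csum_data to the offset of th_sum
	 * within the TCP header (16), so the chip is told to start
	 * summing at byte 34 and stuff the result at byte 50.
	 */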
	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, GEM_NTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
		    txs->txs_dmamap, *m_head, txsegs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs <= GEM_NTXSEGS,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));

	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, in
	 * order to prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = 0;
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		return (ENOBUFS);
	}

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->sc_txnext;
	nexttx = txs->txs_firstdesc;
	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
		CTR6(KTR_GEM,
		    "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
		    __func__, seg, nexttx, txsegs[seg].ds_len,
		    txsegs[seg].ds_addr,
		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
		sc->sc_txdescs[nexttx].gd_addr =
		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
		KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("%s: segment size too large!", __func__));
		flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
		sc->sc_txdescs[nexttx].gd_flags =
		    GEM_DMA_WRITE(sc, flags | cflags);
		txs->txs_lastdesc = nexttx;
	}

	/* Set EOP on the last descriptor. */
	CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
	    __func__, seg, nexttx);
	sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
	    GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);

	/* Lastly set SOP on the first descriptor. */
	CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
	    __func__, seg, nexttx);
	if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
		sc->sc_txwin = 0;
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
		    GEM_TD_START_OF_PACKET);
	} else
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

	CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
	    __func__, txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;

	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;

	return (0);
}
static void
gem_init_regs(struct gem_softc *sc)
{
	const u_char *laddr = IF_LLADDR(sc->sc_ifp);

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* These registers are not cleared on reset. */
	if ((sc->sc_flags & GEM_INITED) == 0) {
		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG0, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG1, 8);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG2, 4);

		/* min frame length */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* max frame length and max burst size */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

		/* more magic values */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8088);

		/* random number seed */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_RANDOM_SEED,
		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

		/* secondary MAC address: 0:0:0:0:0:0 */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR3, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR4, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR5, 0);

		/* MAC control address: 01:80:c2:00:00:01 */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter address: 0:0:0:0:0:0 */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_flags |= GEM_INITED;
	}

	/* Counters need to be zeroed. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);

	/* Set XOFF PAUSE time. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);

	/* Set the station address. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);

	/* Enable MII outputs. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
}
static void
gem_start(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	gem_start_locked(ifp);
	GEM_UNLOCK(sc);
}
static inline void
gem_txkick(struct gem_softc *sc)
{

	/*
	 * Update the TX kick register.  This register has to point to the
	 * descriptor after the last valid one and for optimum performance
	 * should be incremented in multiples of 4 (the DMA engine fetches/
	 * updates descriptors in batches of 4).
	 */
	CTR3(KTR_GEM, "%s: %s: kicking TX %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	GEM_BANK1_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);
}
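/*
 * Illustrative example: as sc_txnext advances 1 -> 2 -> 3 -> 4 while
 * packets are being queued, gem_start_locked() below only calls
 * gem_txkick() once sc_txnext hits a multiple of 4, handing the DMA
 * engine a full batch of four descriptors at a time.
 */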
static void
gem_start_locked(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mbuf *m;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
		return;

	CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
	    sc->sc_txnext);

	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if (gem_load_txmbuf(sc, &m) != 0) {
			if (m == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		}
		if ((sc->sc_txnext % 4) == 0)
			gem_txkick(sc);
	}

	CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
	    device_get_name(sc->sc_dev), sc->sc_txnext);

	/* Set a watchdog timer in case the chip flakes out. */
	sc->sc_wdog_timer = 5;
	CTR3(KTR_GEM, "%s: %s: watchdog %d",
	    device_get_name(sc->sc_dev), __func__,
	    sc->sc_wdog_timer);
}
static void
gem_tint(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct gem_txsoft *txs;
	uint32_t txlast;
	int i;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);

	/*
	 * Go through our TX list and free mbufs for those
	 * frames that have been transmitted.
	 */
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		if ((ifp->if_flags & IFF_DEBUG) != 0) {
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t",
				    (long long)GEM_DMA_READ(sc,
				    sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n",
				    (long long)GEM_DMA_READ(sc,
				    sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed + 1.
		 */
		txlast = GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION);
		CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps. */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

		CTR1(KTR_GEM, "%s: releasing a descriptor", __func__);
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    __func__, GEM_BANK1_READ_4(sc, GEM_TX_STATE_MACHINE),
	    ((long long)GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) |
	    GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_LO),
	    GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION));

	if (sc->sc_txfree == GEM_NTXDESC - 1)
		sc->sc_txwin = 0;

	/*
	 * We freed some descriptors, so reset IFF_DRV_OACTIVE
	 * and restart.
	 */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if (STAILQ_EMPTY(&sc->sc_txdirtyq))
		sc->sc_wdog_timer = 0;
	gem_start_locked(ifp);

	CTR3(KTR_GEM, "%s: %s: watchdog %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
}
#ifdef GEM_RINT_TIMEOUT
static void
gem_rint_timeout(void *arg)
{
	struct gem_softc *sc = arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	gem_rint(sc);
}
#endif
static void
gem_rint(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	uint64_t rxstat;
	uint32_t rxcomp;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION);
	CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d",
	    __func__, sc->sc_rxptr, rxcomp);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (; sc->sc_rxptr != rxcomp;) {
		m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
		rxstat = GEM_DMA_READ(sc,
		    sc->sc_rxdescs[sc->sc_rxptr].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			m = NULL;
			goto kickit;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
			goto kickit;
		}

		if ((ifp->if_flags & IFF_DEBUG) != 0) {
			printf(" rxsoft %p descriptor %d: ",
			    &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
			printf("gd_flags: 0x%016llx\t",
			    (long long)GEM_DMA_READ(sc,
			    sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
			printf("gd_addr: 0x%016llx\n",
			    (long long)GEM_DMA_READ(sc,
			    sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
		}

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
		}

 kickit:
		/*
		 * Update the RX kick register.  This register has to point
		 * to the descriptor after the last valid one (before the
		 * current batch) and for optimum performance should be
		 * incremented in multiples of 4 (the DMA engine fetches/
		 * updates descriptors in batches of 4).
		 */
		sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
		if ((sc->sc_rxptr % 4) == 0) {
			GEM_CDSYNC(sc,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			GEM_BANK1_WRITE_4(sc, GEM_RX_KICK,
			    (sc->sc_rxptr + GEM_NRXDESC - 4) &
			    GEM_NRXDESC_MASK);
		}
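		/*
		 * Worked example (illustrative, assuming GEM_NRXDESC is
		 * 256): with sc_rxptr just advanced to 8, the kick
		 * register is written with (8 + 256 - 4) & 255 = 4,
		 * i.e. the last completed batch boundary, four
		 * descriptors behind the software pointer.
		 */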
		if (m == NULL) {
			if (rxstat & GEM_RD_OWN)
				break;
			continue;
		}

		m->m_data += ETHER_ALIGN;	/* first byte offset */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			gem_rxcksum(m, rxstat);

		/* Pass it on. */
		GEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		GEM_LOCK(sc);
	}

	CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__,
	    sc->sc_rxptr, GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION));
}
static int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Zero the packet buffer to aid debugging of DMA. */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
		m_freem(m);
		return (error);
	}

	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	rxs->rxs_mbuf = m;
	rxs->rxs_paddr = segs[0].ds_addr;

	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
	    BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}
static void
gem_eint(struct gem_softc *sc, u_int status)
{

	sc->sc_ifp->if_ierrors++;
	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
		gem_reset_rxdma(sc);
		return;
	}

	device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
	if ((status & GEM_INTR_BERR) != 0) {
		if ((sc->sc_flags & GEM_PCI) != 0)
			printf(", PCI bus error 0x%x\n",
			    GEM_BANK1_READ_4(sc, GEM_PCI_ERROR_STATUS));
		else
			printf(", SBus error 0x%x\n",
			    GEM_BANK1_READ_4(sc, GEM_SBUS_STATUS));
	}
}
void
gem_intr(void *v)
{
	struct gem_softc *sc = v;
	uint32_t status, status2;

	GEM_LOCK(sc);
	status = GEM_BANK1_READ_4(sc, GEM_STATUS);

	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
	    device_get_name(sc->sc_dev), __func__,
	    (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status);

	/*
	 * PCS interrupts must be cleared, otherwise no traffic is passed!
	 */
	if ((status & GEM_INTR_PCS) != 0) {
		status2 =
		    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS) |
		    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS);
		if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
			device_printf(sc->sc_dev,
			    "%s: PCS link status changed\n", __func__);
	}
	if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_STATUS);
		if ((status2 & GEM_MAC_PAUSED) != 0)
			device_printf(sc->sc_dev,
			    "%s: PAUSE received (PAUSE time %d slots)\n",
			    __func__, GEM_MAC_PAUSE_TIME(status2));
		if ((status2 & GEM_MAC_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transitioned to PAUSE state\n", __func__);
		if ((status2 & GEM_MAC_RESUME) != 0)
			device_printf(sc->sc_dev,
			    "%s: transitioned to non-PAUSE state\n",
			    __func__);
	}
	if ((status & GEM_INTR_MIF) != 0)
		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);

	if (__predict_false(status &
	    (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) {
		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS);
		if ((status2 &
		    ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
		    GEM_MAC_TX_PEAK_EXP)) != 0)
			device_printf(sc->sc_dev,
			    "MAC TX fault, status %x\n", status2);
		if ((status2 &
		    (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) {
			sc->sc_ifp->if_oerrors++;
			gem_init_locked(sc);
		}
	}
	if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) {
		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS);
		/*
		 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
		 * revisions GEM_MAC_RX_OVERFLOW happens often due to a
		 * silicon bug, so handle it silently.  Moreover, it's
		 * likely that the receiver has hung so we reset it.
		 */
		if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) {
			sc->sc_ifp->if_ierrors++;
			gem_reset_rxdma(sc);
		} else if ((status2 &
		    ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0)
			device_printf(sc->sc_dev,
			    "MAC RX fault, status %x\n", status2);
	}
	GEM_UNLOCK(sc);
}
static int
gem_watchdog(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	CTR4(KTR_GEM,
	    "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x",
	    __func__, GEM_BANK1_READ_4(sc, GEM_RX_CONFIG),
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS),
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG));
	CTR4(KTR_GEM,
	    "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x",
	    __func__, GEM_BANK1_READ_4(sc, GEM_TX_CONFIG),
	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS),
	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG));

	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
		return (0);

	if ((sc->sc_flags & GEM_LINK) != 0)
		device_printf(sc->sc_dev, "device timeout\n");
	else if (bootverbose)
		device_printf(sc->sc_dev, "device timeout (no link)\n");

	/* Try to get more packets going. */
	gem_init_locked(sc);
	gem_start_locked(ifp);
	return (EJUSTRETURN);
}
static void
gem_mifinit(struct gem_softc *sc)
{

	/* Configure the MIF in frame mode. */
	GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
}
/*
 * The MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 */
int
gem_mii_readreg(device_t dev, int phy, int reg)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

	sc = device_get_softc(dev);
	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		case MII_EXTSR:
			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		return (GEM_BANK1_READ_4(sc, reg));
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_READ |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT);

	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}
int
gem_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

	sc = device_get_softc(dev);
	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			if ((val & GEM_MII_CONTROL_RESET) == 0)
				break;
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONTROL, val);
			GEM_BANK1_BARRIER(sc, GEM_MII_CONTROL, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			if (!GEM_BANK1_BITWAIT(sc, GEM_MII_CONTROL,
			    GEM_MII_CONTROL_RESET, 0))
				device_printf(sc->sc_dev,
				    "cannot reset PCS\n");
			/* FALLTHROUGH */
		case MII_ANAR:
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, 0);
			GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			GEM_BANK1_WRITE_4(sc, GEM_MII_ANAR, val);
			GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG,
			    GEM_MII_CONFIG_ENABLE);
			return (0);
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		GEM_BANK1_WRITE_4(sc, reg, val);
		return (0);
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}
void
gem_mii_statchg(device_t dev)
{
	struct gem_softc *sc;
	int gigabit;
	uint32_t rxcfg, txcfg, v;

	sc = device_get_softc(dev);

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "%s: status change: PHY = %d\n",
		    __func__, sc->sc_phyad);

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= GEM_LINK;
	else
		sc->sc_flags &= ~GEM_LINK;

	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		gigabit = 1;
		break;
	default:
		gigabit = 0;
	}

	/*
	 * The configuration done here corresponds to the steps F) and
	 * G) and as far as enabling of RX and TX MAC goes also step H)
	 * of the initialization sequence outlined in section 3.2.1 of
	 * the GEM Gigabit Ethernet ASIC Specification.
	 */

	rxcfg = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
	rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
	else if (gigabit != 0) {
		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
		txcfg |= GEM_MAC_TX_CARR_EXTEND;
	}
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg);

	v = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_CONFIG) &
	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v);

	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
	else
		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_NORMAL);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	if ((sc->sc_flags & GEM_SERDES) == 0) {
		if ((GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) &
		    GEM_MIF_CONFIG_PHY_SEL) != 0) {
			/* External MII needs echo disable if half duplex. */
			if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
			    IFM_FDX) == 0)
				v |= GEM_MAC_XIF_ECHO_DISABL;
		} else
			/*
			 * Internal MII needs buffer enable.
			 * XXX buffer enable only makes sense for an
			 * internal PHY?
			 */
			v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	if (gigabit != 0)
		v |= GEM_MAC_XIF_GMII_MODE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= GEM_MAC_XIF_FDPLX_LED;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v);

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & GEM_LINK) != 0) {
		GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
		    txcfg | GEM_MAC_TX_ENABLE);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
		    rxcfg | GEM_MAC_RX_ENABLE);
	}
}
int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	int error;

	/* XXX add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}
void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}
static int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			gem_stop(ifp, 0);
		if ((ifp->if_flags & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		sc->sc_ifflags = ifp->if_flags;
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		GEM_LOCK(sc);
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		else
			ifp->if_hwassist = 0;
		GEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
static void
gem_setladrf(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	int i;
	uint32_t crc, hash[16], v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get the current RX configuration. */
	v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * set again as part of the following.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
	    GEM_MAC_RX_PROMISC_GRP);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER,
	    0))
		device_printf(sc->sc_dev, "cannot disable RX hash filter\n");

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast
	 * addresses through a crc generator, and then using the high
	 * order 8 bits as an index into the 256 bit logical address
	 * filter.  The high order 4 bits selects the word, while the
	 * other 4 bits select the bit within the word (where bit 0
	 * is the MSB).
	 */

	/* Clear the hash table. */
	memset(hash, 0, sizeof(hash));

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* We just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	if_maddr_runlock(ifp);
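	/*
	 * Worked example (illustrative): if the top 8 CRC bits are
	 * 0xa5, then crc >> 4 = 0x0a selects hash[10] and crc & 15 = 5
	 * selects bit (15 - 5) = 10, so hash[10] gets bit 10 set.
	 */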
	v |= GEM_MAC_RX_HASH_FILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++)
		GEM_BANK1_WRITE_4(sc,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);

 chipit:
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
}