/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
 */
#if 0	/* XXX: In case of emergency, re-enable this. */
#define	GEM_RINT_TIMEOUT
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>
CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);

#define	GEM_TRIES	10000
/*
 * The hardware supports basic TCP/UDP checksum offloading.  However,
 * the hardware does not compensate the checksum of UDP datagrams, which
 * can yield an (invalid) all-zero checksum.  As a safeguard, UDP checksum
 * offload is disabled by default.  It can be reactivated by setting the
 * special link option link0 with ifconfig(8).
 */
#define	GEM_CSUM_FEATURES	(CSUM_TCP)
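/*
 * Example usage (assuming a hypothetical gem0 interface): the UDP part
 * of the offload can be toggled from userland at runtime with
 *
 *	ifconfig gem0 link0	(enable CSUM_UDP in addition to CSUM_TCP)
 *	ifconfig gem0 -link0	(fall back to TCP-only offload)
 *
 * The new setting takes effect when TX checksum offload is (re)enabled;
 * see the SIOCSIFFLAGS/SIOCSIFCAP handling in gem_ioctl() below.
 */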
static int	gem_add_rxbuf(struct gem_softc *sc, int idx);
static int	gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r,
		    uint32_t clr, uint32_t set);
static void	gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
		    int nsegs, int error);
static int	gem_disable_rx(struct gem_softc *sc);
static int	gem_disable_tx(struct gem_softc *sc);
static void	gem_eint(struct gem_softc *sc, u_int status);
static void	gem_init(void *xsc);
static void	gem_init_locked(struct gem_softc *sc);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int	gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
static int	gem_meminit(struct gem_softc *sc);
static void	gem_mifinit(struct gem_softc *sc);
static void	gem_reset(struct gem_softc *sc);
static int	gem_reset_rx(struct gem_softc *sc);
static void	gem_reset_rxdma(struct gem_softc *sc);
static int	gem_reset_tx(struct gem_softc *sc);
static u_int	gem_ringsize(u_int sz);
static void	gem_rint(struct gem_softc *sc);
#ifdef GEM_RINT_TIMEOUT
static void	gem_rint_timeout(void *arg);
#endif
static inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
static void	gem_rxdrain(struct gem_softc *sc);
static void	gem_setladrf(struct gem_softc *sc);
static void	gem_start(struct ifnet *ifp);
static void	gem_start_locked(struct ifnet *ifp);
static void	gem_stop(struct ifnet *ifp, int disable);
static void	gem_tick(void *arg);
static void	gem_tint(struct gem_softc *sc);
static inline void gem_txkick(struct gem_softc *sc);
static int	gem_watchdog(struct gem_softc *sc);
devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#define	KTR_GEM		KTR_CT2
#define	GEM_BANK1_BITWAIT(sc, r, clr, set)				\
	gem_bitwait((sc), GEM_RES_BANK1, (r), (clr), (set))
#define	GEM_BANK2_BITWAIT(sc, r, clr, set)				\
	gem_bitwait((sc), GEM_RES_BANK2, (r), (clr), (set))
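/*
 * A sketch of the poll idiom these macros support, mirroring its use in
 * gem_reset() below: write a register, issue a bus space barrier so the
 * write is posted before polling starts, then wait for the bits of
 * interest to change:
 *
 *	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
 *	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
 *	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 *	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET,
 *	    GEM_RESET_RX | GEM_RESET_TX, 0))
 *		device_printf(sc->sc_dev, "cannot reset device\n");
 */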
int
gem_attach(struct gem_softc *sc)
{
	struct gem_txsoft *txs;
	struct ifnet *ifp;
	int error, i;
	uint32_t v;

	device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags);
	/* Set up ifnet structure. */
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);
	sc->sc_csum_features = GEM_CSUM_FEATURES;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_init = gem_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
	ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
	IFQ_SET_READY(&ifp->if_snd);
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif
	/* Make sure the chip is stopped. */
	gem_reset(sc);
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
	    NULL, &sc->sc_pdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0,
	    NULL, NULL, &sc->sc_cdmatag);
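	/*
	 * A note on the tag hierarchy just created: the parent tag limits
	 * all child allocations to 32-bit bus addresses (the ring base
	 * registers are only ever programmed with 32-bit values below),
	 * while the three child tags describe the mapping shapes actually
	 * used: single-segment RX cluster buffers, TX mbuf chains of up
	 * to GEM_NTXSEGS segments, and the one page-aligned block of
	 * control data holding both descriptor rings.
	 */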
	/*
	 * Allocate the control data structures, create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_cddmamap)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
	}

	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
	}
	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create TX DMA map %d, error = %d\n",
			    i, error);
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}
	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create RX DMA map %d, error = %d\n",
			    i, error);
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}
	/* Bad things will happen when touching this register on ERI. */
	if (sc->sc_variant != GEM_SUN_ERI)
		GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
		    GEM_MII_DATAPATH_MII);
	/*
	 * Look for an external PHY.
	 */
	error = ENXIO;
	v = GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG);
	if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
		v |= GEM_MIF_CONFIG_PHY_SEL;
		GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			sc->sc_phyad = -1;
			break;
		}
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}
	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be
	 * trusted when the firmware has powered down the chip.
	 */
	if (error != 0 &&
	    ((v & GEM_MIF_CONFIG_MDI0) != 0 || GEM_IS_APPLE(sc))) {
		v &= ~GEM_MIF_CONFIG_PHY_SEL;
		GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
		switch (sc->sc_variant) {
		case GEM_APPLE_K2_GMAC:
			sc->sc_phyad = GEM_PHYAD_INTERNAL;
			break;
		default:
			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
			break;
		}
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}
	/*
	 * Try the external PCS SERDES if we didn't find any PHYs.
	 */
	if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
		GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
		    GEM_MII_DATAPATH_SERDES);
		GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
		GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
		sc->sc_flags |= GEM_SERDES;
		sc->sc_phyad = GEM_PHYAD_EXTERNAL;
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}
	if (error != 0)
		device_printf(sc->sc_dev, "PHY probe failed: %d\n", error);
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Get RX FIFO size. */
	sc->sc_rxfifosize = 64 *
	    GEM_BANK1_READ_4(sc, GEM_RX_FIFO_SIZE);

	/* Get TX FIFO size. */
	v = GEM_BANK1_READ_4(sc, GEM_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    sc->sc_rxfifosize / 1024, v / 16);
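	/*
	 * Unit check for the conversion above: GEM_RX_FIFO_SIZE and
	 * GEM_TX_FIFO_SIZE count 64-byte chunks, so bytes = reg * 64 and
	 * kB = reg * 64 / 1024 = reg / 16, which is what is printed.
	 */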
	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_hwassist |= sc->sc_csum_features;
	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;

	return (0);
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	for (i = 0; i < GEM_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}
void
gem_detach(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	callout_drain(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_drain(&sc->sc_rx_ch);
#endif
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	for (i = 0; i < GEM_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}
void
gem_suspend(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	GEM_UNLOCK(sc);
}
void
gem_resume(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	/*
	 * On resume all registers have to be initialized again like
	 * after power-on.
	 */
	sc->sc_flags &= ~GEM_INITED;
	if (ifp->if_flags & IFF_UP)
		gem_init_locked(sc);
	GEM_UNLOCK(sc);
}
static inline void
gem_rxcksum(struct mbuf *m, uint64_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	uint16_t *opts;
	int32_t hlen, len, pktlen;
	uint32_t temp32;
	uint16_t cksum;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* Cannot handle fragmented packet. */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((uint8_t *)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}

	cksum = ~(flags & GEM_RD_CHECKSUM);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;
}
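/*
 * One step of the one's-complement fixup loop in gem_rxcksum(), worked
 * through by hand: with cksum == 0x0001 and *opts == 0x0003, temp32
 * becomes 0xfffffffe, and folding the carry gives 0xffff + 0xfffe ==
 * 0x1fffd, i.e. cksum == 0xfffd, which is -2 in one's complement:
 * exactly 1 - 3, as required when backing the option words out of the
 * hardware sum.
 */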
static void
gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct gem_softc *sc = xsc;

	if (error != 0)
		return;
	if (nsegs != 1)
		panic("%s: bad control buffer segment count", __func__);
	sc->sc_cddma = segs[0].ds_addr;
}
static void
gem_tick(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Unload collision and error counters.
	 */
	ifp->if_collisions +=
	    GEM_BANK1_READ_4(sc, GEM_MAC_NORM_COLL_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_FIRST_COLL_CNT);
	v = GEM_BANK1_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_LATE_COLL_CNT);
	ifp->if_collisions += v;
	ifp->if_oerrors += v;
	ifp->if_ierrors +=
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CODE_VIOL);

	/*
	 * Then clear the hardware counters.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);

	mii_tick(sc->sc_mii);

	if (gem_watchdog(sc) == EJUSTRETURN)
		return;

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}
static int
gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, uint32_t clr,
    uint32_t set)
{
	int i;
	uint32_t reg;

	for (i = GEM_TRIES; i--; DELAY(100)) {
		reg = GEM_BANKN_READ_M(bank, 4, sc, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}
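/*
 * Polling budget: with GEM_TRIES == 10000 iterations and DELAY(100)
 * (100 us) per iteration, a failed gem_bitwait() gives up after roughly
 * one second of register polling.
 */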
static void
gem_reset(struct gem_softc *sc)
{

	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset. */
	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
}
static void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}
static void
gem_stop(struct ifnet *ifp, int disable)
{
	struct gem_softc *sc = ifp->if_softc;
	struct gem_txsoft *txs;

	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);

	callout_stop(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif
	gem_reset_tx(sc);
	gem_reset_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~GEM_LINK;
	sc->sc_wdog_timer = 0;
}
static int
gem_reset_rx(struct gem_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	(void)gem_disable_rx(sc);
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable RX DMA\n");

	/* Finally, reset the ERX. */
	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX);
	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX,
	    0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}
/*
 * Reset the receiver DMA engine.
 *
 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
 * etc. in order to reset the receiver DMA engine only and not do a full
 * reset which amongst others also downs the link and clears the FIFOs.
 */
static void
gem_reset_rxdma(struct gem_softc *sc)
{
	int i;

	if (gem_reset_rx(sc) != 0)
		return (gem_init_locked(sc));
	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
			GEM_UPDATE_RXDESC(sc, i);
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* NOTE: we use only 32-bit DMA addresses here. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
	    gem_ringsize(GEM_NRXDESC /* XXX */) |
	    ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT) |
	    (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT));
	/* Adjusting for the SBus clock probably isn't worth the fuzz. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
	    ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) <<
	    GEM_RX_BLANKING_TIME_SHIFT) | 6);
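	/*
	 * A note on the GEM_PCI66 factor above (a sketch, assuming the
	 * blanking counter ticks at a bus-clock-derived rate): doubling
	 * the programmed value on 66 MHz PCI keeps the interrupt blanking
	 * interval roughly constant in wall-clock time across the two PCI
	 * speeds, while the SBus clock case is deliberately left alone
	 * per the preceding comment.
	 */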
	GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) | GEM_MAC_RX_ENABLE);
}
static int
gem_reset_tx(struct gem_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	(void)gem_disable_tx(sc);
	GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable TX DMA\n");

	/* Finally, reset the ETX. */
	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_TX);
	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX,
	    0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}
static int
gem_disable_rx(struct gem_softc *sc)
{

	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE,
	    0));
}

static int
gem_disable_tx(struct gem_softc *sc)
{

	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE);
	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE,
	    0));
}
static int
gem_meminit(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int error, i;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	sc->sc_txfree = GEM_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev,
				    "unable to allocate or map RX buffer %d, "
				    "error = %d\n", i, error);
				/*
				 * XXX we should attempt to run with fewer
				 * receive buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
static u_int
gem_ringsize(u_int sz)
{

	switch (sz) {
	case 32:
		return (GEM_RING_SZ_32);
	case 64:
		return (GEM_RING_SZ_64);
	case 128:
		return (GEM_RING_SZ_128);
	case 256:
		return (GEM_RING_SZ_256);
	case 512:
		return (GEM_RING_SZ_512);
	case 1024:
		return (GEM_RING_SZ_1024);
	case 2048:
		return (GEM_RING_SZ_2048);
	case 4096:
		return (GEM_RING_SZ_4096);
	case 8192:
		return (GEM_RING_SZ_8192);
	default:
		printf("%s: invalid ring size %u\n", __func__, sz);
		return (GEM_RING_SZ_32);
	}
}
static void
gem_init(void *xsc)
{
	struct gem_softc *sc = xsc;

	GEM_LOCK(sc);
	gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init_locked(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
	    __func__);
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel. */
	gem_stop(ifp, 0);
	gem_reset(sc);
	CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
	    __func__);

	/* Re-initialize the MIF. */
	gem_mifinit(sc);

	/* step 3.  Setup data structures in host memory. */
	if (gem_meminit(sc) != 0)
		return;

	/* step 4.  TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5.  RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7.  Program Descriptor Ring Base Addresses. */
	/* NOTE: we use only 32-bit DMA addresses here. */
	GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0);
	GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
	/* step 8.  Global Configuration & Interrupt Mask */

	/*
	 * Set the internal arbitration to "infinite" bursts of the
	 * maximum length of 31 * 64 bytes so DMA transfers aren't
	 * split up in cache line size chunks.  This greatly improves
	 * performance.
	 * Enable silicon bug workarounds for the Apple variants.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_CONFIG,
	    GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
	    ((sc->sc_flags & GEM_PCI) != 0 ? GEM_CONFIG_BURST_INF :
	    GEM_CONFIG_BURST_64) | (GEM_IS_APPLE(sc) ?
	    GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));

	GEM_BANK1_WRITE_4(sc, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
	    GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
	    GEM_INTR_BERR
#ifdef GEM_DEBUG
	    | GEM_INTR_PCS | GEM_INTR_MIF
#endif
	    ));
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_MASK,
	    GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
	    GEM_MAC_TX_PEAK_EXP);
#ifdef GEM_DEBUG
	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
	    ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
#else
	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
	    GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
#endif
	/* step 9.  ETX Configuration: use mostly default values. */

	/* Enable DMA. */
	v = gem_ringsize(GEM_NTXDESC);
	/* Set TX FIFO threshold and enable DMA. */
	v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x4ff) << 10) &
	    GEM_TX_CONFIG_TXFIFO_TH;
	GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
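	/*
	 * A reading of the threshold above (an assumption, not taken from
	 * the manual): GEM_TX_CONFIG_TXFIFO_TH controls how much data
	 * must be buffered before the MAC starts transmitting, so the
	 * large 0x4ff used on non-ERI parts trades a little latency for
	 * resistance to TX underruns, while ERI uses the smaller 0x100 to
	 * match its smaller FIFO.
	 */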
	/* step 10.  ERX Configuration */

	/* Encode Receive Descriptor ring size. */
	v = gem_ringsize(GEM_NRXDESC /* XXX */);
	/* RX TCP/UDP checksum offset */
	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT);
	/* Set RX FIFO threshold, set first byte offset and enable DMA. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
	    v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) |
	    GEM_RX_CONFIG_RXDMA_EN);
	/* Adjusting for the SBus clock probably isn't worth the fuzz. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
	    ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) <<
	    GEM_RX_BLANKING_TIME_SHIFT) | 6);
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
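	/*
	 * Worked example for the thresholds above, assuming a 16 kB RX
	 * FIFO (sc_rxfifosize == 16384) and 64-byte threshold units
	 * (consistent with the 3/4 and 1/4 figures in the comment):
	 * OFF = 3 * 16384 / 256 = 192 units = 12 kB (3/4 full) in the
	 * low field, ON = 16384 / 256 = 64 units = 4 kB (1/4 full) in
	 * the field at bit 12.
	 */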
	/* step 11.  Configure Media. */

	/* step 12.  RX_MAC Configuration Register */
	v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot configure RX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);

	/* step 13.  TX_MAC Configuration Register */
	v = GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG);
	v |= GEM_MAC_TX_ENABLE;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot configure TX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, v);

	/* step 14.  Issue Transmit Pending command. */

	/* step 15.  Give the receiver a swift kick. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii_mediachg(sc->sc_mii);

	/* Start the one second timer. */
	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}
static int
gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t txsegs[GEM_NTXSEGS];
	struct gem_txsoft *txs;
	struct ip *ip;
	struct mbuf *m;
	uint64_t cflags, flags;
	int error, nexttx, nsegs, offset, seg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}

	cflags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
		if (M_WRITABLE(*m_head) == 0) {
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			*m_head = m;
			if (m == NULL)
				return (ENOBUFS);
		}
		offset = sizeof(struct ether_header);
		m = m_pullup(*m_head, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, caddr_t) + offset);
		offset += (ip->ip_hl << 2);
		cflags = offset << GEM_TD_CXSUM_STARTSHFT |
		    ((offset + m->m_pkthdr.csum_data) <<
		    GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE;
		*m_head = m;
	}
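	/*
	 * Reading the cflags computed above: GEM_TD_CXSUM_STARTSHFT
	 * carries the byte offset at which the hardware starts summing
	 * (the Ethernet header plus the IP header length, i.e. the start
	 * of the L4 header) and GEM_TD_CXSUM_STUFFSHFT the offset at
	 * which the result is stuffed (that start offset plus csum_data,
	 * the checksum field offset supplied by the stack).
	 */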
	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, GEM_NTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
		    txs->txs_dmamap, *m_head, txsegs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs <= GEM_NTXSEGS,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, in
	 * order to prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = 0;
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		return (ENOBUFS);
	}

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->sc_txnext;
	nexttx = txs->txs_firstdesc;
	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
		CTR6(KTR_GEM,
		    "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
		    __func__, seg, nexttx, txsegs[seg].ds_len,
		    txsegs[seg].ds_addr,
		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
		sc->sc_txdescs[nexttx].gd_addr =
		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
		KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("%s: segment size too large!", __func__));
		flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
		sc->sc_txdescs[nexttx].gd_flags =
		    GEM_DMA_WRITE(sc, flags | cflags);
		txs->txs_lastdesc = nexttx;
	}

	/* Set EOP on the last descriptor. */
	CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
	    __func__, seg, nexttx);
	sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
	    GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);

	/* Lastly set SOP on the first descriptor. */
	CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
	    __func__, seg, nexttx);
	if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
		sc->sc_txwin = 0;
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
		    GEM_TD_START_OF_PACKET);
	} else
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);
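	/*
	 * Interrupt moderation: sc_txwin counts descriptors queued since
	 * the last time a TX completion interrupt was requested; once
	 * more than 2/3 of GEM_NTXSEGS descriptors have accumulated,
	 * GEM_TD_INTERRUPT_ME is set on the SOP descriptor so completions
	 * are reaped in batches rather than per packet.
	 */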
	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

	CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
	    __func__, txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);

	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;

	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;

	return (0);
}
static void
gem_init_regs(struct gem_softc *sc)
{
	const u_char *laddr = IF_LLADDR(sc->sc_ifp);

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* These registers are not cleared on reset. */
	if ((sc->sc_flags & GEM_INITED) == 0) {
		/* magic values */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG0, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG1, 8);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG2, 4);

		/* min frame length */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* max frame length and max burst size */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

		/* more magic values */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8088);

		/* random number seed */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_RANDOM_SEED,
		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

		/* secondary MAC address: 0:0:0:0:0:0 */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR3, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR4, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR5, 0);

		/* MAC control address: 01:80:c2:00:00:01 */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter address: 0:0:0:0:0:0 */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_flags |= GEM_INITED;
	}

	/* Counters need to be zeroed. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);

	/* Set XOFF PAUSE time. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);

	/* Set the station address. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);

	/* Enable MII outputs. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
}
static void
gem_start(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	gem_start_locked(ifp);
	GEM_UNLOCK(sc);
}
static inline void
gem_txkick(struct gem_softc *sc)
{

	/*
	 * Update the TX kick register.  This register has to point to the
	 * descriptor after the last valid one and for optimum performance
	 * should be incremented in multiples of 4 (the DMA engine fetches/
	 * updates descriptors in batches of 4).
	 */
	CTR3(KTR_GEM, "%s: %s: kicking TX %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	GEM_BANK1_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);
}
static void
gem_start_locked(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int kicked, ntx;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
		return;

	CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
	    sc->sc_txnext);
	ntx = 0;
	kicked = 0;
	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if (gem_load_txmbuf(sc, &m) != 0) {
			if (m == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		}
		if ((sc->sc_txnext % 4) == 0) {
			gem_txkick(sc);
			kicked = 1;
		} else
			kicked = 0;
		ntx++;
		BPF_MTAP(ifp, m);
	}

	if (ntx > 0) {
		if (kicked == 0)
			gem_txkick(sc);

		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_wdog_timer = 5;
		CTR3(KTR_GEM, "%s: %s: watchdog %d",
		    device_get_name(sc->sc_dev), __func__,
		    sc->sc_wdog_timer);
	}
}
static void
gem_tint(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct gem_txsoft *txs;
	int progress;
	uint32_t txlast;
	int i;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);

	/*
	 * Go through our TX list and free mbufs for those
	 * frames that have been transmitted.
	 */
	progress = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		if ((ifp->if_flags & IFF_DEBUG) != 0) {
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t",
				    (long long)GEM_DMA_READ(sc,
				    sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n",
				    (long long)GEM_DMA_READ(sc,
				    sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed + 1.
		 */
		txlast = GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION);
		CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps. */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

		CTR1(KTR_GEM, "%s: releasing a descriptor", __func__);
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

	CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    __func__, GEM_BANK1_READ_4(sc, GEM_TX_STATE_MACHINE),
	    ((long long)GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) |
	    GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_LO),
	    GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION));

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/*
		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
		 * and restart.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
			sc->sc_wdog_timer = 0;
		gem_start_locked(ifp);
	}

	CTR3(KTR_GEM, "%s: %s: watchdog %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
}
#ifdef GEM_RINT_TIMEOUT
static void
gem_rint_timeout(void *arg)
{
	struct gem_softc *sc = arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	gem_rint(sc);
}
#endif
static void
gem_rint(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	uint64_t rxstat;
	uint32_t rxcomp;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION);
	CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d",
	    __func__, sc->sc_rxptr, rxcomp);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (; sc->sc_rxptr != rxcomp;) {
		m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
		rxstat = GEM_DMA_READ(sc,
		    sc->sc_rxdescs[sc->sc_rxptr].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			m = NULL;
			goto kickit;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
			goto kickit;
		}

		if ((ifp->if_flags & IFF_DEBUG) != 0) {
			printf("    rxsoft %p descriptor %d: ",
			    &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
			printf("gd_flags: 0x%016llx\t",
			    (long long)GEM_DMA_READ(sc,
			    sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
			printf("gd_addr: 0x%016llx\n",
			    (long long)GEM_DMA_READ(sc,
			    sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
		}

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
			ifp->if_iqdrops++;
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
		}

 kickit:
		/*
		 * Update the RX kick register.  This register has to point
		 * to the descriptor after the last valid one (before the
		 * current batch) and for optimum performance should be
		 * incremented in multiples of 4 (the DMA engine fetches/
		 * updates descriptors in batches of 4).
		 */
		sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
		if ((sc->sc_rxptr % 4) == 0) {
			GEM_CDSYNC(sc,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			GEM_BANK1_WRITE_4(sc, GEM_RX_KICK,
			    (sc->sc_rxptr + GEM_NRXDESC - 4) &
			    GEM_NRXDESC_MASK);
		}

		if (m == NULL) {
			if (rxstat & GEM_RD_OWN)
				break;
			continue;
		}

		ifp->if_ipackets++;
		m->m_data += ETHER_ALIGN;	/* first byte offset */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			gem_rxcksum(m, rxstat);

		/* Pass it on. */
		GEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		GEM_LOCK(sc);
	}

	CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__,
	    sc->sc_rxptr, GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION));
}
static int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
	/* Bzero the packet to check DMA. */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
		m_freem(m);
		return (error);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	rxs->rxs_mbuf = m;
	rxs->rxs_paddr = segs[0].ds_addr;

	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
	    BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}
static void
gem_eint(struct gem_softc *sc, u_int status)
{

	sc->sc_ifp->if_ierrors++;
	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
		gem_reset_rxdma(sc);
		return;
	}

	device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
	if ((status & GEM_INTR_BERR) != 0) {
		if ((sc->sc_flags & GEM_PCI) != 0)
			printf(", PCI bus error 0x%x\n",
			    GEM_BANK1_READ_4(sc, GEM_PCI_ERROR_STATUS));
		else
			printf(", SBus error 0x%x\n",
			    GEM_BANK1_READ_4(sc, GEM_SBUS_STATUS));
	} else
		printf("\n");
}
void
gem_intr(void *v)
{
	struct gem_softc *sc = v;
	uint32_t status, status2;

	GEM_LOCK(sc);
	status = GEM_BANK1_READ_4(sc, GEM_STATUS);

	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
	    device_get_name(sc->sc_dev), __func__,
	    (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status);

	/*
	 * PCS interrupts must be cleared, otherwise no traffic is passed!
	 */
	if ((status & GEM_INTR_PCS) != 0) {
		status2 =
		    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS) |
		    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS);
		if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
			device_printf(sc->sc_dev,
			    "%s: PCS link status changed\n", __func__);
	}
	if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_STATUS);
		if ((status2 & GEM_MAC_PAUSED) != 0)
			device_printf(sc->sc_dev,
			    "%s: PAUSE received (PAUSE time %d slots)\n",
			    __func__, GEM_MAC_PAUSE_TIME(status2));
		if ((status2 & GEM_MAC_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transitioned to PAUSE state\n", __func__);
		if ((status2 & GEM_MAC_RESUME) != 0)
			device_printf(sc->sc_dev,
			    "%s: transitioned to non-PAUSE state\n",
			    __func__);
	}
	if ((status & GEM_INTR_MIF) != 0)
		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);

	if (__predict_false(status &
	    (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) {
		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS);
		if ((status2 &
		    ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
		    GEM_MAC_TX_PEAK_EXP)) != 0)
			device_printf(sc->sc_dev,
			    "MAC TX fault, status %x\n", status2);
		if ((status2 &
		    (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) {
			sc->sc_ifp->if_oerrors++;
			gem_init_locked(sc);
		}
	}
	if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) {
		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS);
		/*
		 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
		 * revisions GEM_MAC_RX_OVERFLOW interrupts happen often
		 * due to a silicon bug, so handle them silently.
		 * Moreover, it's likely that the receiver has hung so we
		 * reset it.
		 */
		if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) {
			sc->sc_ifp->if_ierrors++;
			gem_reset_rxdma(sc);
		} else if ((status2 &
		    ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0)
			device_printf(sc->sc_dev,
			    "MAC RX fault, status %x\n", status2);
	}
	GEM_UNLOCK(sc);
}
static int
gem_watchdog(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	CTR4(KTR_GEM,
	    "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x",
	    __func__, GEM_BANK1_READ_4(sc, GEM_RX_CONFIG),
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS),
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG));
	CTR4(KTR_GEM,
	    "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x",
	    __func__, GEM_BANK1_READ_4(sc, GEM_TX_CONFIG),
	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS),
	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG));

	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
		return (0);

	if ((sc->sc_flags & GEM_LINK) != 0)
		device_printf(sc->sc_dev, "device timeout\n");
	else if (bootverbose)
		device_printf(sc->sc_dev, "device timeout (no link)\n");
	ifp->if_oerrors++;

	/* Try to get more packets going. */
	gem_init_locked(sc);
	gem_start_locked(ifp);
	return (EJUSTRETURN);
}
static void
gem_mifinit(struct gem_softc *sc)
{

	/* Configure the MIF in frame mode. */
	GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
	GEM_BANK1_BARRIER(sc, GEM_MIF_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/*
 * MII interface
 *
 * The MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * a busy bit.
 */
int
gem_mii_readreg(device_t dev, int phy, int reg)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

	sc = device_get_softc(dev);
	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_ANAR:
			reg = GEM_MII_ANAR;
			break;
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		case MII_EXTSR:
			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		return (GEM_BANK1_READ_4(sc, reg));
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_READ |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT);

	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}
int
gem_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

	sc = device_get_softc(dev);
	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			if ((val & GEM_MII_CONTROL_RESET) == 0)
				break;
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONTROL, val);
			GEM_BANK1_BARRIER(sc, GEM_MII_CONTROL, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			if (!GEM_BANK1_BITWAIT(sc, GEM_MII_CONTROL,
			    GEM_MII_CONTROL_RESET, 0))
				device_printf(sc->sc_dev,
				    "cannot reset PCS\n");
			/* FALLTHROUGH */
		case MII_ANAR:
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, 0);
			GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			GEM_BANK1_WRITE_4(sc, GEM_MII_ANAR, val);
			GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG,
			    GEM_MII_CONFIG_ENABLE);
			return (0);
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		GEM_BANK1_WRITE_4(sc, reg, val);
		return (0);
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}
void
gem_mii_statchg(device_t dev)
{
	struct gem_softc *sc;
	int gigabit;
	uint32_t rxcfg, txcfg, v;

	sc = device_get_softc(dev);

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "%s: status change: PHY = %d\n",
		    __func__, sc->sc_phyad);

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= GEM_LINK;
	else
		sc->sc_flags &= ~GEM_LINK;

	gigabit = 0;
	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		gigabit = 1;
	}

	/*
	 * The configuration done here corresponds to the steps F) and
	 * G) and as far as enabling of RX and TX MAC goes also step H)
	 * of the initialization sequence outlined in section 3.2.1 of
	 * the GEM Gigabit Ethernet ASIC Specification.
	 */

	rxcfg = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
	rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
	else if (gigabit != 0) {
		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
		txcfg |= GEM_MAC_TX_CARR_EXTEND;
	}
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg);

	v = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_CONFIG) &
	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v);

	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
	else
		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_NORMAL);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	if ((sc->sc_flags & GEM_SERDES) == 0) {
		if ((GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) &
		    GEM_MIF_CONFIG_PHY_SEL) != 0) {
			/* External MII needs echo disable if half duplex. */
			if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
			    IFM_FDX) == 0)
				v |= GEM_MAC_XIF_ECHO_DISABL;
		} else
			/*
			 * Internal MII needs buffer enable.
			 * XXX buffer enable only makes sense for an
			 * internal PHY.
			 */
			v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	if (gigabit != 0)
		v |= GEM_MAC_XIF_GMII_MODE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= GEM_MAC_XIF_FDPLX_LED;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v);

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & GEM_LINK) != 0) {
		GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
		    txcfg | GEM_MAC_TX_ENABLE);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
		    rxcfg | GEM_MAC_RX_ENABLE);
	}
}
static int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	int error;

	/* XXX add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}

static void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}
static int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			gem_stop(ifp, 0);
		if ((ifp->if_flags & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		sc->sc_ifflags = ifp->if_flags;
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		GEM_LOCK(sc);
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		else
			ifp->if_hwassist = 0;
		GEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
static void
gem_setladrf(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	int i;
	uint32_t hash[16];
	uint32_t crc, v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get the current RX configuration. */
	v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * set again below.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
	    GEM_MAC_RX_PROMISC_GRP);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER,
	    0))
		device_printf(sc->sc_dev, "cannot disable RX hash filter\n");

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast
	 * addresses through a crc generator, and then using the high
	 * order 8 bits as an index into the 256 bit logical address
	 * filter.  The high order 4 bits selects the word, while the
	 * other 4 bits select the bit within the word (where bit 0
	 * is the MSB).
	 */

	/* Clear the hash table. */
	memset(hash, 0, sizeof(hash));

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* We just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	if_maddr_runlock(ifp);
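	/*
	 * Worked example for the mapping above, assuming a multicast
	 * address whose little-endian CRC32 is 0xA7xxxxxx: crc >>= 24
	 * leaves 0xA7 (167), hash[167 >> 4] == hash[10] is the target
	 * word, and 167 & 15 == 7 selects bit 15 - 7 == 8, matching the
	 * comment that bit 0 of the 256-bit filter is the MSB.
	 */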
	v |= GEM_MAC_RX_HASH_FILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++)
		GEM_BANK1_WRITE_4(sc,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);

 chipit:
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
}