/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
 */
#if 0 /* XXX: In case of emergency, re-enable this. */
#define GEM_RINT_TIMEOUT
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>
CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);
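/*
 * Note: the ring size encodings handled by gem_ringsize() only exist for
 * powers of two between 32 and 8192 descriptors, hence the asserts above.
 */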
#define GEM_TRIES	10000
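/*
 * gem_bitwait() polls up to GEM_TRIES times with a 100us delay between
 * reads, i.e. register bits get roughly one second to change state.
 */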
/*
 * The hardware supports basic TCP/UDP checksum offloading.  However,
 * the hardware doesn't compensate the checksum for UDP datagrams,
 * which can yield a transmitted checksum of 0x0 (meaning "no checksum"
 * on the wire).  As a safeguard, UDP checksum offload is disabled by
 * default.  It can be reactivated by setting the special link option
 * link0 with ifconfig(8).
 */
#define GEM_CSUM_FEATURES	(CSUM_TCP)
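/*
 * For example (assuming the interface is named gem0):
 *	ifconfig gem0 link0	# enable UDP TX checksum offload
 *	ifconfig gem0 -link0	# disable it again
 */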
static int	gem_add_rxbuf(struct gem_softc *sc, int idx);
static int	gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r,
		    uint32_t clr, uint32_t set);
static void	gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
		    int nsegs, int error);
static int	gem_disable_rx(struct gem_softc *sc);
static int	gem_disable_tx(struct gem_softc *sc);
static void	gem_eint(struct gem_softc *sc, u_int status);
static void	gem_init(void *xsc);
static void	gem_init_locked(struct gem_softc *sc);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int	gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
static int	gem_meminit(struct gem_softc *sc);
static void	gem_mifinit(struct gem_softc *sc);
static void	gem_reset(struct gem_softc *sc);
static int	gem_reset_rx(struct gem_softc *sc);
static void	gem_reset_rxdma(struct gem_softc *sc);
static int	gem_reset_tx(struct gem_softc *sc);
static u_int	gem_ringsize(u_int sz);
static void	gem_rint(struct gem_softc *sc);
#ifdef GEM_RINT_TIMEOUT
static void	gem_rint_timeout(void *arg);
#endif
static inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
static void	gem_rxdrain(struct gem_softc *sc);
static void	gem_setladrf(struct gem_softc *sc);
static void	gem_start(struct ifnet *ifp);
static void	gem_start_locked(struct ifnet *ifp);
static void	gem_stop(struct ifnet *ifp, int disable);
static void	gem_tick(void *arg);
static void	gem_tint(struct gem_softc *sc);
static inline void gem_txkick(struct gem_softc *sc);
static int	gem_watchdog(struct gem_softc *sc);
devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);
#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define KTR_GEM		KTR_SPARE2
#endif
#define GEM_BANK1_BITWAIT(sc, r, clr, set) \
    gem_bitwait((sc), GEM_RES_BANK1, (r), (clr), (set))
#define GEM_BANK2_BITWAIT(sc, r, clr, set) \
    gem_bitwait((sc), GEM_RES_BANK2, (r), (clr), (set))
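/*
 * Typical use: spin until the given bits are clear (clr) and/or set (set),
 * e.g. GEM_BANK1_BITWAIT(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0)
 * waits for RX DMA to be disabled; gem_bitwait() returns 0 on timeout.
 */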
int
gem_attach(struct gem_softc *sc)
{
    struct gem_txsoft *txs;
    struct ifnet *ifp;
    int error, i, phy;
    uint32_t v;

    if (bootverbose)
        device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags);
    /* Set up ifnet structure. */
    ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL)
        return (ENOSPC);
    sc->sc_csum_features = GEM_CSUM_FEATURES;
    ifp->if_softc = sc;
    if_initname(ifp, device_get_name(sc->sc_dev),
        device_get_unit(sc->sc_dev));
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_start = gem_start;
    ifp->if_ioctl = gem_ioctl;
    ifp->if_init = gem_init;
    IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
    ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
    IFQ_SET_READY(&ifp->if_snd);
    callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
    callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif

    /* Make sure the chip is stopped. */
    gem_reset(sc);
    error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
        BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
        NULL, &sc->sc_pdmatag);
    if (error != 0)
        return (error);
    error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
        1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
    if (error != 0)
        goto fail_ptag;
    error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
        MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
        BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
    if (error != 0)
        goto fail_rtag;
    error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
        sizeof(struct gem_control_data), 1,
        sizeof(struct gem_control_data), 0,
        NULL, NULL, &sc->sc_cdmatag);
    if (error != 0)
        goto fail_ttag;
    /*
     * Allocate the control data structures, create and load the
     * DMA map for it.
     */
    if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
        (void **)&sc->sc_control_data,
        BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
        &sc->sc_cddmamap)) != 0) {
        device_printf(sc->sc_dev,
            "unable to allocate control data, error = %d\n", error);
        goto fail_ctag;
    }
    sc->sc_cddma = 0;
    if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
        sc->sc_control_data, sizeof(struct gem_control_data),
        gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
        device_printf(sc->sc_dev,
            "unable to load control data DMA map, error = %d\n",
            error);
        goto fail_cmem;
    }
    /*
     * Initialize the transmit job descriptors.
     */
    STAILQ_INIT(&sc->sc_txfreeq);
    STAILQ_INIT(&sc->sc_txdirtyq);
    /*
     * Create the transmit buffer DMA maps.
     */
    for (i = 0; i < GEM_TXQUEUELEN; i++) {
        txs = &sc->sc_txsoft[i];
        txs->txs_mbuf = NULL;
        txs->txs_ndescs = 0;
        if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
            &txs->txs_dmamap)) != 0) {
            device_printf(sc->sc_dev,
                "unable to create TX DMA map %d, error = %d\n",
                i, error);
            goto fail_txd;
        }
        STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
    }
    /*
     * Create the receive buffer DMA maps.
     */
    for (i = 0; i < GEM_NRXDESC; i++) {
        if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
            &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
            device_printf(sc->sc_dev,
                "unable to create RX DMA map %d, error = %d\n",
                i, error);
            goto fail_rxd;
        }
        sc->sc_rxsoft[i].rxs_mbuf = NULL;
    }
    /* Bypass probing PHYs if we already know for sure to use a SERDES. */
    if ((sc->sc_flags & GEM_SERDES) != 0)
        goto serdes;

    /* Bad things will happen when touching this register on ERI. */
    if (sc->sc_variant != GEM_SUN_ERI) {
        GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
            GEM_MII_DATAPATH_MII);
        GEM_BANK1_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    }

    gem_mifinit(sc);
    /*
     * Look for an external PHY.
     */
    error = ENXIO;
    v = GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG);
    if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
        v |= GEM_MIF_CONFIG_PHY_SEL;
        GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
        GEM_BANK1_BARRIER(sc, GEM_MIF_CONFIG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        switch (sc->sc_variant) {
        case GEM_SUN_ERI:
            phy = GEM_PHYAD_EXTERNAL;
            break;
        default:
            phy = MII_PHY_ANY;
            break;
        }
        error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
            gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy,
            MII_OFFSET_ANY, MIIF_DOPAUSE);
    }
    /*
     * Fall back on an internal PHY if no external PHY was found.
     * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be
     * trusted when the firmware has powered down the chip.
     */
    if (error != 0 &&
        ((v & GEM_MIF_CONFIG_MDI0) != 0 || GEM_IS_APPLE(sc))) {
        v &= ~GEM_MIF_CONFIG_PHY_SEL;
        GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
        GEM_BANK1_BARRIER(sc, GEM_MIF_CONFIG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        switch (sc->sc_variant) {
        case GEM_SUN_ERI:
        case GEM_APPLE_K2_GMAC:
            phy = GEM_PHYAD_INTERNAL;
            break;
        case GEM_APPLE_GMAC:
            phy = GEM_PHYAD_EXTERNAL;
            break;
        default:
            phy = MII_PHY_ANY;
            break;
        }
        error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
            gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy,
            MII_OFFSET_ANY, MIIF_DOPAUSE);
    }
    /*
     * Try the external PCS SERDES if we didn't find any PHYs.
     */
    if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
serdes:
        GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
            GEM_MII_DATAPATH_SERDES);
        GEM_BANK1_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4,
            BUS_SPACE_BARRIER_WRITE);
        GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
            GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
        GEM_BANK1_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4,
            BUS_SPACE_BARRIER_WRITE);
        GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
        GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
            BUS_SPACE_BARRIER_WRITE);
        sc->sc_flags |= GEM_SERDES;
        error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
            gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK,
            GEM_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE);
    }
    if (error != 0) {
        device_printf(sc->sc_dev, "attaching PHYs failed\n");
        goto fail_rxd;
    }
    sc->sc_mii = device_get_softc(sc->sc_miibus);
    /*
     * From this point forward, the attachment cannot fail.  A failure
     * before this point releases all resources that may have been
     * allocated.
     */
    /* Get RX FIFO size. */
    sc->sc_rxfifosize = 64 *
        GEM_BANK1_READ_4(sc, GEM_RX_FIFO_SIZE);

    /* Get TX FIFO size. */
    v = GEM_BANK1_READ_4(sc, GEM_TX_FIFO_SIZE);
    device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
        sc->sc_rxfifosize / 1024, v / 16);
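    /*
     * Note: both FIFO size registers count in units of 64 bytes, which
     * is why the RX value is scaled by 64 above and the TX value is
     * printed as v / 16 (i.e. v * 64 / 1024) kB.
     */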
    /* Attach the interface. */
    ether_ifattach(ifp, sc->sc_enaddr);

    /*
     * Tell the upper layer(s) we support long frames/checksum offloads.
     */
    ifp->if_hdrlen = sizeof(struct ether_vlan_header);
    ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
    ifp->if_hwassist |= sc->sc_csum_features;
    ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;

    return (0);
    /*
     * Free any resources we've allocated during the failed attach
     * attempt.  Do this in reverse order and fall through.
     */
fail_rxd:
    for (i = 0; i < GEM_NRXDESC; i++)
        if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
            bus_dmamap_destroy(sc->sc_rdmatag,
                sc->sc_rxsoft[i].rxs_dmamap);
fail_txd:
    for (i = 0; i < GEM_TXQUEUELEN; i++)
        if (sc->sc_txsoft[i].txs_dmamap != NULL)
            bus_dmamap_destroy(sc->sc_tdmatag,
                sc->sc_txsoft[i].txs_dmamap);
    bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
    bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
        sc->sc_cddmamap);
fail_ctag:
    bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
    bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
    bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
    bus_dma_tag_destroy(sc->sc_pdmatag);
    return (error);
}
void
gem_detach(struct gem_softc *sc)
{
    struct ifnet *ifp = sc->sc_ifp;
    int i;

    ether_ifdetach(ifp);
    GEM_LOCK(sc);
    gem_stop(ifp, 1);
    GEM_UNLOCK(sc);
    callout_drain(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
    callout_drain(&sc->sc_rx_ch);
#endif
    if_free(ifp);
    device_delete_child(sc->sc_dev, sc->sc_miibus);

    for (i = 0; i < GEM_NRXDESC; i++)
        if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
            bus_dmamap_destroy(sc->sc_rdmatag,
                sc->sc_rxsoft[i].rxs_dmamap);
    for (i = 0; i < GEM_TXQUEUELEN; i++)
        if (sc->sc_txsoft[i].txs_dmamap != NULL)
            bus_dmamap_destroy(sc->sc_tdmatag,
                sc->sc_txsoft[i].txs_dmamap);
    GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
    bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
        sc->sc_cddmamap);
    bus_dma_tag_destroy(sc->sc_cdmatag);
    bus_dma_tag_destroy(sc->sc_tdmatag);
    bus_dma_tag_destroy(sc->sc_rdmatag);
    bus_dma_tag_destroy(sc->sc_pdmatag);
}
void
gem_suspend(struct gem_softc *sc)
{
    struct ifnet *ifp = sc->sc_ifp;

    GEM_LOCK(sc);
    gem_stop(ifp, 0);
    GEM_UNLOCK(sc);
}
void
gem_resume(struct gem_softc *sc)
{
    struct ifnet *ifp = sc->sc_ifp;

    GEM_LOCK(sc);
    /*
     * On resume all registers have to be initialized again like
     * after power-on.
     */
    sc->sc_flags &= ~GEM_INITED;
    if (ifp->if_flags & IFF_UP)
        gem_init_locked(sc);
    GEM_UNLOCK(sc);
}
static inline void
gem_rxcksum(struct mbuf *m, uint64_t flags)
{
    struct ether_header *eh;
    struct ip *ip;
    struct udphdr *uh;
    uint16_t *opts;
    int32_t hlen, len, pktlen;
    uint32_t temp32;
    uint16_t cksum;

    pktlen = m->m_pkthdr.len;
    if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
        return;
    eh = mtod(m, struct ether_header *);
    if (eh->ether_type != htons(ETHERTYPE_IP))
        return;
    ip = (struct ip *)(eh + 1);
    if (ip->ip_v != IPVERSION)
        return;

    hlen = ip->ip_hl << 2;
    pktlen -= sizeof(struct ether_header);
    if (hlen < sizeof(struct ip))
        return;
    if (ntohs(ip->ip_len) < hlen)
        return;
    if (ntohs(ip->ip_len) != pktlen)
        return;
    if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
        return;	/* Cannot handle fragmented packet. */

    switch (ip->ip_p) {
    case IPPROTO_TCP:
        if (pktlen < (hlen + sizeof(struct tcphdr)))
            return;
        break;
    case IPPROTO_UDP:
        if (pktlen < (hlen + sizeof(struct udphdr)))
            return;
        uh = (struct udphdr *)((uint8_t *)ip + hlen);
        if (uh->uh_sum == 0)
            return;	/* no checksum */
        break;
    default:
        return;
    }
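    /*
     * The controller computes its 16-bit checksum starting at a fixed
     * offset (ETHER_HDR_LEN + sizeof(struct ip), see the programming of
     * GEM_RX_CONFIG_CXM_START_SHFT in this file), so any IP option words
     * are wrongly included in the hardware sum.  The fixup loop below
     * subtracts them out again using one's complement arithmetic,
     * folding the carry back in after each subtraction.
     */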
    cksum = ~(flags & GEM_RD_CHECKSUM);
    /* checksum fixup for IP options */
    len = hlen - sizeof(struct ip);
    if (len > 0) {
        opts = (uint16_t *)(ip + 1);
        for (; len > 0; len -= sizeof(uint16_t), opts++) {
            temp32 = cksum - *opts;
            temp32 = (temp32 >> 16) + (temp32 & 65535);
            cksum = temp32 & 65535;
        }
    }
    m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
    m->m_pkthdr.csum_data = cksum;
}
static void
gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
    struct gem_softc *sc = xsc;

    if (error != 0)
        return;
    if (nsegs != 1)
        panic("%s: bad control buffer segment count", __func__);
    sc->sc_cddma = segs[0].ds_addr;
}
static void
gem_tick(void *arg)
{
    struct gem_softc *sc = arg;
    struct ifnet *ifp = sc->sc_ifp;
    uint32_t v;

    GEM_LOCK_ASSERT(sc, MA_OWNED);

    /*
     * Unload collision and error counters.
     */
    if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
        GEM_BANK1_READ_4(sc, GEM_MAC_NORM_COLL_CNT) +
        GEM_BANK1_READ_4(sc, GEM_MAC_FIRST_COLL_CNT));
    v = GEM_BANK1_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) +
        GEM_BANK1_READ_4(sc, GEM_MAC_LATE_COLL_CNT);
    if_inc_counter(ifp, IFCOUNTER_COLLISIONS, v);
    if_inc_counter(ifp, IFCOUNTER_OERRORS, v);
    if_inc_counter(ifp, IFCOUNTER_IERRORS,
        GEM_BANK1_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) +
        GEM_BANK1_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) +
        GEM_BANK1_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) +
        GEM_BANK1_READ_4(sc, GEM_MAC_RX_CODE_VIOL));

    /*
     * Then clear the hardware counters.
     */
    GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);

    mii_tick(sc->sc_mii);

    if (gem_watchdog(sc) == EJUSTRETURN)
        return;

    callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}
static int
gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, uint32_t clr,
    uint32_t set)
{
    int i;
    uint32_t reg;

    for (i = GEM_TRIES; i--; DELAY(100)) {
        reg = GEM_BANKN_READ_M(bank, 4, sc, r);
        if ((reg & clr) == 0 && (reg & set) == set)
            return (1);
    }
    return (0);
}
static void
gem_reset(struct gem_softc *sc)
{

#ifdef GEM_DEBUG
    CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif
    gem_reset_rx(sc);
    gem_reset_tx(sc);

    /* Do a full reset. */
    GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX |
        (sc->sc_variant == GEM_SUN_ERI ? GEM_ERI_CACHE_LINE_SIZE <<
        GEM_RESET_CLSZ_SHFT : 0));
    GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
        device_printf(sc->sc_dev, "cannot reset device\n");
}
static void
gem_rxdrain(struct gem_softc *sc)
{
    struct gem_rxsoft *rxs;
    int i;

    for (i = 0; i < GEM_NRXDESC; i++) {
        rxs = &sc->sc_rxsoft[i];
        if (rxs->rxs_mbuf != NULL) {
            bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
                BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
            m_freem(rxs->rxs_mbuf);
            rxs->rxs_mbuf = NULL;
        }
    }
}
static void
gem_stop(struct ifnet *ifp, int disable)
{
    struct gem_softc *sc = ifp->if_softc;
    struct gem_txsoft *txs;

#ifdef GEM_DEBUG
    CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

    callout_stop(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
    callout_stop(&sc->sc_rx_ch);
#endif

    gem_reset_tx(sc);
    gem_reset_rx(sc);

    /*
     * Release any queued transmit buffers.
     */
    while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
        STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
        if (txs->txs_ndescs != 0) {
            bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
            if (txs->txs_mbuf != NULL) {
                m_freem(txs->txs_mbuf);
                txs->txs_mbuf = NULL;
            }
        }
        STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
    }

    if (disable)
        gem_rxdrain(sc);

    /*
     * Mark the interface down and cancel the watchdog timer.
     */
    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
    sc->sc_flags &= ~GEM_LINK;
    sc->sc_wdog_timer = 0;
}
static int
gem_reset_rx(struct gem_softc *sc)
{

    /*
     * Resetting while DMA is in progress can cause a bus hang, so we
     * disable DMA first.
     */
    (void)gem_disable_rx(sc);
    GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 0);
    GEM_BANK1_BARRIER(sc, GEM_RX_CONFIG, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    if (!GEM_BANK1_BITWAIT(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
        device_printf(sc->sc_dev, "cannot disable RX DMA\n");

    /* Wait 5ms extra. */
    DELAY(5000);

    /* Reset the ERX. */
    GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX |
        (sc->sc_variant == GEM_SUN_ERI ? GEM_ERI_CACHE_LINE_SIZE <<
        GEM_RESET_CLSZ_SHFT : 0));
    GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX, 0)) {
        device_printf(sc->sc_dev, "cannot reset receiver\n");
        return (1);
    }

    /* Finally, reset RX MAC. */
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RXRESET, 1);
    GEM_BANK1_BARRIER(sc, GEM_MAC_RXRESET, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RXRESET, 1, 0)) {
        device_printf(sc->sc_dev, "cannot reset RX MAC\n");
        return (1);
    }

    return (0);
}
/*
 * Reset the receiver DMA engine.
 *
 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
 * etc. in order to reset only the receiver DMA engine and avoid a full
 * reset, which amongst others also downs the link and clears the FIFOs.
 */
static void
gem_reset_rxdma(struct gem_softc *sc)
{
    int i;

    if (gem_reset_rx(sc) != 0) {
        sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        return (gem_init_locked(sc));
    }
    for (i = 0; i < GEM_NRXDESC; i++)
        if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
            GEM_UPDATE_RXDESC(sc, i);
    sc->sc_rxptr = 0;
    GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /* NOTE: we use only 32-bit DMA addresses here. */
    GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
    GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
    GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
    GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
        gem_ringsize(GEM_NRXDESC /* XXX */) |
        ((ETHER_HDR_LEN + sizeof(struct ip)) <<
        GEM_RX_CONFIG_CXM_START_SHFT) |
        (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
        (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT));
    /* Adjusting for the SBus clock probably isn't worth the fuzz. */
    GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
        ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) <<
        GEM_RX_BLANKING_TIME_SHIFT) | 6);
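    /*
     * RX interrupt blanking: the low bits ask for interrupt deferral
     * until a few more frames have arrived, while the TIME field bounds
     * the deferral in chip-clock ticks.  The time value is doubled on
     * 66MHz PCI, presumably so the absolute delay stays roughly constant
     * (the SBus comment above hints at such clock scaling; the exact
     * units are a guess).
     */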
    GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
        (3 * sc->sc_rxfifosize / 256) |
        ((sc->sc_rxfifosize / 256) << 12));
    GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
        GEM_BANK1_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
        GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
    /*
     * Clear the RX filter and reprogram it.  This will also set the
     * current RX MAC configuration and enable it.
     */
    gem_setladrf(sc);
}
static int
gem_reset_tx(struct gem_softc *sc)
{

    /*
     * Resetting while DMA is in progress can cause a bus hang, so we
     * disable DMA first.
     */
    (void)gem_disable_tx(sc);
    GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, 0);
    GEM_BANK1_BARRIER(sc, GEM_TX_CONFIG, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    if (!GEM_BANK1_BITWAIT(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
        device_printf(sc->sc_dev, "cannot disable TX DMA\n");

    /* Wait 5ms extra. */
    DELAY(5000);

    /* Finally, reset the ETX. */
    GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_TX |
        (sc->sc_variant == GEM_SUN_ERI ? GEM_ERI_CACHE_LINE_SIZE <<
        GEM_RESET_CLSZ_SHFT : 0));
    GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_TX, 0)) {
        device_printf(sc->sc_dev, "cannot reset transmitter\n");
        return (1);
    }
    return (0);
}
static int
gem_disable_rx(struct gem_softc *sc)
{

    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
        GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE);
    GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    if (GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
        return (1);
    device_printf(sc->sc_dev, "cannot disable RX MAC\n");
    return (0);
}
static int
gem_disable_tx(struct gem_softc *sc)
{

    GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
        GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE);
    GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    if (GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
        return (1);
    device_printf(sc->sc_dev, "cannot disable TX MAC\n");
    return (0);
}
static int
gem_meminit(struct gem_softc *sc)
{
    struct gem_rxsoft *rxs;
    int error, i;

    GEM_LOCK_ASSERT(sc, MA_OWNED);

    /*
     * Initialize the transmit descriptor ring.
     */
    for (i = 0; i < GEM_NTXDESC; i++) {
        sc->sc_txdescs[i].gd_flags = 0;
        sc->sc_txdescs[i].gd_addr = 0;
    }
    sc->sc_txfree = GEM_MAXTXFREE;
    sc->sc_txnext = 0;
    sc->sc_txwin = 0;

    /*
     * Initialize the receive descriptor and receive job
     * descriptor rings.
     */
    for (i = 0; i < GEM_NRXDESC; i++) {
        rxs = &sc->sc_rxsoft[i];
        if (rxs->rxs_mbuf == NULL) {
            if ((error = gem_add_rxbuf(sc, i)) != 0) {
                device_printf(sc->sc_dev,
                    "unable to allocate or map RX buffer %d, "
                    "error = %d\n", i, error);
                /*
                 * XXX we should attempt to run with fewer
                 * receive buffers instead of just failing.
                 */
                gem_rxdrain(sc);
                return (1);
            }
        } else
            GEM_INIT_RXDESC(sc, i);
    }
    sc->sc_rxptr = 0;

    GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);
}
static u_int
gem_ringsize(u_int sz)
{

    switch (sz) {
    case 32:
        return (GEM_RING_SZ_32);
    case 64:
        return (GEM_RING_SZ_64);
    case 128:
        return (GEM_RING_SZ_128);
    case 256:
        return (GEM_RING_SZ_256);
    case 512:
        return (GEM_RING_SZ_512);
    case 1024:
        return (GEM_RING_SZ_1024);
    case 2048:
        return (GEM_RING_SZ_2048);
    case 4096:
        return (GEM_RING_SZ_4096);
    case 8192:
        return (GEM_RING_SZ_8192);
    default:
        printf("%s: invalid ring size %d\n", __func__, sz);
        return (GEM_RING_SZ_32);
    }
}
static void
gem_init(void *xsc)
{
    struct gem_softc *sc = xsc;

    GEM_LOCK(sc);
    gem_init_locked(sc);
    GEM_UNLOCK(sc);
}
/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init_locked(struct gem_softc *sc)
{
    struct ifnet *ifp = sc->sc_ifp;
    uint32_t v;

    GEM_LOCK_ASSERT(sc, MA_OWNED);

    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
        return;

#ifdef GEM_DEBUG
    CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
        __func__);
#endif
    /*
     * Initialization sequence.  The numbered steps below correspond
     * to the sequence outlined in section 6.3.5.1 in the Ethernet
     * Channel Engine manual (part of the PCIO manual).
     * See also the STP2002-STQ document from Sun Microsystems.
     */

    /* step 1 & 2.  Reset the Ethernet Channel. */
    gem_stop(ifp, 0);
    gem_reset(sc);
#ifdef GEM_DEBUG
    CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
        __func__);
#endif

    if ((sc->sc_flags & GEM_SERDES) == 0)
        /* Re-initialize the MIF. */
        gem_mifinit(sc);

    /* step 3.  Setup data structures in host memory. */
    if (gem_meminit(sc) != 0)
        return;

    /* step 4.  TX MAC registers & counters */
    gem_init_regs(sc);

    /* step 5.  RX MAC registers & counters */

    /* step 6 & 7.  Program Descriptor Ring Base Addresses. */
    /* NOTE: we use only 32-bit DMA addresses here. */
    GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0);
    GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

    GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
    GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
    CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
        GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif

    /* step 8.  Global Configuration & Interrupt Mask */

    /*
     * Set the internal arbitration to "infinite" bursts of the
     * maximum length of 31 * 64 bytes so DMA transfers aren't
     * split up into cache-line-sized chunks.  This greatly improves
     * RX performance.
     * Enable silicon bug workarounds for the Apple variants.
     */
    GEM_BANK1_WRITE_4(sc, GEM_CONFIG,
        GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
        ((sc->sc_flags & GEM_PCI) != 0 ? GEM_CONFIG_BURST_INF :
        GEM_CONFIG_BURST_64) | (GEM_IS_APPLE(sc) ?
        GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));

    GEM_BANK1_WRITE_4(sc, GEM_INTMASK,
        ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
        GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
        GEM_INTR_BERR
#ifdef GEM_DEBUG
        | GEM_INTR_PCS | GEM_INTR_MIF
#endif
        ));
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
        GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_MASK,
        GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
        GEM_MAC_TX_PEAK_EXP);
#ifdef GEM_DEBUG
    GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
        ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
#else
    GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
        GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
#endif

    /* step 9.  ETX Configuration: use mostly default values. */

    /* Enable DMA. */
    v = gem_ringsize(GEM_NTXDESC);
    /* Set TX FIFO threshold and enable DMA. */
    v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x4ff) << 10) &
        GEM_TX_CONFIG_TXFIFO_TH;
    GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);

    /* step 10.  ERX Configuration */

    /* Encode Receive Descriptor ring size. */
    v = gem_ringsize(GEM_NRXDESC /* XXX */);
    /* RX TCP/UDP checksum offset */
    v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
        GEM_RX_CONFIG_CXM_START_SHFT);
    /* Set RX FIFO threshold, set first byte offset and enable DMA. */
    GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
        v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
        (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) |
        GEM_RX_CONFIG_RXDMA_EN);

    /* Adjusting for the SBus clock probably isn't worth the fuzz. */
    GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
        ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) <<
        GEM_RX_BLANKING_TIME_SHIFT) | 6);

    /*
     * The following value is for an OFF Threshold of about 3/4 full
     * and an ON Threshold of 1/4 full.
     */
    GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
        (3 * sc->sc_rxfifosize / 256) |
        ((sc->sc_rxfifosize / 256) << 12));

    /* step 11.  Configure Media. */

    /* step 12.  RX_MAC Configuration Register */
    v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
    v &= ~GEM_MAC_RX_ENABLE;
    v |= GEM_MAC_RX_STRIP_CRC;
    sc->sc_mac_rxcfg = v;
    /*
     * Clear the RX filter and reprogram it.  This will also set the
     * current RX MAC configuration and enable it.
     */
    gem_setladrf(sc);

    /* step 13.  TX_MAC Configuration Register */
    v = GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG);
    v |= GEM_MAC_TX_ENABLE;
    (void)gem_disable_tx(sc);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, v);

    /* step 14.  Issue Transmit Pending command. */

    /* step 15.  Give the receiver a swift kick. */
    GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);

    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    mii_mediachg(sc->sc_mii);

    /* Start the one second timer. */
    sc->sc_wdog_timer = 0;
    callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}
static int
gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
{
    bus_dma_segment_t txsegs[GEM_NTXSEGS];
    struct gem_txsoft *txs;
    struct ip *ip;
    struct mbuf *m;
    uint64_t cflags, flags;
    int error, nexttx, nsegs, offset, seg;

    GEM_LOCK_ASSERT(sc, MA_OWNED);

    /* Get a work queue entry. */
    if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
        /* Ran out of descriptors. */
        return (ENOBUFS);
    }

    cflags = 0;
    if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
        if (M_WRITABLE(*m_head) == 0) {
            m = m_dup(*m_head, M_NOWAIT);
            m_freem(*m_head);
            *m_head = m;
            if (m == NULL)
                return (ENOBUFS);
        }
        offset = sizeof(struct ether_header);
        m = m_pullup(*m_head, offset + sizeof(struct ip));
        if (m == NULL) {
            *m_head = NULL;
            return (ENOBUFS);
        }
        ip = (struct ip *)(mtod(m, caddr_t) + offset);
        offset += (ip->ip_hl << 2);
        cflags = offset << GEM_TD_CXSUM_STARTSHFT |
            ((offset + m->m_pkthdr.csum_data) <<
            GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE;
        *m_head = m;
    }
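    /*
     * The checksum fields built above tell the controller where the L4
     * payload starts (GEM_TD_CXSUM_STARTSHFT: end of the IP header) and
     * where to stuff the result (GEM_TD_CXSUM_STUFFSHFT: the stack's
     * csum_data is the offset of the checksum field within the L4
     * header).  E.g. for TCP without IP options: start = 14 + 20 = 34
     * and stuff = 34 + 16 = 50 bytes into the frame.
     */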
    error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
        *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
    if (error == EFBIG) {
        m = m_collapse(*m_head, M_NOWAIT, GEM_NTXSEGS);
        if (m == NULL) {
            m_freem(*m_head);
            *m_head = NULL;
            return (ENOBUFS);
        }
        *m_head = m;
        error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
            txs->txs_dmamap, *m_head, txsegs, &nsegs,
            BUS_DMA_NOWAIT);
        if (error != 0) {
            m_freem(*m_head);
            *m_head = NULL;
            return (error);
        }
    } else if (error != 0)
        return (error);
    /* If nsegs is wrong then the stack is corrupt. */
    KASSERT(nsegs <= GEM_NTXSEGS,
        ("%s: too many DMA segments (%d)", __func__, nsegs));
    if (nsegs == 0) {
        m_freem(*m_head);
        *m_head = NULL;
        return (EIO);
    }

    /*
     * Ensure we have enough descriptors free to describe
     * the packet.  Note, we always reserve one descriptor
     * at the end of the ring as a termination point, in
     * order to prevent wrap-around.
     */
    if (nsegs > sc->sc_txfree - 1) {
        txs->txs_ndescs = 0;
        bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
        return (ENOBUFS);
    }

    txs->txs_ndescs = nsegs;
    txs->txs_firstdesc = sc->sc_txnext;
    nexttx = txs->txs_firstdesc;
    for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
#ifdef GEM_DEBUG
        CTR6(KTR_GEM,
            "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
            __func__, seg, nexttx, txsegs[seg].ds_len,
            txsegs[seg].ds_addr,
            GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
#endif
        sc->sc_txdescs[nexttx].gd_addr =
            GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
        KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
            ("%s: segment size too large!", __func__));
        flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
        sc->sc_txdescs[nexttx].gd_flags =
            GEM_DMA_WRITE(sc, flags | cflags);
        txs->txs_lastdesc = nexttx;
    }

    /* Set EOP on the last descriptor. */
#ifdef GEM_DEBUG
    CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
        __func__, seg, nexttx);
#endif
    sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
        GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);

    /* Lastly set SOP on the first descriptor. */
#ifdef GEM_DEBUG
    CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
        __func__, seg, nexttx);
#endif
    if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
        sc->sc_txwin = 0;
        sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
            GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
            GEM_TD_START_OF_PACKET);
    } else
        sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
            GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);

    /* Sync the DMA map. */
    bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
        BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
    CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
        __func__, txs->txs_firstdesc, txs->txs_lastdesc,
        txs->txs_ndescs);
#endif
    STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
    STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
    txs->txs_mbuf = *m_head;

    sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
    sc->sc_txfree -= txs->txs_ndescs;

    return (0);
}
static void
gem_init_regs(struct gem_softc *sc)
{
    const u_char *laddr = IF_LLADDR(sc->sc_ifp);

    GEM_LOCK_ASSERT(sc, MA_OWNED);

    /* These registers are not cleared on reset. */
    if ((sc->sc_flags & GEM_INITED) == 0) {
        /* magic values */
        GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG0, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG1, 8);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG2, 4);

        /* min frame length */
        GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
        /* max frame length and max burst size */
        GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME,
            (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

        /* more magic values */
        GEM_BANK1_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8808);

        /* random number seed */
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RANDOM_SEED,
            ((laddr[5] << 8) | laddr[4]) & 0x3ff);

        /* secondary MAC address: 0:0:0:0:0:0 */
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR3, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR4, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR5, 0);

        /* MAC control address: 01:80:c2:00:00:01 */
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180);

        /* MAC filter address: 0:0:0:0:0:0 */
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0);

        sc->sc_flags |= GEM_INITED;
    }

    /* Counters need to be zeroed. */
    GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);

    /* Set XOFF PAUSE time. */
    GEM_BANK1_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);

    /* Set the station address. */
    GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);

    /* Enable MII outputs. */
    GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
}
static void
gem_start(struct ifnet *ifp)
{
    struct gem_softc *sc = ifp->if_softc;

    GEM_LOCK(sc);
    gem_start_locked(ifp);
    GEM_UNLOCK(sc);
}
static inline void
gem_txkick(struct gem_softc *sc)
{

    /*
     * Update the TX kick register.  This register has to point to the
     * descriptor after the last valid one and for optimum performance
     * should be incremented in multiples of 4 (the DMA engine fetches/
     * updates descriptors in batches of 4).
     */
#ifdef GEM_DEBUG
    CTR3(KTR_GEM, "%s: %s: kicking TX %d",
        device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
#endif
    GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    GEM_BANK1_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);
}
static void
gem_start_locked(struct ifnet *ifp)
{
    struct gem_softc *sc = ifp->if_softc;
    struct mbuf *m;
    int kicked, ntx;

    GEM_LOCK_ASSERT(sc, MA_OWNED);

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
        return;

#ifdef GEM_DEBUG
    CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
        device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
        sc->sc_txnext);
#endif
    ntx = 0;
    kicked = 0;
    for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
        IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
        if (m == NULL)
            break;
        if (gem_load_txmbuf(sc, &m) != 0) {
            if (m == NULL)
                break;
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            IFQ_DRV_PREPEND(&ifp->if_snd, m);
            break;
        }
        if ((sc->sc_txnext % 4) == 0) {
            gem_txkick(sc);
            kicked = 1;
        } else
            kicked = 0;
        ntx++;
        BPF_MTAP(ifp, m);
    }

    if (ntx > 0) {
        if (kicked == 0)
            gem_txkick(sc);
#ifdef GEM_DEBUG
        CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
            device_get_name(sc->sc_dev), sc->sc_txnext);
#endif

        /* Set a watchdog timer in case the chip flakes out. */
        sc->sc_wdog_timer = 5;
#ifdef GEM_DEBUG
        CTR3(KTR_GEM, "%s: %s: watchdog %d",
            device_get_name(sc->sc_dev), __func__,
            sc->sc_wdog_timer);
#endif
    }
}
static void
gem_tint(struct gem_softc *sc)
{
    struct ifnet *ifp = sc->sc_ifp;
    struct gem_txsoft *txs;
    int progress;
    uint32_t txlast;
#ifdef GEM_DEBUG
    int i;
#endif

    GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
    CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

    /*
     * Go through our TX list and free mbufs for those
     * frames that have been transmitted.
     */
    progress = 0;
    GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
    while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
#ifdef GEM_DEBUG
        if ((ifp->if_flags & IFF_DEBUG) != 0) {
            printf("    txsoft %p transmit chain:\n", txs);
            for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
                printf("descriptor %d: ", i);
                printf("gd_flags: 0x%016llx\t",
                    (long long)GEM_DMA_READ(sc,
                    sc->sc_txdescs[i].gd_flags));
                printf("gd_addr: 0x%016llx\n",
                    (long long)GEM_DMA_READ(sc,
                    sc->sc_txdescs[i].gd_addr));
                if (i == txs->txs_lastdesc)
                    break;
            }
        }
#endif

        /*
         * In theory, we could harvest some descriptors before
         * the ring is empty, but that's a bit complicated.
         *
         * GEM_TX_COMPLETION points to the last descriptor
         * processed + 1.
         */
        txlast = GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION);
#ifdef GEM_DEBUG
        CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
            "txs->txs_lastdesc = %d, txlast = %d",
            __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
        if (txs->txs_firstdesc <= txs->txs_lastdesc) {
            if ((txlast >= txs->txs_firstdesc) &&
                (txlast <= txs->txs_lastdesc))
                break;
        } else {
            /* Ick -- this command wraps. */
            if ((txlast >= txs->txs_firstdesc) ||
                (txlast <= txs->txs_lastdesc))
                break;
        }

#ifdef GEM_DEBUG
        CTR1(KTR_GEM, "%s: releasing a descriptor", __func__);
#endif
        STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

        sc->sc_txfree += txs->txs_ndescs;

        bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
        if (txs->txs_mbuf != NULL) {
            m_freem(txs->txs_mbuf);
            txs->txs_mbuf = NULL;
        }

        STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

        if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
        progress = 1;
    }

#ifdef GEM_DEBUG
    CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx "
        "GEM_TX_COMPLETION %x",
        __func__, GEM_BANK1_READ_4(sc, GEM_TX_STATE_MACHINE),
        ((long long)GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) |
        GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_LO),
        GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION));
#endif

    if (progress) {
        if (sc->sc_txfree == GEM_NTXDESC - 1)
            sc->sc_txwin = 0;

        /*
         * We freed some descriptors, so reset IFF_DRV_OACTIVE
         * and restart.
         */
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        if (STAILQ_EMPTY(&sc->sc_txdirtyq))
            sc->sc_wdog_timer = 0;
        gem_start_locked(ifp);
    }

#ifdef GEM_DEBUG
    CTR3(KTR_GEM, "%s: %s: watchdog %d",
        device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
#endif
}
#ifdef GEM_RINT_TIMEOUT
static void
gem_rint_timeout(void *arg)
{
    struct gem_softc *sc = arg;

    GEM_LOCK_ASSERT(sc, MA_OWNED);

    gem_rint(sc);
}
#endif
static void
gem_rint(struct gem_softc *sc)
{
    struct ifnet *ifp = sc->sc_ifp;
    struct mbuf *m;
    uint64_t rxstat;
    uint32_t rxcomp;

    GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_RINT_TIMEOUT
    callout_stop(&sc->sc_rx_ch);
#endif
#ifdef GEM_DEBUG
    CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

    /*
     * Read the completion register once.  This limits
     * how long the following loop can execute.
     */
    rxcomp = GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION);
#ifdef GEM_DEBUG
    CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d",
        __func__, sc->sc_rxptr, rxcomp);
#endif
    GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    for (; sc->sc_rxptr != rxcomp;) {
        m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
        rxstat = GEM_DMA_READ(sc,
            sc->sc_rxdescs[sc->sc_rxptr].gd_flags);

        if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
            /*
             * The descriptor is still marked as owned, although
             * it is supposed to have completed.  This has been
             * observed on some machines.  Just exiting here
             * might leave the packet sitting around until another
             * one arrives to trigger a new interrupt, which is
             * generally undesirable, so set up a timeout.
             */
            callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
                gem_rint_timeout, sc);
#endif
            m = NULL;
            goto kickit;
        }

        if (rxstat & GEM_RD_BAD_CRC) {
            if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
            device_printf(sc->sc_dev, "receive error: CRC error\n");
            GEM_INIT_RXDESC(sc, sc->sc_rxptr);
            m = NULL;
            goto kickit;
        }

#ifdef GEM_DEBUG
        if ((ifp->if_flags & IFF_DEBUG) != 0) {
            printf("    rxsoft %p descriptor %d: ",
                &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
            printf("gd_flags: 0x%016llx\t",
                (long long)GEM_DMA_READ(sc,
                sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
            printf("gd_addr: 0x%016llx\n",
                (long long)GEM_DMA_READ(sc,
                sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
        }
#endif

        /*
         * Allocate a new mbuf cluster.  If that fails, we are
         * out of memory, and must drop the packet and recycle
         * the buffer that's already attached to this descriptor.
         */
        if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
            if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
            GEM_INIT_RXDESC(sc, sc->sc_rxptr);
            m = NULL;
        }

kickit:
        /*
         * Update the RX kick register.  This register has to point
         * to the descriptor after the last valid one (before the
         * current batch) and for optimum performance should be
         * incremented in multiples of 4 (the DMA engine fetches/
         * updates descriptors in batches of 4).
         */
        sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
        if ((sc->sc_rxptr % 4) == 0) {
            GEM_CDSYNC(sc,
                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
            GEM_BANK1_WRITE_4(sc, GEM_RX_KICK,
                (sc->sc_rxptr + GEM_NRXDESC - 4) &
                GEM_NRXDESC_MASK);
        }

        if (m == NULL) {
            if (rxstat & GEM_RD_OWN)
                break;
            continue;
        }

        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
        m->m_data += ETHER_ALIGN; /* first byte offset */
        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);

        if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
            gem_rxcksum(m, rxstat);

        /* Pass it on. */
        GEM_UNLOCK(sc);
        (*ifp->if_input)(ifp, m);
        GEM_LOCK(sc);
    }

#ifdef GEM_DEBUG
    CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__,
        sc->sc_rxptr, GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION));
#endif
}
static int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
    struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
    struct mbuf *m;
    bus_dma_segment_t segs[1];
    int error, nsegs;

    GEM_LOCK_ASSERT(sc, MA_OWNED);

    m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
        return (ENOBUFS);
    m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
    /* Bzero the packet to check DMA. */
    memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

    if (rxs->rxs_mbuf != NULL) {
        bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
    }

    error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
        m, segs, &nsegs, BUS_DMA_NOWAIT);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "cannot load RX DMA map %d, error = %d\n", idx, error);
        m_freem(m);
        return (error);
    }
    /* If nsegs is wrong then the stack is corrupt. */
    KASSERT(nsegs == 1,
        ("%s: too many DMA segments (%d)", __func__, nsegs));
    rxs->rxs_mbuf = m;
    rxs->rxs_paddr = segs[0].ds_addr;

    bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
        BUS_DMASYNC_PREREAD);

    GEM_INIT_RXDESC(sc, idx);

    return (0);
}
static void
gem_eint(struct gem_softc *sc, u_int status)
{

    if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
    if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
        gem_reset_rxdma(sc);
        return;
    }

    device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
    if ((status & GEM_INTR_BERR) != 0) {
        if ((sc->sc_flags & GEM_PCI) != 0)
            printf(", PCI bus error 0x%x\n",
                GEM_BANK1_READ_4(sc, GEM_PCI_ERROR_STATUS));
        else
            printf(", SBus error 0x%x\n",
                GEM_BANK1_READ_4(sc, GEM_SBUS_STATUS));
    } else
        printf("\n");
}
void
gem_intr(void *v)
{
    struct gem_softc *sc = v;
    uint32_t status, status2;

    GEM_LOCK(sc);
    status = GEM_BANK1_READ_4(sc, GEM_STATUS);

#ifdef GEM_DEBUG
    CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
        device_get_name(sc->sc_dev), __func__,
        (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status);

    /*
     * PCS interrupts must be cleared, otherwise no traffic is passed!
     */
    if ((status & GEM_INTR_PCS) != 0) {
        status2 =
            GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS) |
            GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS);
        if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
            device_printf(sc->sc_dev,
                "%s: PCS link status changed\n", __func__);
    }
    if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
        status2 = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_STATUS);
        if ((status2 & GEM_MAC_PAUSED) != 0)
            device_printf(sc->sc_dev,
                "%s: PAUSE received (PAUSE time %d slots)\n",
                __func__, GEM_MAC_PAUSE_TIME(status2));
        if ((status2 & GEM_MAC_PAUSE) != 0)
            device_printf(sc->sc_dev,
                "%s: transited to PAUSE state\n", __func__);
        if ((status2 & GEM_MAC_RESUME) != 0)
            device_printf(sc->sc_dev,
                "%s: transited to non-PAUSE state\n", __func__);
    }
    if ((status & GEM_INTR_MIF) != 0)
        device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
#endif

    if (__predict_false(status &
        (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
        gem_eint(sc, status);

    if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
        gem_rint(sc);

    if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
        gem_tint(sc);

    if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) {
        status2 = GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS);
        if ((status2 &
            ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
            GEM_MAC_TX_PEAK_EXP)) != 0)
            device_printf(sc->sc_dev,
                "MAC TX fault, status %x\n", status2);
        if ((status2 &
            (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) {
            if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
            sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
            gem_init_locked(sc);
        }
    }
    if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) {
        status2 = GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS);
        /*
         * At least with GEM_SUN_GEM and some GEM_SUN_ERI
         * revisions GEM_MAC_RX_OVERFLOW happens often due to a
         * silicon bug, so handle it silently.  Moreover, it's
         * likely that the receiver has hung so we reset it.
         */
        if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) {
            if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
            gem_reset_rxdma(sc);
        } else if ((status2 &
            ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0)
            device_printf(sc->sc_dev,
                "MAC RX fault, status %x\n", status2);
    }
    GEM_UNLOCK(sc);
}
static int
gem_watchdog(struct gem_softc *sc)
{
    struct ifnet *ifp = sc->sc_ifp;

    GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
    CTR4(KTR_GEM,
        "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x",
        __func__, GEM_BANK1_READ_4(sc, GEM_RX_CONFIG),
        GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS),
        GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG));
    CTR4(KTR_GEM,
        "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x",
        __func__, GEM_BANK1_READ_4(sc, GEM_TX_CONFIG),
        GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS),
        GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG));
#endif

    if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
        return (0);

    if ((sc->sc_flags & GEM_LINK) != 0)
        device_printf(sc->sc_dev, "device timeout\n");
    else if (bootverbose)
        device_printf(sc->sc_dev, "device timeout (no link)\n");
    if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

    /* Try to get more packets going. */
    ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    gem_init_locked(sc);
    gem_start_locked(ifp);
    return (EJUSTRETURN);
}
static void
gem_mifinit(struct gem_softc *sc)
{

    /* Configure the MIF in frame mode. */
    GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG,
        GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
    GEM_BANK1_BARRIER(sc, GEM_MIF_CONFIG, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/*
 * MII interface
 *
 * The MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 */
int
gem_mii_readreg(device_t dev, int phy, int reg)
{
    struct gem_softc *sc;
    int n;
    uint32_t v;

#ifdef GEM_DEBUG_PHY
    printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

    sc = device_get_softc(dev);
    if ((sc->sc_flags & GEM_SERDES) != 0) {
        switch (reg) {
        case MII_BMCR:
            reg = GEM_MII_CONTROL;
            break;
        case MII_BMSR:
            reg = GEM_MII_STATUS;
            break;
        case MII_PHYIDR1:
        case MII_PHYIDR2:
            return (0);
        case MII_ANAR:
            reg = GEM_MII_ANAR;
            break;
        case MII_ANLPAR:
            reg = GEM_MII_ANLPAR;
            break;
        case MII_EXTSR:
            return (EXTSR_1000XFDX | EXTSR_1000XHDX);
        default:
            device_printf(sc->sc_dev,
                "%s: unhandled register %d\n", __func__, reg);
            return (0);
        }
        return (GEM_BANK1_READ_4(sc, reg));
    }

    /* Construct the frame command. */
    v = GEM_MIF_FRAME_READ |
        (phy << GEM_MIF_PHY_SHIFT) |
        (reg << GEM_MIF_REG_SHIFT);
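    /*
     * This builds the usual IEEE 802.3 clause 22 MII management frame:
     * a read opcode plus the 5-bit PHY address and 5-bit register
     * number.  GEM_MIF_FRAME_TA0 (the turnaround bit) doubles as the
     * completion flag polled below, with the result left in
     * GEM_MIF_FRAME_DATA.
     */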
    GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
    GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    for (n = 0; n < 100; n++) {
        DELAY(1);
        v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
        if (v & GEM_MIF_FRAME_TA0)
            return (v & GEM_MIF_FRAME_DATA);
    }

    device_printf(sc->sc_dev, "%s: timed out\n", __func__);
    return (0);
}
int
gem_mii_writereg(device_t dev, int phy, int reg, int val)
{
    struct gem_softc *sc;
    int n;
    uint32_t v;

#ifdef GEM_DEBUG_PHY
    printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

    sc = device_get_softc(dev);
    if ((sc->sc_flags & GEM_SERDES) != 0) {
        switch (reg) {
        case MII_BMSR:
            reg = GEM_MII_STATUS;
            break;
        case MII_BMCR:
            reg = GEM_MII_CONTROL;
            if ((val & GEM_MII_CONTROL_RESET) == 0)
                break;
            GEM_BANK1_WRITE_4(sc, GEM_MII_CONTROL, val);
            GEM_BANK1_BARRIER(sc, GEM_MII_CONTROL, 4,
                BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
            if (!GEM_BANK1_BITWAIT(sc, GEM_MII_CONTROL,
                GEM_MII_CONTROL_RESET, 0))
                device_printf(sc->sc_dev,
                    "cannot reset PCS\n");
            /* FALLTHROUGH */
        case MII_ANAR:
            GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, 0);
            GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
                BUS_SPACE_BARRIER_WRITE);
            GEM_BANK1_WRITE_4(sc, GEM_MII_ANAR, val);
            GEM_BANK1_BARRIER(sc, GEM_MII_ANAR, 4,
                BUS_SPACE_BARRIER_WRITE);
            GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
                GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
            GEM_BANK1_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4,
                BUS_SPACE_BARRIER_WRITE);
            GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG,
                GEM_MII_CONFIG_ENABLE);
            GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
                BUS_SPACE_BARRIER_WRITE);
            return (0);
        case MII_ANLPAR:
            reg = GEM_MII_ANLPAR;
            break;
        default:
            device_printf(sc->sc_dev,
                "%s: unhandled register %d\n", __func__, reg);
            return (0);
        }
        GEM_BANK1_WRITE_4(sc, reg, val);
        GEM_BANK1_BARRIER(sc, reg, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        return (0);
    }

    /* Construct the frame command. */
    v = GEM_MIF_FRAME_WRITE |
        (phy << GEM_MIF_PHY_SHIFT) |
        (reg << GEM_MIF_REG_SHIFT) |
        (val & GEM_MIF_FRAME_DATA);

    GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
    GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    for (n = 0; n < 100; n++) {
        DELAY(1);
        v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
        if (v & GEM_MIF_FRAME_TA0)
            return (1);
    }

    device_printf(sc->sc_dev, "%s: timed out\n", __func__);
    return (0);
}
void
gem_mii_statchg(device_t dev)
{
    struct gem_softc *sc;
    int gigabit;
    uint32_t rxcfg, txcfg, v;

    sc = device_get_softc(dev);

    GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
    if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
        device_printf(sc->sc_dev, "%s: status change\n", __func__);
#endif

    if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
        IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
        sc->sc_flags |= GEM_LINK;
    else
        sc->sc_flags &= ~GEM_LINK;

    switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
    case IFM_1000_SX:
    case IFM_1000_LX:
    case IFM_1000_CX:
    case IFM_1000_T:
        gigabit = 1;
        break;
    default:
        gigabit = 0;
    }

    /*
     * The configuration done here corresponds to the steps F) and
     * G) and as far as enabling of RX and TX MAC goes also step H)
     * of the initialization sequence outlined in section 3.2.1 of
     * the GEM Gigabit Ethernet ASIC Specification.
     */

    rxcfg = sc->sc_mac_rxcfg;
    rxcfg &= ~GEM_MAC_RX_CARR_EXTEND;
    txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
    if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
        txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
    else if (gigabit != 0) {
        rxcfg |= GEM_MAC_RX_CARR_EXTEND;
        txcfg |= GEM_MAC_TX_CARR_EXTEND;
    }
    (void)gem_disable_tx(sc);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg);
    (void)gem_disable_rx(sc);
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg);

    v = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_CONFIG) &
        ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
    if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
        IFM_ETH_RXPAUSE) != 0)
        v |= GEM_MAC_CC_RX_PAUSE;
    if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
        IFM_ETH_TXPAUSE) != 0)
        v |= GEM_MAC_CC_TX_PAUSE;
    GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v);

    if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
        gigabit != 0)
        GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
            GEM_MAC_SLOT_TIME_CARR_EXTEND);
    else
        GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
            GEM_MAC_SLOT_TIME_NORMAL);

    /* XIF Configuration */
    v = GEM_MAC_XIF_LINK_LED;
    v |= GEM_MAC_XIF_TX_MII_ENA;
    if ((sc->sc_flags & GEM_SERDES) == 0) {
        if ((GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) &
            GEM_MIF_CONFIG_PHY_SEL) != 0) {
            /* External MII needs echo disable if half duplex. */
            if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
                IFM_FDX) == 0)
                v |= GEM_MAC_XIF_ECHO_DISABL;
        } else
            /*
             * Internal MII needs buffer enable.
             * XXX buffer enable makes only sense for an
             * external transceiver.
             */
            v |= GEM_MAC_XIF_MII_BUF_ENA;
    }
    if (gigabit != 0)
        v |= GEM_MAC_XIF_GMII_MODE;
    if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
        v |= GEM_MAC_XIF_FDPLX_LED;
    GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v);

    sc->sc_mac_rxcfg = rxcfg;
    if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
        (sc->sc_flags & GEM_LINK) != 0) {
        GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
            txcfg | GEM_MAC_TX_ENABLE);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
            rxcfg | GEM_MAC_RX_ENABLE);
    }
}
int
gem_mediachange(struct ifnet *ifp)
{
    struct gem_softc *sc = ifp->if_softc;
    int error;

    /* XXX add support for serial media. */

    GEM_LOCK(sc);
    error = mii_mediachg(sc->sc_mii);
    GEM_UNLOCK(sc);
    return (error);
}
void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct gem_softc *sc = ifp->if_softc;

    GEM_LOCK(sc);
    if ((ifp->if_flags & IFF_UP) == 0) {
        GEM_UNLOCK(sc);
        return;
    }

    mii_pollstat(sc->sc_mii);
    ifmr->ifm_active = sc->sc_mii->mii_media_active;
    ifmr->ifm_status = sc->sc_mii->mii_media_status;
    GEM_UNLOCK(sc);
}
static int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    struct gem_softc *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *)data;
    int error;

    error = 0;
    switch (cmd) {
    case SIOCSIFFLAGS:
        GEM_LOCK(sc);
        if ((ifp->if_flags & IFF_UP) != 0) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
                ((ifp->if_flags ^ sc->sc_ifflags) &
                (IFF_ALLMULTI | IFF_PROMISC)) != 0)
                gem_setladrf(sc);
            else
                gem_init_locked(sc);
        } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
            gem_stop(ifp, 0);
        if ((ifp->if_flags & IFF_LINK0) != 0)
            sc->sc_csum_features |= CSUM_UDP;
        else
            sc->sc_csum_features &= ~CSUM_UDP;
        if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
            ifp->if_hwassist = sc->sc_csum_features;
        sc->sc_ifflags = ifp->if_flags;
        GEM_UNLOCK(sc);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        GEM_LOCK(sc);
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
            gem_setladrf(sc);
        GEM_UNLOCK(sc);
        break;
    case SIOCGIFMEDIA:
    case SIOCSIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
        break;
    case SIOCSIFCAP:
        GEM_LOCK(sc);
        ifp->if_capenable = ifr->ifr_reqcap;
        if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
            ifp->if_hwassist = sc->sc_csum_features;
        else
            ifp->if_hwassist = 0;
        GEM_UNLOCK(sc);
        break;
    default:
        error = ether_ioctl(ifp, cmd, data);
        break;
    }

    return (error);
}
static u_int
gem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
    uint32_t crc, *hash = arg;

    crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
    /* We just want the 8 most significant bits. */
    crc >>= 24;
    /* Set the corresponding bit in the filter. */
    hash[crc >> 4] |= 1 << (15 - (crc & 15));
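    /*
     * Worked example: a CRC whose top byte is 0xa7 yields crc = 0xa7
     * after the shift, so word 0xa (10) of the 16 x 16-bit filter gets
     * bit (15 - 7) = 8 set.
     */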
    return (1);
}

static void
gem_setladrf(struct gem_softc *sc)
{
    struct ifnet *ifp = sc->sc_ifp;
    int i;
    uint32_t hash[16];
    uint32_t v;

    GEM_LOCK_ASSERT(sc, MA_OWNED);

    /*
     * Turn off the RX MAC and the hash filter as required by the Sun GEM
     * programming restrictions.
     */
    v = sc->sc_mac_rxcfg & ~GEM_MAC_RX_HASH_FILTER;
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
    GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER |
        GEM_MAC_RX_ENABLE, 0))
        device_printf(sc->sc_dev,
            "cannot disable RX MAC or hash filter\n");

    v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_PROMISC_GRP);
    if ((ifp->if_flags & IFF_PROMISC) != 0) {
        v |= GEM_MAC_RX_PROMISCUOUS;
        goto chipit;
    }
    if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
        v |= GEM_MAC_RX_PROMISC_GRP;
        goto chipit;
    }

    /*
     * Set up multicast address filter by passing all multicast
     * addresses through a crc generator, and then using the high
     * order 8 bits as an index into the 256 bit logical address
     * filter.  The high order 4 bits selects the word, while the
     * other 4 bits select the bit within the word (where bit 0
     * is the MSB).
     */

    memset(hash, 0, sizeof(hash));
    if_foreach_llmaddr(ifp, gem_hash_maddr, hash);

    v |= GEM_MAC_RX_HASH_FILTER;

    /* Now load the hash table into the chip (if we are using it). */
    for (i = 0; i < 16; i++)
        GEM_BANK1_WRITE_4(sc,
            GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
            hash[i]);

chipit:
    sc->sc_mac_rxcfg = v;
    GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v | GEM_MAC_RX_ENABLE);
}