/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
 */

#if 0 /* XXX: In case of emergency, re-enable this. */
#define GEM_RINT_TIMEOUT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);

#define GEM_TRIES       10000
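
/*
 * gem_bitwait() below polls a register up to GEM_TRIES times with a
 * 100us delay between reads, so bits are given roughly one second
 * (10000 * 100us) to change state before a timeout is reported.
 */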

/*
 * The hardware supports basic TCP/UDP checksum offloading.  However,
 * the hardware doesn't compensate the checksum for UDP datagrams,
 * which can yield a checksum of 0x0.  As a safeguard, UDP checksum
 * offloading is disabled by default.  It can be reactivated by
 * setting the special link option link0 with ifconfig(8).
 */
#define GEM_CSUM_FEATURES       (CSUM_TCP)
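
/*
 * Usage sketch (the interface name gem0 is just an example): UDP TX
 * checksum offloading can be toggled at runtime from userland via the
 * link0 flag handled in gem_ioctl() below:
 *
 *      # ifconfig gem0 link0           # add CSUM_UDP
 *      # ifconfig gem0 -link0          # back to TCP-only offloading
 */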

static int      gem_add_rxbuf(struct gem_softc *sc, int idx);
static int      gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r,
                    uint32_t clr, uint32_t set);
static void     gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
                    int nsegs, int error);
static int      gem_disable_rx(struct gem_softc *sc);
static int      gem_disable_tx(struct gem_softc *sc);
static void     gem_eint(struct gem_softc *sc, u_int status);
static void     gem_init(void *xsc);
static void     gem_init_locked(struct gem_softc *sc);
static void     gem_init_regs(struct gem_softc *sc);
static int      gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int      gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
static int      gem_meminit(struct gem_softc *sc);
static void     gem_mifinit(struct gem_softc *sc);
static void     gem_reset(struct gem_softc *sc);
static int      gem_reset_rx(struct gem_softc *sc);
static void     gem_reset_rxdma(struct gem_softc *sc);
static int      gem_reset_tx(struct gem_softc *sc);
static u_int    gem_ringsize(u_int sz);
static void     gem_rint(struct gem_softc *sc);
#ifdef GEM_RINT_TIMEOUT
static void     gem_rint_timeout(void *arg);
#endif
static inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
static void     gem_rxdrain(struct gem_softc *sc);
static void     gem_setladrf(struct gem_softc *sc);
static void     gem_start(struct ifnet *ifp);
static void     gem_start_locked(struct ifnet *ifp);
static void     gem_stop(struct ifnet *ifp, int disable);
static void     gem_tick(void *arg);
static void     gem_tint(struct gem_softc *sc);
static inline void gem_txkick(struct gem_softc *sc);
static int      gem_watchdog(struct gem_softc *sc);

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define KTR_GEM         KTR_CT2
#endif

#define GEM_BANK1_BITWAIT(sc, r, clr, set)                              \
        gem_bitwait((sc), GEM_RES_BANK1, (r), (clr), (set))
#define GEM_BANK2_BITWAIT(sc, r, clr, set)                              \
        gem_bitwait((sc), GEM_RES_BANK2, (r), (clr), (set))

int
gem_attach(struct gem_softc *sc)
{
        struct gem_txsoft *txs;
        struct ifnet *ifp;
        int error, i, phy;
        uint32_t v;

        if (bootverbose)
                device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags);

        /* Set up ifnet structure. */
        ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL)
                return (ENOSPC);
        sc->sc_csum_features = GEM_CSUM_FEATURES;
        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(sc->sc_dev),
            device_get_unit(sc->sc_dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_start = gem_start;
        ifp->if_ioctl = gem_ioctl;
        ifp->if_init = gem_init;
        IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
        ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
        IFQ_SET_READY(&ifp->if_snd);

        callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
        callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif

        /* Make sure the chip is stopped. */
        gem_reset(sc);

        error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
            NULL, &sc->sc_pdmatag);
        if (error != 0)
                return (error);

        error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
            1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
        if (error != 0)
                goto fail_ptag;

        error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
            BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
        if (error != 0)
                goto fail_rtag;

        error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            sizeof(struct gem_control_data), 1,
            sizeof(struct gem_control_data), 0,
            NULL, NULL, &sc->sc_cdmatag);
        if (error != 0)
                goto fail_ttag;

        /*
         * Allocate the control data structures, create and load the
         * DMA map for it.
         */
        if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
            (void **)&sc->sc_control_data,
            BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
            &sc->sc_cddmamap)) != 0) {
                device_printf(sc->sc_dev,
                    "unable to allocate control data, error = %d\n", error);
                goto fail_ctag;
        }

        sc->sc_cddma = 0;
        if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
            sc->sc_control_data, sizeof(struct gem_control_data),
            gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
                device_printf(sc->sc_dev,
                    "unable to load control data DMA map, error = %d\n",
                    error);
                goto fail_cmem;
        }

        /*
         * Initialize the transmit job descriptors.
         */
        STAILQ_INIT(&sc->sc_txfreeq);
        STAILQ_INIT(&sc->sc_txdirtyq);

        /*
         * Create the transmit buffer DMA maps.
         */
        for (i = 0; i < GEM_TXQUEUELEN; i++) {
                txs = &sc->sc_txsoft[i];
                txs->txs_mbuf = NULL;
                txs->txs_ndescs = 0;
                if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
                    &txs->txs_dmamap)) != 0) {
                        device_printf(sc->sc_dev,
                            "unable to create TX DMA map %d, error = %d\n",
                            i, error);
                        goto fail_txd;
                }
                STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
        }

        /*
         * Create the receive buffer DMA maps.
         */
        for (i = 0; i < GEM_NRXDESC; i++) {
                if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
                    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
                        device_printf(sc->sc_dev,
                            "unable to create RX DMA map %d, error = %d\n",
                            i, error);
                        goto fail_rxd;
                }
                sc->sc_rxsoft[i].rxs_mbuf = NULL;
        }

        /* Bypass probing PHYs if we already know for sure to use a SERDES. */
        if ((sc->sc_flags & GEM_SERDES) != 0)
                goto serdes;

        /* Bad things will happen when touching this register on ERI. */
        if (sc->sc_variant != GEM_SUN_ERI) {
                GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
                    GEM_MII_DATAPATH_MII);
                GEM_BANK1_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4,
                    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        }

        gem_mifinit(sc);

        /*
         * Look for an external PHY.
         */
        error = ENXIO;
        v = GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG);
        if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
                v |= GEM_MIF_CONFIG_PHY_SEL;
                GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
                GEM_BANK1_BARRIER(sc, GEM_MIF_CONFIG, 4,
                    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
                switch (sc->sc_variant) {
                case GEM_SUN_ERI:
                        phy = GEM_PHYAD_EXTERNAL;
                        break;
                default:
                        phy = MII_PHY_ANY;
                        break;
                }
                error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
                    gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy,
                    MII_OFFSET_ANY, MIIF_DOPAUSE);
        }

        /*
         * Fall back on an internal PHY if no external PHY was found.
         * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be
         * trusted when the firmware has powered down the chip.
         */
        if (error != 0 &&
            ((v & GEM_MIF_CONFIG_MDI0) != 0 || GEM_IS_APPLE(sc))) {
                v &= ~GEM_MIF_CONFIG_PHY_SEL;
                GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
                GEM_BANK1_BARRIER(sc, GEM_MIF_CONFIG, 4,
                    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
                switch (sc->sc_variant) {
                case GEM_APPLE_GMAC:
                case GEM_APPLE_K2_GMAC:
                        phy = GEM_PHYAD_INTERNAL;
                        break;
                case GEM_SUN_ERI:
                        phy = GEM_PHYAD_EXTERNAL;
                        break;
                default:
                        phy = MII_PHY_ANY;
                        break;
                }
                error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
                    gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy,
                    MII_OFFSET_ANY, MIIF_DOPAUSE);
        }

        /*
         * Try the external PCS SERDES if we didn't find any PHYs.
         */
        if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
 serdes:
                GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
                    GEM_MII_DATAPATH_SERDES);
                GEM_BANK1_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4,
                    BUS_SPACE_BARRIER_WRITE);
                GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
                    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
                GEM_BANK1_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4,
                    BUS_SPACE_BARRIER_WRITE);
                GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
                GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
                    BUS_SPACE_BARRIER_WRITE);
                sc->sc_flags |= GEM_SERDES;
                error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
                    gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK,
                    GEM_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE);
        }
        if (error != 0) {
                device_printf(sc->sc_dev, "attaching PHYs failed\n");
                goto fail_rxd;
        }
        sc->sc_mii = device_get_softc(sc->sc_miibus);

        /*
         * From this point forward, the attachment cannot fail.  A failure
         * before this point releases all resources that may have been
         * allocated.
         */

        /* Get RX FIFO size. */
        sc->sc_rxfifosize = 64 *
            GEM_BANK1_READ_4(sc, GEM_RX_FIFO_SIZE);

        /* Get TX FIFO size. */
        v = GEM_BANK1_READ_4(sc, GEM_TX_FIFO_SIZE);
        device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
            sc->sc_rxfifosize / 1024, v / 16);

        /* Attach the interface. */
        ether_ifattach(ifp, sc->sc_enaddr);

        /*
         * Tell the upper layer(s) we support long frames/checksum offloads.
         */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
        ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
        ifp->if_hwassist |= sc->sc_csum_features;
        ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;

        return (0);

        /*
         * Free any resources we've allocated during the failed attach
         * attempt.  Do this in reverse order and fall through.
         */
 fail_rxd:
        for (i = 0; i < GEM_NRXDESC; i++)
                if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_rdmatag,
                            sc->sc_rxsoft[i].rxs_dmamap);
 fail_txd:
        for (i = 0; i < GEM_TXQUEUELEN; i++)
                if (sc->sc_txsoft[i].txs_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_tdmatag,
                            sc->sc_txsoft[i].txs_dmamap);
        bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
 fail_cmem:
        bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
            sc->sc_cddmamap);
 fail_ctag:
        bus_dma_tag_destroy(sc->sc_cdmatag);
 fail_ttag:
        bus_dma_tag_destroy(sc->sc_tdmatag);
 fail_rtag:
        bus_dma_tag_destroy(sc->sc_rdmatag);
 fail_ptag:
        bus_dma_tag_destroy(sc->sc_pdmatag);
        return (error);
}

void
gem_detach(struct gem_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        int i;

        ether_ifdetach(ifp);
        GEM_LOCK(sc);
        gem_stop(ifp, 1);
        GEM_UNLOCK(sc);
        callout_drain(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
        callout_drain(&sc->sc_rx_ch);
#endif
        if_free(ifp);
        device_delete_child(sc->sc_dev, sc->sc_miibus);

        for (i = 0; i < GEM_NRXDESC; i++)
                if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_rdmatag,
                            sc->sc_rxsoft[i].rxs_dmamap);
        for (i = 0; i < GEM_TXQUEUELEN; i++)
                if (sc->sc_txsoft[i].txs_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_tdmatag,
                            sc->sc_txsoft[i].txs_dmamap);
        GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
        bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
            sc->sc_cddmamap);
        bus_dma_tag_destroy(sc->sc_cdmatag);
        bus_dma_tag_destroy(sc->sc_tdmatag);
        bus_dma_tag_destroy(sc->sc_rdmatag);
        bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
gem_suspend(struct gem_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;

        GEM_LOCK(sc);
        gem_stop(ifp, 0);
        GEM_UNLOCK(sc);
}

void
gem_resume(struct gem_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;

        GEM_LOCK(sc);
        /*
         * On resume all registers have to be initialized again like
         * after power-on.
         */
        sc->sc_flags &= ~GEM_INITED;
        if (ifp->if_flags & IFF_UP)
                gem_init_locked(sc);
        GEM_UNLOCK(sc);
}

static inline void
gem_rxcksum(struct mbuf *m, uint64_t flags)
{
        struct ether_header *eh;
        struct ip *ip;
        struct udphdr *uh;
        uint16_t *opts;
        int32_t hlen, len, pktlen;
        uint32_t temp32;
        uint16_t cksum;

        pktlen = m->m_pkthdr.len;
        if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
                return;
        eh = mtod(m, struct ether_header *);
        if (eh->ether_type != htons(ETHERTYPE_IP))
                return;
        ip = (struct ip *)(eh + 1);
        if (ip->ip_v != IPVERSION)
                return;

        hlen = ip->ip_hl << 2;
        pktlen -= sizeof(struct ether_header);
        if (hlen < sizeof(struct ip))
                return;
        if (ntohs(ip->ip_len) < hlen)
                return;
        if (ntohs(ip->ip_len) != pktlen)
                return;
        if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
                return; /* Cannot handle fragmented packet. */

        switch (ip->ip_p) {
        case IPPROTO_TCP:
                if (pktlen < (hlen + sizeof(struct tcphdr)))
                        return;
                break;
        case IPPROTO_UDP:
                if (pktlen < (hlen + sizeof(struct udphdr)))
                        return;
                uh = (struct udphdr *)((uint8_t *)ip + hlen);
                if (uh->uh_sum == 0)
                        return; /* no checksum */
                break;
        default:
                return;
        }

        cksum = ~(flags & GEM_RD_CHECKSUM);
        /* checksum fixup for IP options */
        len = hlen - sizeof(struct ip);
        if (len > 0) {
                opts = (uint16_t *)(ip + 1);
                for (; len > 0; len -= sizeof(uint16_t), opts++) {
                        temp32 = cksum - *opts;
                        temp32 = (temp32 >> 16) + (temp32 & 65535);
                        cksum = temp32 & 65535;
                }
        }
        m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
        m->m_pkthdr.csum_data = cksum;
}
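
/*
 * Worked example for the option fixup in gem_rxcksum() (illustrative
 * values): with a hardware sum of cksum = 0xfffe and a single option
 * word *opts = 0x0102, temp32 = 0xfffe - 0x0102 = 0xfefc, there is no
 * carry to fold, and cksum becomes 0xfefc.  When the subtraction
 * underflows, the (temp32 >> 16) + (temp32 & 65535) step adds the
 * borrow back in, which is exactly the end-around borrow of one's
 * complement arithmetic.
 */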

static void
gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct gem_softc *sc = xsc;

        if (error != 0)
                return;
        if (nsegs != 1)
                panic("%s: bad control buffer segment count", __func__);
        sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_tick(void *arg)
{
        struct gem_softc *sc = arg;
        struct ifnet *ifp = sc->sc_ifp;
        uint32_t v;

        GEM_LOCK_ASSERT(sc, MA_OWNED);

        /*
         * Unload collision and error counters.
         */
        ifp->if_collisions +=
            GEM_BANK1_READ_4(sc, GEM_MAC_NORM_COLL_CNT) +
            GEM_BANK1_READ_4(sc, GEM_MAC_FIRST_COLL_CNT);
        v = GEM_BANK1_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) +
            GEM_BANK1_READ_4(sc, GEM_MAC_LATE_COLL_CNT);
        ifp->if_collisions += v;
        ifp->if_oerrors += v;
        ifp->if_ierrors +=
            GEM_BANK1_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) +
            GEM_BANK1_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) +
            GEM_BANK1_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) +
            GEM_BANK1_READ_4(sc, GEM_MAC_RX_CODE_VIOL);

        /*
         * Then clear the hardware counters.
         */
        GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);

        mii_tick(sc->sc_mii);

        if (gem_watchdog(sc) == EJUSTRETURN)
                return;

        callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

static int
gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, uint32_t clr,
    uint32_t set)
{
        int i;
        uint32_t reg;

        for (i = GEM_TRIES; i--; DELAY(100)) {
                reg = GEM_BANKN_READ_M(bank, 4, sc, r);
                if ((reg & clr) == 0 && (reg & set) == set)
                        return (1);
        }
        return (0);
}

static void
gem_reset(struct gem_softc *sc)
{

#ifdef GEM_DEBUG
        CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif
        gem_reset_rx(sc);
        gem_reset_tx(sc);

        /* Do a full reset. */
        GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
        GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
                device_printf(sc->sc_dev, "cannot reset device\n");
}

static void
gem_rxdrain(struct gem_softc *sc)
{
        struct gem_rxsoft *rxs;
        int i;

        for (i = 0; i < GEM_NRXDESC; i++) {
                rxs = &sc->sc_rxsoft[i];
                if (rxs->rxs_mbuf != NULL) {
                        bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
                        m_freem(rxs->rxs_mbuf);
                        rxs->rxs_mbuf = NULL;
                }
        }
}

static void
gem_stop(struct ifnet *ifp, int disable)
{
        struct gem_softc *sc = ifp->if_softc;
        struct gem_txsoft *txs;

#ifdef GEM_DEBUG
        CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

        callout_stop(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
        callout_stop(&sc->sc_rx_ch);
#endif

        gem_reset_tx(sc);
        gem_reset_rx(sc);

        /*
         * Release any queued transmit buffers.
         */
        while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
                STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
                if (txs->txs_ndescs != 0) {
                        bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
                        if (txs->txs_mbuf != NULL) {
                                m_freem(txs->txs_mbuf);
                                txs->txs_mbuf = NULL;
                        }
                }
                STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
        }

        if (disable)
                gem_rxdrain(sc);

        /*
         * Mark the interface down and cancel the watchdog timer.
         */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
        sc->sc_flags &= ~GEM_LINK;
        sc->sc_wdog_timer = 0;
}

static int
gem_reset_rx(struct gem_softc *sc)
{

        /*
         * Resetting while DMA is in progress can cause a bus hang, so we
         * disable DMA first.
         */
        (void)gem_disable_rx(sc);
        GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 0);
        GEM_BANK1_BARRIER(sc, GEM_RX_CONFIG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!GEM_BANK1_BITWAIT(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
                device_printf(sc->sc_dev, "cannot disable RX DMA\n");

        /* Wait 5ms extra. */
        DELAY(5000);

        /* Finally, reset the ERX. */
        GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX);
        GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX,
            0)) {
                device_printf(sc->sc_dev, "cannot reset receiver\n");
                return (1);
        }
        return (0);
}

/*
 * Reset the receiver DMA engine.
 *
 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
 * etc in order to reset the receiver DMA engine only and not do a full
 * reset which amongst others also downs the link and clears the FIFOs.
 */
static void
gem_reset_rxdma(struct gem_softc *sc)
{
        int i;

        if (gem_reset_rx(sc) != 0)
                return (gem_init_locked(sc));
        for (i = 0; i < GEM_NRXDESC; i++)
                if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
                        GEM_UPDATE_RXDESC(sc, i);
        sc->sc_rxptr = 0;
        GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* NOTE: we use only 32-bit DMA addresses here. */
        GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
        GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
        GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
        GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
            gem_ringsize(GEM_NRXDESC /* XXX */) |
            ((ETHER_HDR_LEN + sizeof(struct ip)) <<
            GEM_RX_CONFIG_CXM_START_SHFT) |
            (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
            (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT));
        /* Adjusting for the SBus clock probably isn't worth the fuss. */
        GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
            ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) <<
            GEM_RX_BLANKING_TIME_SHIFT) | 6);
        GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
            (3 * sc->sc_rxfifosize / 256) |
            ((sc->sc_rxfifosize / 256) << 12));
        GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
            GEM_BANK1_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
            GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
            GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) | GEM_MAC_RX_ENABLE);
}

static int
gem_reset_tx(struct gem_softc *sc)
{

        /*
         * Resetting while DMA is in progress can cause a bus hang, so we
         * disable DMA first.
         */
        (void)gem_disable_tx(sc);
        GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, 0);
        GEM_BANK1_BARRIER(sc, GEM_TX_CONFIG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!GEM_BANK1_BITWAIT(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
                device_printf(sc->sc_dev, "cannot disable TX DMA\n");

        /* Wait 5ms extra. */
        DELAY(5000);

        /* Finally, reset the ETX. */
        GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_TX);
        GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX,
            0)) {
                device_printf(sc->sc_dev, "cannot reset transmitter\n");
                return (1);
        }
        return (0);
}

static int
gem_disable_rx(struct gem_softc *sc)
{

        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
            GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE);
        GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        return (GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE,
            0));
}

static int
gem_disable_tx(struct gem_softc *sc)
{

        GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
            GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE);
        GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        return (GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE,
            0));
}

static int
gem_meminit(struct gem_softc *sc)
{
        struct gem_rxsoft *rxs;
        int error, i;

        GEM_LOCK_ASSERT(sc, MA_OWNED);

        /*
         * Initialize the transmit descriptor ring.
         */
        for (i = 0; i < GEM_NTXDESC; i++) {
                sc->sc_txdescs[i].gd_flags = 0;
                sc->sc_txdescs[i].gd_addr = 0;
        }
        sc->sc_txfree = GEM_MAXTXFREE;
        sc->sc_txnext = 0;
        sc->sc_txwin = 0;

        /*
         * Initialize the receive descriptor and receive job
         * descriptor rings.
         */
        for (i = 0; i < GEM_NRXDESC; i++) {
                rxs = &sc->sc_rxsoft[i];
                if (rxs->rxs_mbuf == NULL) {
                        if ((error = gem_add_rxbuf(sc, i)) != 0) {
                                device_printf(sc->sc_dev,
                                    "unable to allocate or map RX buffer %d, "
                                    "error = %d\n", i, error);
                                /*
                                 * XXX we should attempt to run with fewer
                                 * receive buffers instead of just failing.
                                 */
                                gem_rxdrain(sc);
                                return (1);
                        }
                } else
                        GEM_INIT_RXDESC(sc, i);
        }
        sc->sc_rxptr = 0;

        GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);
}

static u_int
gem_ringsize(u_int sz)
{

        switch (sz) {
        case 32:
                return (GEM_RING_SZ_32);
        case 64:
                return (GEM_RING_SZ_64);
        case 128:
                return (GEM_RING_SZ_128);
        case 256:
                return (GEM_RING_SZ_256);
        case 512:
                return (GEM_RING_SZ_512);
        case 1024:
                return (GEM_RING_SZ_1024);
        case 2048:
                return (GEM_RING_SZ_2048);
        case 4096:
                return (GEM_RING_SZ_4096);
        case 8192:
                return (GEM_RING_SZ_8192);
        default:
                printf("%s: invalid ring size %d\n", __func__, sz);
                return (GEM_RING_SZ_32);
        }
}
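
/*
 * Note (sketch): for the power-of-two sizes accepted above, the ring
 * size code is effectively log2(sz / 32), assuming the GEM_RING_SZ_*
 * macros form a contiguous encoding; the explicit switch is kept since
 * the macros also carry the field placement expected by the TX/RX
 * configuration registers.
 */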

static void
gem_init(void *xsc)
{
        struct gem_softc *sc = xsc;

        GEM_LOCK(sc);
        gem_init_locked(sc);
        GEM_UNLOCK(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init_locked(struct gem_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        uint32_t v;

        GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
        CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
            __func__);
#endif
        /*
         * Initialization sequence.  The numbered steps below correspond
         * to the sequence outlined in section 6.3.5.1 in the Ethernet
         * Channel Engine manual (part of the PCIO manual).
         * See also the STP2002-STQ document from Sun Microsystems.
         */

        /* step 1 & 2.  Reset the Ethernet Channel. */
        gem_stop(ifp, 0);
        gem_reset(sc);
#ifdef GEM_DEBUG
        CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
            __func__);
#endif

        if ((sc->sc_flags & GEM_SERDES) == 0)
                /* Re-initialize the MIF. */
                gem_mifinit(sc);

        /* step 3.  Setup data structures in host memory. */
        if (gem_meminit(sc) != 0)
                return;

        /* step 4.  TX MAC registers & counters */
        gem_init_regs(sc);

        /* step 5.  RX MAC registers & counters */
        gem_setladrf(sc);

        /* step 6 & 7.  Program Descriptor Ring Base Addresses. */
        /* NOTE: we use only 32-bit DMA addresses here. */
        GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0);
        GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

        GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
        GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
        CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
            GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif

        /* step 8.  Global Configuration & Interrupt Mask */

        /*
         * Set the internal arbitration to "infinite" bursts of the
         * maximum length of 31 * 64 bytes so DMA transfers aren't
         * split up in cache line size chunks.  This greatly improves
         * RX performance.
         * Enable silicon bug workarounds for the Apple variants.
         */
        GEM_BANK1_WRITE_4(sc, GEM_CONFIG,
            GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
            ((sc->sc_flags & GEM_PCI) != 0 ? GEM_CONFIG_BURST_INF :
            GEM_CONFIG_BURST_64) | (GEM_IS_APPLE(sc) ?
            GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));

        GEM_BANK1_WRITE_4(sc, GEM_INTMASK,
            ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
            GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
            GEM_INTR_BERR
#ifdef GEM_DEBUG
            | GEM_INTR_PCS | GEM_INTR_MIF
#endif
            ));
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
            GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_MASK,
            GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
            GEM_MAC_TX_PEAK_EXP);
#ifdef GEM_DEBUG
        GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
            ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
#else
        GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
            GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
#endif

        /* step 9.  ETX Configuration: use mostly default values. */

        /* Enable DMA. */
        v = gem_ringsize(GEM_NTXDESC);
        /* Set TX FIFO threshold and enable DMA. */
        v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x4ff) << 10) &
            GEM_TX_CONFIG_TXFIFO_TH;
        GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);

        /* step 10.  ERX Configuration */

        /* Encode Receive Descriptor ring size. */
        v = gem_ringsize(GEM_NRXDESC /* XXX */);
        /* RX TCP/UDP checksum offset */
        v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
            GEM_RX_CONFIG_CXM_START_SHFT);
        /* Set RX FIFO threshold, set first byte offset and enable DMA. */
        GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
            v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
            (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) |
            GEM_RX_CONFIG_RXDMA_EN);

        /* Adjusting for the SBus clock probably isn't worth the fuss. */
        GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
            ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) <<
            GEM_RX_BLANKING_TIME_SHIFT) | 6);

        /*
         * The following value is for an OFF Threshold of about 3/4 full
         * and an ON Threshold of 1/4 full.
         */
        GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
            (3 * sc->sc_rxfifosize / 256) |
            ((sc->sc_rxfifosize / 256) << 12));
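
        /*
         * Worked example, assuming the thresholds are in units of 64 bytes
         * and a 4kB RX FIFO (sc_rxfifosize == 4096): the FIFO holds 64 such
         * units, the XOFF threshold written above is 3 * 4096 / 256 = 48
         * units (3/4 full) and the XON threshold is 4096 / 256 = 16 units
         * (1/4 full).
         */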

        /* step 11.  Configure Media. */

        /* step 12.  RX_MAC Configuration Register */
        v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
        v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0);
        GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
                device_printf(sc->sc_dev, "cannot configure RX MAC\n");
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);

        /* step 13.  TX_MAC Configuration Register */
        v = GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG);
        v |= GEM_MAC_TX_ENABLE;
        GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0);
        GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
                device_printf(sc->sc_dev, "cannot configure TX MAC\n");
        GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, v);

        /* step 14.  Issue Transmit Pending command. */

        /* step 15.  Give the receiver a swift kick. */
        GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        mii_mediachg(sc->sc_mii);

        /* Start the one second timer. */
        sc->sc_wdog_timer = 0;
        callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

static int
gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
{
        bus_dma_segment_t txsegs[GEM_NTXSEGS];
        struct gem_txsoft *txs;
        struct ip *ip;
        struct mbuf *m;
        uint64_t cflags, flags;
        int error, nexttx, nsegs, offset, seg;

        GEM_LOCK_ASSERT(sc, MA_OWNED);

        /* Get a work queue entry. */
        if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
                /* Ran out of descriptors. */
                return (ENOBUFS);
        }

        cflags = 0;
        if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
                if (M_WRITABLE(*m_head) == 0) {
                        m = m_dup(*m_head, M_DONTWAIT);
                        m_freem(*m_head);
                        *m_head = m;
                        if (m == NULL)
                                return (ENOBUFS);
                }
                offset = sizeof(struct ether_header);
                m = m_pullup(*m_head, offset + sizeof(struct ip));
                if (m == NULL) {
                        *m_head = NULL;
                        return (ENOBUFS);
                }
                ip = (struct ip *)(mtod(m, caddr_t) + offset);
                offset += (ip->ip_hl << 2);
                cflags = offset << GEM_TD_CXSUM_STARTSHFT |
                    ((offset + m->m_pkthdr.csum_data) <<
                    GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE;
                *m_head = m;
        }
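
        /*
         * Worked example: for a TCP segment with a standard 20-byte IP
         * header, offset is 14 (Ethernet header) + 20 = 34, so the checksum
         * start field is 34; csum_data is the offset of the TCP checksum
         * field within the TCP header (16), so the stuff field becomes
         * 34 + 16 = 50.
         */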

        error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
            *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
        if (error == EFBIG) {
                m = m_collapse(*m_head, M_DONTWAIT, GEM_NTXSEGS);
                if (m == NULL) {
                        m_freem(*m_head);
                        *m_head = NULL;
                        return (ENOBUFS);
                }
                *m_head = m;
                error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
                    txs->txs_dmamap, *m_head, txsegs, &nsegs,
                    BUS_DMA_NOWAIT);
                if (error != 0) {
                        m_freem(*m_head);
                        *m_head = NULL;
                        return (error);
                }
        } else if (error != 0)
                return (error);
        /* If nsegs is wrong then the stack is corrupt. */
        KASSERT(nsegs <= GEM_NTXSEGS,
            ("%s: too many DMA segments (%d)", __func__, nsegs));
        if (nsegs == 0) {
                m_freem(*m_head);
                *m_head = NULL;
                return (EIO);
        }

        /*
         * Ensure we have enough descriptors free to describe
         * the packet.  Note, we always reserve one descriptor
         * at the end of the ring as a termination point, in
         * order to prevent wrap-around.
         */
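        /*
         * Example: a completely full ring would leave sc_txfree at 0 and be
         * indistinguishable from an empty one, so with GEM_NTXDESC
         * descriptors at most GEM_NTXDESC - 1 (GEM_MAXTXFREE) are ever
         * handed to the hardware; hence the nsegs > sc_txfree - 1 check
         * below.
         */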
        if (nsegs > sc->sc_txfree - 1) {
                txs->txs_ndescs = 0;
                bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
                return (ENOBUFS);
        }

        txs->txs_ndescs = nsegs;
        txs->txs_firstdesc = sc->sc_txnext;
        nexttx = txs->txs_firstdesc;
        for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
#ifdef GEM_DEBUG
                CTR6(KTR_GEM,
                    "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
                    __func__, seg, nexttx, txsegs[seg].ds_len,
                    txsegs[seg].ds_addr,
                    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
#endif
                sc->sc_txdescs[nexttx].gd_addr =
                    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
                KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
                    ("%s: segment size too large!", __func__));
                flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
                sc->sc_txdescs[nexttx].gd_flags =
                    GEM_DMA_WRITE(sc, flags | cflags);
                txs->txs_lastdesc = nexttx;
        }

        /* Set EOP on the last descriptor. */
#ifdef GEM_DEBUG
        CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
            __func__, seg, nexttx);
#endif
        sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
            GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);

        /* Lastly set SOP on the first descriptor. */
#ifdef GEM_DEBUG
        CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
            __func__, seg, nexttx);
#endif
        if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
                sc->sc_txwin = 0;
                sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
                    GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
                    GEM_TD_START_OF_PACKET);
        } else
                sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
                    GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);

        /* Sync the DMA map. */
        bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
            BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
        CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
            __func__, txs->txs_firstdesc, txs->txs_lastdesc,
            txs->txs_ndescs);
#endif
        STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
        STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
        txs->txs_mbuf = *m_head;

        sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
        sc->sc_txfree -= txs->txs_ndescs;

        return (0);
}

static void
gem_init_regs(struct gem_softc *sc)
{
        const u_char *laddr = IF_LLADDR(sc->sc_ifp);

        GEM_LOCK_ASSERT(sc, MA_OWNED);

        /* These registers are not cleared on reset. */
        if ((sc->sc_flags & GEM_INITED) == 0) {
                /* magic values */
                GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG0, 0);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG1, 8);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG2, 4);

                /* min frame length */
                GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
                /* max frame length and max burst size */
                GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME,
                    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

                /* more magic values */
                GEM_BANK1_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8808);

                /* random number seed */
                GEM_BANK1_WRITE_4(sc, GEM_MAC_RANDOM_SEED,
                    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

                /* secondary MAC address: 0:0:0:0:0:0 */
                GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR3, 0);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR4, 0);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR5, 0);

                /* MAC control address: 01:80:c2:00:00:01 */
                GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180);

                /* MAC filter address: 0:0:0:0:0:0 */
                GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0);

                sc->sc_flags |= GEM_INITED;
        }

        /* Counters need to be zeroed. */
        GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);

        /* Set XOFF PAUSE time. */
        GEM_BANK1_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);

        /* Set the station address. */
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);

        /* Enable MII outputs. */
        GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
}

static void
gem_start(struct ifnet *ifp)
{
        struct gem_softc *sc = ifp->if_softc;

        GEM_LOCK(sc);
        gem_start_locked(ifp);
        GEM_UNLOCK(sc);
}

static inline void
gem_txkick(struct gem_softc *sc)
{

        /*
         * Update the TX kick register.  This register has to point to the
         * descriptor after the last valid one and for optimum performance
         * should be incremented in multiples of 4 (the DMA engine fetches/
         * updates descriptors in batches of 4).
         */
#ifdef GEM_DEBUG
        CTR3(KTR_GEM, "%s: %s: kicking TX %d",
            device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
#endif
        GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        GEM_BANK1_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);
}
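
/*
 * Example for gem_txkick(): after descriptors 0-7 have been set up,
 * sc_txnext is 8 and the GEM_TX_KICK write above tells the DMA engine
 * that descriptors up to (but not including) index 8 are valid - a
 * whole batch of 4, which is why gem_start_locked() below kicks on
 * multiples of 4.
 */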

static void
gem_start_locked(struct ifnet *ifp)
{
        struct gem_softc *sc = ifp->if_softc;
        struct mbuf *m;
        int kicked;

        GEM_LOCK_ASSERT(sc, MA_OWNED);

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
                return;

#ifdef GEM_DEBUG
        CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
            device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
            sc->sc_txnext);
#endif
        kicked = 0;
        for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
                IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
                if (m == NULL)
                        break;
                if (gem_load_txmbuf(sc, &m) != 0) {
                        if (m == NULL)
                                break;
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        IFQ_DRV_PREPEND(&ifp->if_snd, m);
                        break;
                }
                if ((sc->sc_txnext % 4) == 0) {
                        gem_txkick(sc);
                        kicked = 1;
                } else
                        kicked = 0;
                BPF_MTAP(ifp, m);
        }
        if (kicked == 0)
                gem_txkick(sc);
#ifdef GEM_DEBUG
        CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
            device_get_name(sc->sc_dev), sc->sc_txnext);
#endif

        /* Set a watchdog timer in case the chip flakes out. */
        sc->sc_wdog_timer = 5;
#ifdef GEM_DEBUG
        CTR3(KTR_GEM, "%s: %s: watchdog %d",
            device_get_name(sc->sc_dev), __func__,
            sc->sc_wdog_timer);
#endif
}

static void
gem_tint(struct gem_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct gem_txsoft *txs;
        int progress;
        uint32_t txlast;
#ifdef GEM_DEBUG
        int i;
#endif

        GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
        CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

        /*
         * Go through our TX list and free mbufs for those
         * frames that have been transmitted.
         */
        progress = 0;
        GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
        while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
#ifdef GEM_DEBUG
                if ((ifp->if_flags & IFF_DEBUG) != 0) {
                        printf("    txsoft %p transmit chain:\n", txs);
                        for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
                                printf("descriptor %d: ", i);
                                printf("gd_flags: 0x%016llx\t",
                                    (long long)GEM_DMA_READ(sc,
                                    sc->sc_txdescs[i].gd_flags));
                                printf("gd_addr: 0x%016llx\n",
                                    (long long)GEM_DMA_READ(sc,
                                    sc->sc_txdescs[i].gd_addr));
                                if (i == txs->txs_lastdesc)
                                        break;
                        }
                }
#endif

                /*
                 * In theory, we could harvest some descriptors before
                 * the ring is empty, but that's a bit complicated.
                 *
                 * GEM_TX_COMPLETION points to the last descriptor
                 * processed + 1.
                 */
                txlast = GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION);
#ifdef GEM_DEBUG
                CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
                    "txs->txs_lastdesc = %d, txlast = %d",
                    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
                if (txs->txs_firstdesc <= txs->txs_lastdesc) {
                        if ((txlast >= txs->txs_firstdesc) &&
                            (txlast <= txs->txs_lastdesc))
                                break;
                } else {
                        /* Ick -- this command wraps. */
                        if ((txlast >= txs->txs_firstdesc) ||
                            (txlast <= txs->txs_lastdesc))
                                break;
                }
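
                /*
                 * Worked example for the wrap check above: in a 1024-entry
                 * ring a packet may span firstdesc = 1022 .. lastdesc = 1.
                 * It is still pending while txlast >= 1022 or txlast <= 1
                 * and has completed once txlast lies in 2..1021.
                 */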

#ifdef GEM_DEBUG
                CTR1(KTR_GEM, "%s: releasing a descriptor", __func__);
#endif
                STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

                sc->sc_txfree += txs->txs_ndescs;

                bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
                if (txs->txs_mbuf != NULL) {
                        m_freem(txs->txs_mbuf);
                        txs->txs_mbuf = NULL;
                }

                STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

                ifp->if_opackets++;
                progress = 1;
        }

#ifdef GEM_DEBUG
        CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx "
            "GEM_TX_COMPLETION %x",
            __func__, GEM_BANK1_READ_4(sc, GEM_TX_STATE_MACHINE),
            ((long long)GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) |
            GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_LO),
            GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION));
#endif

        if (progress) {
                if (sc->sc_txfree == GEM_NTXDESC - 1)
                        sc->sc_txwin = 0;

                /*
                 * We freed some descriptors, so reset IFF_DRV_OACTIVE
                 * and restart.
                 */
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (STAILQ_EMPTY(&sc->sc_txdirtyq))
                        sc->sc_wdog_timer = 0;
                gem_start_locked(ifp);
        }

#ifdef GEM_DEBUG
        CTR3(KTR_GEM, "%s: %s: watchdog %d",
            device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
#endif
}

#ifdef GEM_RINT_TIMEOUT
static void
gem_rint_timeout(void *arg)
{
        struct gem_softc *sc = arg;

        GEM_LOCK_ASSERT(sc, MA_OWNED);

        gem_rint(sc);
}
#endif

static void
gem_rint(struct gem_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct mbuf *m;
        uint64_t rxstat;
        uint32_t rxcomp;

        GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_RINT_TIMEOUT
        callout_stop(&sc->sc_rx_ch);
#endif
#ifdef GEM_DEBUG
        CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

        /*
         * Read the completion register once.  This limits
         * how long the following loop can execute.
         */
        rxcomp = GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION);
#ifdef GEM_DEBUG
        CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d",
            __func__, sc->sc_rxptr, rxcomp);
#endif
        GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        for (; sc->sc_rxptr != rxcomp;) {
                m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
                rxstat = GEM_DMA_READ(sc,
                    sc->sc_rxdescs[sc->sc_rxptr].gd_flags);

                if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
                        /*
                         * The descriptor is still marked as owned, although
                         * it is supposed to have completed.  This has been
                         * observed on some machines.  Just exiting here
                         * might leave the packet sitting around until another
                         * one arrives to trigger a new interrupt, which is
                         * generally undesirable, so set up a timeout.
                         */
                        callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
                            gem_rint_timeout, sc);
#endif
                        m = NULL;
                        goto kickit;
                }

                if (rxstat & GEM_RD_BAD_CRC) {
                        ifp->if_ierrors++;
                        device_printf(sc->sc_dev, "receive error: CRC error\n");
                        GEM_INIT_RXDESC(sc, sc->sc_rxptr);
                        m = NULL;
                        goto kickit;
                }

#ifdef GEM_DEBUG
                if ((ifp->if_flags & IFF_DEBUG) != 0) {
                        printf("    rxsoft %p descriptor %d: ",
                            &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
                        printf("gd_flags: 0x%016llx\t",
                            (long long)GEM_DMA_READ(sc,
                            sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
                        printf("gd_addr: 0x%016llx\n",
                            (long long)GEM_DMA_READ(sc,
                            sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
                }
#endif

                /*
                 * Allocate a new mbuf cluster.  If that fails, we are
                 * out of memory, and must drop the packet and recycle
                 * the buffer that's already attached to this descriptor.
                 */
                if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
                        ifp->if_ierrors++;
                        GEM_INIT_RXDESC(sc, sc->sc_rxptr);
                        m = NULL;
                }

 kickit:
                /*
                 * Update the RX kick register.  This register has to point
                 * to the descriptor after the last valid one (before the
                 * current batch) and for optimum performance should be
                 * incremented in multiples of 4 (the DMA engine fetches/
                 * updates descriptors in batches of 4).
                 */
                sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
                if ((sc->sc_rxptr % 4) == 0) {
                        GEM_CDSYNC(sc,
                            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                        GEM_BANK1_WRITE_4(sc, GEM_RX_KICK,
                            (sc->sc_rxptr + GEM_NRXDESC - 4) &
                            GEM_NRXDESC_MASK);
                }

                if (m == NULL) {
                        if (rxstat & GEM_RD_OWN)
                                break;
                        continue;
                }

                ifp->if_ipackets++;
                m->m_data += ETHER_ALIGN; /* first byte offset */
                m->m_pkthdr.rcvif = ifp;
                m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);

                if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
                        gem_rxcksum(m, rxstat);

                /* Pass it on. */
                GEM_UNLOCK(sc);
                (*ifp->if_input)(ifp, m);
                GEM_LOCK(sc);
        }

#ifdef GEM_DEBUG
        CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__,
            sc->sc_rxptr, GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION));
#endif
}

static int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
        struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
        struct mbuf *m;
        bus_dma_segment_t segs[1];
        int error, nsegs;

        GEM_LOCK_ASSERT(sc, MA_OWNED);

        m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);
        m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
        /* Bzero the packet to check DMA. */
        memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

        if (rxs->rxs_mbuf != NULL) {
                bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
        }

        error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
            m, segs, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "cannot load RX DMA map %d, error = %d\n", idx, error);
                m_freem(m);
                return (error);
        }
        /* If nsegs is wrong then the stack is corrupt. */
        KASSERT(nsegs == 1,
            ("%s: too many DMA segments (%d)", __func__, nsegs));
        rxs->rxs_mbuf = m;
        rxs->rxs_paddr = segs[0].ds_addr;

        bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
            BUS_DMASYNC_PREREAD);

        GEM_INIT_RXDESC(sc, idx);

        return (0);
}

static void
gem_eint(struct gem_softc *sc, u_int status)
{

        sc->sc_ifp->if_ierrors++;
        if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
                gem_reset_rxdma(sc);
                return;
        }

        device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
        if ((status & GEM_INTR_BERR) != 0) {
                if ((sc->sc_flags & GEM_PCI) != 0)
                        printf(", PCI bus error 0x%x\n",
                            GEM_BANK1_READ_4(sc, GEM_PCI_ERROR_STATUS));
                else
                        printf(", SBus error 0x%x\n",
                            GEM_BANK1_READ_4(sc, GEM_SBUS_STATUS));
        }
}

void
gem_intr(void *v)
{
        struct gem_softc *sc = v;
        uint32_t status, status2;

        GEM_LOCK(sc);
        status = GEM_BANK1_READ_4(sc, GEM_STATUS);

#ifdef GEM_DEBUG
        CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
            device_get_name(sc->sc_dev), __func__,
            (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status);

        /*
         * PCS interrupts must be cleared, otherwise no traffic is passed!
         */
        if ((status & GEM_INTR_PCS) != 0) {
                status2 =
                    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS) |
                    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS);
                if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
                        device_printf(sc->sc_dev,
                            "%s: PCS link status changed\n", __func__);
        }
        if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
                status2 = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_STATUS);
                if ((status2 & GEM_MAC_PAUSED) != 0)
                        device_printf(sc->sc_dev,
                            "%s: PAUSE received (PAUSE time %d slots)\n",
                            __func__, GEM_MAC_PAUSE_TIME(status2));
                if ((status2 & GEM_MAC_PAUSE) != 0)
                        device_printf(sc->sc_dev,
                            "%s: transited to PAUSE state\n", __func__);
                if ((status2 & GEM_MAC_RESUME) != 0)
                        device_printf(sc->sc_dev,
                            "%s: transited to non-PAUSE state\n", __func__);
        }
        if ((status & GEM_INTR_MIF) != 0)
                device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
#endif

        if (__predict_false(status &
            (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
                gem_eint(sc, status);

        if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
                gem_rint(sc);

        if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
                gem_tint(sc);

        if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) {
                status2 = GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS);
                if ((status2 &
                    ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
                    GEM_MAC_TX_PEAK_EXP)) != 0)
                        device_printf(sc->sc_dev,
                            "MAC TX fault, status %x\n", status2);
                if ((status2 &
                    (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) {
                        sc->sc_ifp->if_oerrors++;
                        gem_init_locked(sc);
                }
        }
        if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) {
                status2 = GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS);
                /*
                 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
                 * revisions GEM_MAC_RX_OVERFLOW events happen often
                 * due to a silicon bug so handle them silently.
                 * Moreover, it's likely that the receiver has hung so
                 * we reset it.
                 */
                if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) {
                        sc->sc_ifp->if_ierrors++;
                        gem_reset_rxdma(sc);
                } else if ((status2 &
                    ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0)
                        device_printf(sc->sc_dev,
                            "MAC RX fault, status %x\n", status2);
        }
        GEM_UNLOCK(sc);
}

static int
gem_watchdog(struct gem_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;

        GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
        CTR4(KTR_GEM,
            "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x",
            __func__, GEM_BANK1_READ_4(sc, GEM_RX_CONFIG),
            GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS),
            GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG));
        CTR4(KTR_GEM,
            "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x",
            __func__, GEM_BANK1_READ_4(sc, GEM_TX_CONFIG),
            GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS),
            GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG));
#endif

        if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
                return (0);

        if ((sc->sc_flags & GEM_LINK) != 0)
                device_printf(sc->sc_dev, "device timeout\n");
        else if (bootverbose)
                device_printf(sc->sc_dev, "device timeout (no link)\n");
        ifp->if_oerrors++;

        /* Try to get more packets going. */
        gem_init_locked(sc);
        gem_start_locked(ifp);
        return (EJUSTRETURN);
}

static void
gem_mifinit(struct gem_softc *sc)
{

        /* Configure the MIF in frame mode. */
        GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG,
            GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
        GEM_BANK1_BARRIER(sc, GEM_MIF_CONFIG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

/*
 * The MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 */
static int
gem_mii_readreg(device_t dev, int phy, int reg)
{
        struct gem_softc *sc;
        int n;
        uint32_t v;

#ifdef GEM_DEBUG_PHY
        printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

        sc = device_get_softc(dev);
        if ((sc->sc_flags & GEM_SERDES) != 0) {
                switch (reg) {
                case MII_BMCR:
                        reg = GEM_MII_CONTROL;
                        break;
                case MII_BMSR:
                        reg = GEM_MII_STATUS;
                        break;
                case MII_PHYIDR1:
                case MII_PHYIDR2:
                        return (0);
                case MII_ANAR:
                        reg = GEM_MII_ANAR;
                        break;
                case MII_ANLPAR:
                        reg = GEM_MII_ANLPAR;
                        break;
                case MII_EXTSR:
                        return (EXTSR_1000XFDX | EXTSR_1000XHDX);
                default:
                        device_printf(sc->sc_dev,
                            "%s: unhandled register %d\n", __func__, reg);
                        return (0);
                }
                return (GEM_BANK1_READ_4(sc, reg));
        }

        /* Construct the frame command. */
        v = GEM_MIF_FRAME_READ |
            (phy << GEM_MIF_PHY_SHIFT) |
            (reg << GEM_MIF_REG_SHIFT);

        GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
        GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        for (n = 0; n < 100; n++) {
                DELAY(1);
                v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
                if (v & GEM_MIF_FRAME_TA0)
                        return (v & GEM_MIF_FRAME_DATA);
        }

        device_printf(sc->sc_dev, "%s: timed out\n", __func__);
        return (0);
}

static int
gem_mii_writereg(device_t dev, int phy, int reg, int val)
{
        struct gem_softc *sc;
        int n;
        uint32_t v;

#ifdef GEM_DEBUG_PHY
        printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

        sc = device_get_softc(dev);
        if ((sc->sc_flags & GEM_SERDES) != 0) {
                switch (reg) {
                case MII_BMSR:
                        reg = GEM_MII_STATUS;
                        break;
                case MII_BMCR:
                        reg = GEM_MII_CONTROL;
                        if ((val & GEM_MII_CONTROL_RESET) == 0)
                                break;
                        GEM_BANK1_WRITE_4(sc, GEM_MII_CONTROL, val);
                        GEM_BANK1_BARRIER(sc, GEM_MII_CONTROL, 4,
                            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
                        if (!GEM_BANK1_BITWAIT(sc, GEM_MII_CONTROL,
                            GEM_MII_CONTROL_RESET, 0))
                                device_printf(sc->sc_dev,
                                    "cannot reset PCS\n");
                        /* FALLTHROUGH */
                case MII_ANAR:
                        GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, 0);
                        GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
                            BUS_SPACE_BARRIER_WRITE);
                        GEM_BANK1_WRITE_4(sc, GEM_MII_ANAR, val);
                        GEM_BANK1_BARRIER(sc, GEM_MII_ANAR, 4,
                            BUS_SPACE_BARRIER_WRITE);
                        GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
                            GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
                        GEM_BANK1_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4,
                            BUS_SPACE_BARRIER_WRITE);
                        GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG,
                            GEM_MII_CONFIG_ENABLE);
                        GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
                            BUS_SPACE_BARRIER_WRITE);
                        return (0);
                case MII_ANLPAR:
                        reg = GEM_MII_ANLPAR;
                        break;
                default:
                        device_printf(sc->sc_dev,
                            "%s: unhandled register %d\n", __func__, reg);
                        return (0);
                }
                GEM_BANK1_WRITE_4(sc, reg, val);
                GEM_BANK1_BARRIER(sc, reg, 4,
                    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
                return (0);
        }

        /* Construct the frame command. */
        v = GEM_MIF_FRAME_WRITE |
            (phy << GEM_MIF_PHY_SHIFT) |
            (reg << GEM_MIF_REG_SHIFT) |
            (val & GEM_MIF_FRAME_DATA);

        GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
        GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        for (n = 0; n < 100; n++) {
                DELAY(1);
                v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
                if (v & GEM_MIF_FRAME_TA0)
                        return (1);
        }

        device_printf(sc->sc_dev, "%s: timed out\n", __func__);
        return (0);
}

static void
gem_mii_statchg(device_t dev)
{
        struct gem_softc *sc;
        int gigabit;
        uint32_t rxcfg, txcfg, v;

        sc = device_get_softc(dev);

        GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
        if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
                device_printf(sc->sc_dev, "%s: status change\n", __func__);
#endif

        if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
            IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
                sc->sc_flags |= GEM_LINK;
        else
                sc->sc_flags &= ~GEM_LINK;

        switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
        case IFM_1000_SX:
        case IFM_1000_LX:
        case IFM_1000_CX:
        case IFM_1000_T:
                gigabit = 1;
                break;
        default:
                gigabit = 0;
        }

        /*
         * The configuration done here corresponds to the steps F) and
         * G) and as far as enabling of RX and TX MAC goes also step H)
         * of the initialization sequence outlined in section 3.2.1 of
         * the GEM Gigabit Ethernet ASIC Specification.
         */

        rxcfg = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
        rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
        txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
        if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
                txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
        else if (gigabit != 0) {
                rxcfg |= GEM_MAC_RX_CARR_EXTEND;
                txcfg |= GEM_MAC_TX_CARR_EXTEND;
        }
        GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0);
        GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
                device_printf(sc->sc_dev, "cannot disable TX MAC\n");
        GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg);
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0);
        GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
                device_printf(sc->sc_dev, "cannot disable RX MAC\n");
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg);

        v = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_CONFIG) &
            ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
        if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
            IFM_ETH_RXPAUSE) != 0)
                v |= GEM_MAC_CC_RX_PAUSE;
        if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
            IFM_ETH_TXPAUSE) != 0)
                v |= GEM_MAC_CC_TX_PAUSE;
        GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v);

        if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
            gigabit != 0)
                GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
                    GEM_MAC_SLOT_TIME_CARR_EXTEND);
        else
                GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
                    GEM_MAC_SLOT_TIME_NORMAL);

        /* XIF Configuration */
        v = GEM_MAC_XIF_LINK_LED;
        v |= GEM_MAC_XIF_TX_MII_ENA;
        if ((sc->sc_flags & GEM_SERDES) == 0) {
                if ((GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) &
                    GEM_MIF_CONFIG_PHY_SEL) != 0) {
                        /* External MII needs echo disable if half duplex. */
                        if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
                            IFM_FDX) == 0)
                                v |= GEM_MAC_XIF_ECHO_DISABL;
                } else
                        /*
                         * Internal MII needs buffer enable.
                         * XXX buffer enable only makes sense for an
                         * external PHY.
                         */
                        v |= GEM_MAC_XIF_MII_BUF_ENA;
        }
        if (gigabit != 0)
                v |= GEM_MAC_XIF_GMII_MODE;
        if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
                v |= GEM_MAC_XIF_FDPLX_LED;
        GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v);

        if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
            (sc->sc_flags & GEM_LINK) != 0) {
                GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
                    txcfg | GEM_MAC_TX_ENABLE);
                GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
                    rxcfg | GEM_MAC_RX_ENABLE);
        }
}

int
gem_mediachange(struct ifnet *ifp)
{
        struct gem_softc *sc = ifp->if_softc;
        int error;

        /* XXX add support for serial media. */

        GEM_LOCK(sc);
        error = mii_mediachg(sc->sc_mii);
        GEM_UNLOCK(sc);
        return (error);
}

void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct gem_softc *sc = ifp->if_softc;

        GEM_LOCK(sc);
        if ((ifp->if_flags & IFF_UP) == 0) {
                GEM_UNLOCK(sc);
                return;
        }

        mii_pollstat(sc->sc_mii);
        ifmr->ifm_active = sc->sc_mii->mii_media_active;
        ifmr->ifm_status = sc->sc_mii->mii_media_status;
        GEM_UNLOCK(sc);
}

static int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct gem_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        int error;

        error = 0;
        switch (cmd) {
        case SIOCSIFFLAGS:
                GEM_LOCK(sc);
                if ((ifp->if_flags & IFF_UP) != 0) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
                            ((ifp->if_flags ^ sc->sc_ifflags) &
                            (IFF_ALLMULTI | IFF_PROMISC)) != 0)
                                gem_setladrf(sc);
                        else
                                gem_init_locked(sc);
                } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                        gem_stop(ifp, 0);
                if ((ifp->if_flags & IFF_LINK0) != 0)
                        sc->sc_csum_features |= CSUM_UDP;
                else
                        sc->sc_csum_features &= ~CSUM_UDP;
                if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
                        ifp->if_hwassist = sc->sc_csum_features;
                sc->sc_ifflags = ifp->if_flags;
                GEM_UNLOCK(sc);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                GEM_LOCK(sc);
                gem_setladrf(sc);
                GEM_UNLOCK(sc);
                break;
        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
                break;
        case SIOCSIFCAP:
                GEM_LOCK(sc);
                ifp->if_capenable = ifr->ifr_reqcap;
                if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
                        ifp->if_hwassist = sc->sc_csum_features;
                else
                        ifp->if_hwassist = 0;
                GEM_UNLOCK(sc);
                break;
        default:
                error = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (error);
}

static void
gem_setladrf(struct gem_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct ifmultiaddr *inm;
        int i;
        uint32_t hash[16];
        uint32_t crc, v;

        GEM_LOCK_ASSERT(sc, MA_OWNED);

        /* Get the current RX configuration. */
        v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);

        /*
         * Turn off promiscuous mode, promiscuous group mode (all multicast),
         * and hash filter.  Depending on the case, the right bit will be
         * set again below.
         */
        v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
            GEM_MAC_RX_PROMISC_GRP);

        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
        GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER,
            0))
                device_printf(sc->sc_dev, "cannot disable RX hash filter\n");

        if ((ifp->if_flags & IFF_PROMISC) != 0) {
                v |= GEM_MAC_RX_PROMISCUOUS;
                goto chipit;
        }
        if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
                v |= GEM_MAC_RX_PROMISC_GRP;
                goto chipit;
        }

        /*
         * Set up multicast address filter by passing all multicast
         * addresses through a crc generator, and then using the high
         * order 8 bits as an index into the 256 bit logical address
         * filter.  The high order 4 bits selects the word, while the
         * other 4 bits select the bit within the word (where bit 0
         * is the MSB).
         */
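
        /*
         * Worked example: a multicast address whose CRC is 0xa7xxxxxx
         * yields index 0xa7 = 167 after the shift below; word 167 >> 4 = 10,
         * bit 15 - (167 & 15) = 8, so the loop sets hash[10] |= 1 << 8.
         */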

        /* Clear the hash table. */
        memset(hash, 0, sizeof(hash));

        if_maddr_rlock(ifp);
        TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
                if (inm->ifma_addr->sa_family != AF_LINK)
                        continue;
                crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
                    inm->ifma_addr), ETHER_ADDR_LEN);

                /* We just want the 8 most significant bits. */
                crc >>= 24;

                /* Set the corresponding bit in the filter. */
                hash[crc >> 4] |= 1 << (15 - (crc & 15));
        }
        if_maddr_runlock(ifp);

        v |= GEM_MAC_RX_HASH_FILTER;

        /* Now load the hash table into the chip (if we are using it). */
        for (i = 0; i < 16; i++)
                GEM_BANK1_WRITE_4(sc,
                    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
                    hash[i]);

 chipit:
        GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
}