2 * Copyright (C) 2001 Eduardo Horvath.
3 * Copyright (c) 2001-2003 Thomas Moestl
4 * Copyright (c) 2007-2009 Marius Strobl <marius@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
29 * from: FreeBSD: if_gem.c 182060 2008-08-23 15:03:26Z marius
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
36 * driver for Sun Cassini/Cassini+ and National Semiconductor DP83065
37 * Saturn Gigabit Ethernet controllers
44 #include <sys/param.h>
45 #include <sys/systm.h>
47 #include <sys/callout.h>
48 #include <sys/endian.h>
50 #include <sys/malloc.h>
51 #include <sys/kernel.h>
53 #include <sys/module.h>
54 #include <sys/mutex.h>
55 #include <sys/refcount.h>
56 #include <sys/resource.h>
58 #include <sys/socket.h>
59 #include <sys/sockio.h>
60 #include <sys/taskqueue.h>
63 #include <net/ethernet.h>
65 #include <net/if_arp.h>
66 #include <net/if_dl.h>
67 #include <net/if_media.h>
68 #include <net/if_types.h>
69 #include <net/if_vlan_var.h>
71 #include <netinet/in.h>
72 #include <netinet/in_systm.h>
73 #include <netinet/ip.h>
74 #include <netinet/tcp.h>
75 #include <netinet/udp.h>
77 #include <machine/bus.h>
78 #if defined(__powerpc__) || defined(__sparc64__)
79 #include <dev/ofw/ofw_bus.h>
80 #include <dev/ofw/openfirm.h>
81 #include <machine/ofw_machdep.h>
83 #include <machine/resource.h>
85 #include <dev/mii/mii.h>
86 #include <dev/mii/miivar.h>
88 #include <dev/cas/if_casreg.h>
89 #include <dev/cas/if_casvar.h>
91 #include <dev/pci/pcireg.h>
92 #include <dev/pci/pcivar.h>
94 #include "miibus_if.h"
96 #define RINGASSERT(n, min, max) \
97 CTASSERT(powerof2(n) && (n) >= (min) && (n) <= (max))
99 RINGASSERT(CAS_NRXCOMP, 128, 32768);
100 RINGASSERT(CAS_NRXDESC, 32, 8192);
101 RINGASSERT(CAS_NRXDESC2, 32, 8192);
102 RINGASSERT(CAS_NTXDESC, 32, 8192);
106 #define CCDASSERT(m, a) \
107 CTASSERT((offsetof(struct cas_control_data, m) & ((a) - 1)) == 0)
109 CCDASSERT(ccd_rxcomps, CAS_RX_COMP_ALIGN);
110 CCDASSERT(ccd_rxdescs, CAS_RX_DESC_ALIGN);
111 CCDASSERT(ccd_rxdescs2, CAS_RX_DESC_ALIGN);
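/*
 * These compile-time checks ensure that the configured ring sizes are
 * powers of two within the ranges the hardware can encode (see
 * cas_descsize() and cas_rxcompsize()) and that the members of the
 * control data block satisfy the alignment required by the DMA engine.
 */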
115 #define CAS_TRIES 10000
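/*
 * CAS_TRIES bounds the register polling loop in cas_bitwait(); at 100us
 * per iteration this amounts to roughly one second.
 */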
118  * According to the documentation, the hardware supports basic TCP
119  * checksum offloading only; in practice it can also be used for UDP,
120  * as the problem of earlier Sun NICs (a checksum of 0x0 not being
121  * converted to 0xffff) no longer exists.
123 #define CAS_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
125 static inline void cas_add_rxdesc(struct cas_softc *sc, u_int idx);
126 static int cas_attach(struct cas_softc *sc);
127 static int cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr,
129 static void cas_cddma_callback(void *xsc, bus_dma_segment_t *segs,
130 int nsegs, int error);
131 static void cas_detach(struct cas_softc *sc);
132 static int cas_disable_rx(struct cas_softc *sc);
133 static int cas_disable_tx(struct cas_softc *sc);
134 static void cas_eint(struct cas_softc *sc, u_int status);
135 static void cas_free(void *arg1, void* arg2);
136 static void cas_init(void *xsc);
137 static void cas_init_locked(struct cas_softc *sc);
138 static void cas_init_regs(struct cas_softc *sc);
139 static int cas_intr(void *v);
140 static void cas_intr_task(void *arg, int pending __unused);
141 static int cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
142 static int cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head);
143 static int cas_mediachange(struct ifnet *ifp);
144 static void cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
145 static void cas_meminit(struct cas_softc *sc);
146 static void cas_mifinit(struct cas_softc *sc);
147 static int cas_mii_readreg(device_t dev, int phy, int reg);
148 static void cas_mii_statchg(device_t dev);
149 static int cas_mii_writereg(device_t dev, int phy, int reg, int val);
150 static void cas_reset(struct cas_softc *sc);
151 static int cas_reset_rx(struct cas_softc *sc);
152 static int cas_reset_tx(struct cas_softc *sc);
153 static void cas_resume(struct cas_softc *sc);
154 static u_int cas_descsize(u_int sz);
155 static void cas_rint(struct cas_softc *sc);
156 static void cas_rint_timeout(void *arg);
157 static inline void cas_rxcksum(struct mbuf *m, uint16_t cksum);
158 static inline void cas_rxcompinit(struct cas_rx_comp *rxcomp);
159 static u_int cas_rxcompsize(u_int sz);
160 static void cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs,
161 int nsegs, int error);
162 static void cas_setladrf(struct cas_softc *sc);
163 static void cas_start(struct ifnet *ifp);
164 static void cas_stop(struct ifnet *ifp);
165 static void cas_suspend(struct cas_softc *sc);
166 static void cas_tick(void *arg);
167 static void cas_tint(struct cas_softc *sc);
168 static void cas_tx_task(void *arg, int pending __unused);
169 static inline void cas_txkick(struct cas_softc *sc);
170 static void cas_watchdog(struct cas_softc *sc);
172 static devclass_t cas_devclass;
174 MODULE_DEPEND(cas, ether, 1, 1, 1);
175 MODULE_DEPEND(cas, miibus, 1, 1, 1);
179 #define KTR_CAS KTR_CT2
183 cas_attach(struct cas_softc *sc)
185 struct cas_txsoft *txs;
190 /* Set up ifnet structure. */
191 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
195 if_initname(ifp, device_get_name(sc->sc_dev),
196 device_get_unit(sc->sc_dev));
197 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
198 ifp->if_start = cas_start;
199 ifp->if_ioctl = cas_ioctl;
200 ifp->if_init = cas_init;
201 IFQ_SET_MAXLEN(&ifp->if_snd, CAS_TXQUEUELEN);
202 ifp->if_snd.ifq_drv_maxlen = CAS_TXQUEUELEN;
203 IFQ_SET_READY(&ifp->if_snd);
205 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
206 callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
207 /* Create local taskq. */
208 TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc);
209 TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp);
210 sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK,
211 taskqueue_thread_enqueue, &sc->sc_tq);
212 if (sc->sc_tq == NULL) {
213 device_printf(sc->sc_dev, "could not create taskqueue\n");
217 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
218 device_get_nameunit(sc->sc_dev));
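/*
 * Interrupt handling is deferred: cas_intr() only masks the chip's
 * interrupts and enqueues cas_intr_task() on this taskqueue, which does
 * the actual work and re-enables them when done; cas_tx_task() restarts
 * transmission from the same queue.
 */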
220 /* Make sure the chip is stopped. */
223 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
224 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
225 BUS_SPACE_MAXSIZE, 0, BUS_SPACE_MAXSIZE, 0, NULL, NULL,
230 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
231 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
232 CAS_PAGE_SIZE, 1, CAS_PAGE_SIZE, 0, NULL, NULL, &sc->sc_rdmatag);
236 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
237 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
238 MCLBYTES * CAS_NTXSEGS, CAS_NTXSEGS, MCLBYTES,
239 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
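/*
 * Create the tag for the control data: the TX descriptor, RX completion
 * and RX descriptor rings are carved out of a single allocation
 * (struct cas_control_data) that is loaded with one DMA mapping.
 */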
243 error = bus_dma_tag_create(sc->sc_pdmatag, CAS_TX_DESC_ALIGN, 0,
244 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
245 sizeof(struct cas_control_data), 1,
246 sizeof(struct cas_control_data), 0,
247 NULL, NULL, &sc->sc_cdmatag);
252 * Allocate the control data structures, create and load the
255 if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
256 (void **)&sc->sc_control_data,
257 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
258 &sc->sc_cddmamap)) != 0) {
259 device_printf(sc->sc_dev,
260 "unable to allocate control data, error = %d\n", error);
265 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
266 sc->sc_control_data, sizeof(struct cas_control_data),
267 cas_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
268 device_printf(sc->sc_dev,
269 "unable to load control data DMA map, error = %d\n",
275 * Initialize the transmit job descriptors.
277 STAILQ_INIT(&sc->sc_txfreeq);
278 STAILQ_INIT(&sc->sc_txdirtyq);
281 * Create the transmit buffer DMA maps.
284 for (i = 0; i < CAS_TXQUEUELEN; i++) {
285 txs = &sc->sc_txsoft[i];
286 txs->txs_mbuf = NULL;
288 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
289 &txs->txs_dmamap)) != 0) {
290 device_printf(sc->sc_dev,
291 "unable to create TX DMA map %d, error = %d\n",
295 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
299 * Allocate the receive buffers, create and load the DMA maps
302 for (i = 0; i < CAS_NRXDESC; i++) {
303 if ((error = bus_dmamem_alloc(sc->sc_rdmatag,
304 &sc->sc_rxdsoft[i].rxds_buf, BUS_DMA_WAITOK,
305 &sc->sc_rxdsoft[i].rxds_dmamap)) != 0) {
306 device_printf(sc->sc_dev,
307 "unable to allocate RX buffer %d, error = %d\n",
313 sc->sc_rxdsoft[i].rxds_paddr = 0;
314 if ((error = bus_dmamap_load(sc->sc_rdmatag,
315 sc->sc_rxdsoft[i].rxds_dmamap, sc->sc_rxdsoft[i].rxds_buf,
316 CAS_PAGE_SIZE, cas_rxdma_callback, sc, 0)) != 0 ||
317 sc->sc_rxdsoft[i].rxds_paddr == 0) {
318 device_printf(sc->sc_dev,
319 "unable to load RX DMA map %d, error = %d\n",
325 if ((sc->sc_flags & CAS_SERDES) == 0) {
326 CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_MII);
327 CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4,
328 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
331 * Look for an external PHY.
334 v = CAS_READ_4(sc, CAS_MIF_CONF);
335 if ((v & CAS_MIF_CONF_MDI1) != 0) {
336 v |= CAS_MIF_CONF_PHY_SELECT;
337 CAS_WRITE_4(sc, CAS_MIF_CONF, v);
338 CAS_BARRIER(sc, CAS_MIF_CONF, 4,
339 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
340 /* Enable/unfreeze the GMII pins of Saturn. */
341 if (sc->sc_variant == CAS_SATURN) {
342 CAS_WRITE_4(sc, CAS_SATURN_PCFG, 0);
343 CAS_BARRIER(sc, CAS_SATURN_PCFG, 4,
344 BUS_SPACE_BARRIER_READ |
345 BUS_SPACE_BARRIER_WRITE);
347 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
348 cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK,
349 MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE);
352 * Fall back on an internal PHY if no external PHY was found.
354 if (error != 0 && (v & CAS_MIF_CONF_MDI0) != 0) {
355 v &= ~CAS_MIF_CONF_PHY_SELECT;
356 CAS_WRITE_4(sc, CAS_MIF_CONF, v);
357 CAS_BARRIER(sc, CAS_MIF_CONF, 4,
358 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
359 /* Freeze the GMII pins of Saturn for saving power. */
360 if (sc->sc_variant == CAS_SATURN) {
361 CAS_WRITE_4(sc, CAS_SATURN_PCFG,
362 CAS_SATURN_PCFG_FSI);
363 CAS_BARRIER(sc, CAS_SATURN_PCFG, 4,
364 BUS_SPACE_BARRIER_READ |
365 BUS_SPACE_BARRIER_WRITE);
367 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
368 cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK,
369 MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE);
373 * Use the external PCS SERDES.
375 CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_SERDES);
376 CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4, BUS_SPACE_BARRIER_WRITE);
377 /* Enable/unfreeze the SERDES pins of Saturn. */
378 if (sc->sc_variant == CAS_SATURN) {
379 CAS_WRITE_4(sc, CAS_SATURN_PCFG, 0);
380 CAS_BARRIER(sc, CAS_SATURN_PCFG, 4,
381 BUS_SPACE_BARRIER_WRITE);
383 CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL, CAS_PCS_SERDES_CTRL_ESD);
384 CAS_BARRIER(sc, CAS_PCS_SERDES_CTRL, 4,
385 BUS_SPACE_BARRIER_WRITE);
386 CAS_WRITE_4(sc, CAS_PCS_CONF, CAS_PCS_CONF_EN);
387 CAS_BARRIER(sc, CAS_PCS_CONF, 4,
388 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
389 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
390 cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK,
391 CAS_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE);
394 device_printf(sc->sc_dev, "attaching PHYs failed\n");
397 sc->sc_mii = device_get_softc(sc->sc_miibus);
400 * From this point forward, the attachment cannot fail. A failure
401 * before this point releases all resources that may have been
405 /* Announce FIFO sizes. */
406 v = CAS_READ_4(sc, CAS_TX_FIFO_SIZE);
407 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
408 CAS_RX_FIFO_SIZE / 1024, v / 16);
410 /* Attach the interface. */
411 ether_ifattach(ifp, sc->sc_enaddr);
414 * Tell the upper layer(s) we support long frames/checksum offloads.
416 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
417 ifp->if_capabilities = IFCAP_VLAN_MTU;
418 if ((sc->sc_flags & CAS_NO_CSUM) == 0) {
419 ifp->if_capabilities |= IFCAP_HWCSUM;
420 ifp->if_hwassist = CAS_CSUM_FEATURES;
422 ifp->if_capenable = ifp->if_capabilities;
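/*
 * Note that cas_mii_statchg() disables hardware checksumming again while
 * the link is half-duplex to work around an FCS stripping bug (see below).
 */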
427 * Free any resources we've allocated during the failed attach
428 * attempt. Do this in reverse order and fall through.
431 for (i = 0; i < CAS_NRXDESC; i++)
432 if (sc->sc_rxdsoft[i].rxds_paddr != 0)
433 bus_dmamap_unload(sc->sc_rdmatag,
434 sc->sc_rxdsoft[i].rxds_dmamap);
436 for (i = 0; i < CAS_NRXDESC; i++)
437 if (sc->sc_rxdsoft[i].rxds_buf != NULL)
438 bus_dmamem_free(sc->sc_rdmatag,
439 sc->sc_rxdsoft[i].rxds_buf,
440 sc->sc_rxdsoft[i].rxds_dmamap);
442 for (i = 0; i < CAS_TXQUEUELEN; i++)
443 if (sc->sc_txsoft[i].txs_dmamap != NULL)
444 bus_dmamap_destroy(sc->sc_tdmatag,
445 sc->sc_txsoft[i].txs_dmamap);
446 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
448 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
451 bus_dma_tag_destroy(sc->sc_cdmatag);
453 bus_dma_tag_destroy(sc->sc_tdmatag);
455 bus_dma_tag_destroy(sc->sc_rdmatag);
457 bus_dma_tag_destroy(sc->sc_pdmatag);
459 taskqueue_free(sc->sc_tq);
466 cas_detach(struct cas_softc *sc)
468 struct ifnet *ifp = sc->sc_ifp;
475 callout_drain(&sc->sc_tick_ch);
476 callout_drain(&sc->sc_rx_ch);
477 taskqueue_drain(sc->sc_tq, &sc->sc_intr_task);
478 taskqueue_drain(sc->sc_tq, &sc->sc_tx_task);
480 taskqueue_free(sc->sc_tq);
481 device_delete_child(sc->sc_dev, sc->sc_miibus);
483 for (i = 0; i < CAS_NRXDESC; i++)
484 if (sc->sc_rxdsoft[i].rxds_dmamap != NULL)
485 bus_dmamap_sync(sc->sc_rdmatag,
486 sc->sc_rxdsoft[i].rxds_dmamap,
487 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
488 for (i = 0; i < CAS_NRXDESC; i++)
489 if (sc->sc_rxdsoft[i].rxds_paddr != 0)
490 bus_dmamap_unload(sc->sc_rdmatag,
491 sc->sc_rxdsoft[i].rxds_dmamap);
492 for (i = 0; i < CAS_NRXDESC; i++)
493 if (sc->sc_rxdsoft[i].rxds_buf != NULL)
494 bus_dmamem_free(sc->sc_rdmatag,
495 sc->sc_rxdsoft[i].rxds_buf,
496 sc->sc_rxdsoft[i].rxds_dmamap);
497 for (i = 0; i < CAS_TXQUEUELEN; i++)
498 if (sc->sc_txsoft[i].txs_dmamap != NULL)
499 bus_dmamap_destroy(sc->sc_tdmatag,
500 sc->sc_txsoft[i].txs_dmamap);
501 CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
502 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
503 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
505 bus_dma_tag_destroy(sc->sc_cdmatag);
506 bus_dma_tag_destroy(sc->sc_tdmatag);
507 bus_dma_tag_destroy(sc->sc_rdmatag);
508 bus_dma_tag_destroy(sc->sc_pdmatag);
512 cas_suspend(struct cas_softc *sc)
514 struct ifnet *ifp = sc->sc_ifp;
522 cas_resume(struct cas_softc *sc)
524 struct ifnet *ifp = sc->sc_ifp;
528 * On resume all registers have to be initialized again like
531 sc->sc_flags &= ~CAS_INITED;
532 if (ifp->if_flags & IFF_UP)
538 cas_rxcksum(struct mbuf *m, uint16_t cksum)
540 struct ether_header *eh;
544 int32_t hlen, len, pktlen;
547 pktlen = m->m_pkthdr.len;
548 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
550 eh = mtod(m, struct ether_header *);
551 if (eh->ether_type != htons(ETHERTYPE_IP))
553 ip = (struct ip *)(eh + 1);
554 if (ip->ip_v != IPVERSION)
557 hlen = ip->ip_hl << 2;
558 pktlen -= sizeof(struct ether_header);
559 if (hlen < sizeof(struct ip))
561 if (ntohs(ip->ip_len) < hlen)
563 if (ntohs(ip->ip_len) != pktlen)
565 if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
566 return; /* Cannot handle fragmented packet. */
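/*
 * The hardware checksum passed in the completion is a 16-bit
 * ones-complement sum starting at the fixed offset ETHER_HDR_LEN +
 * sizeof(struct ip) (the CSO programmed in cas_rxcompinit()).  If the IP
 * header carries options, those bytes are included in the sum and are
 * subtracted out below before the result is handed to the stack via
 * CSUM_DATA_VALID/csum_data.
 */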
570 if (pktlen < (hlen + sizeof(struct tcphdr)))
574 if (pktlen < (hlen + sizeof(struct udphdr)))
576 uh = (struct udphdr *)((uint8_t *)ip + hlen);
578 return; /* no checksum */
585 /* checksum fixup for IP options */
586 len = hlen - sizeof(struct ip);
588 opts = (uint16_t *)(ip + 1);
589 for (; len > 0; len -= sizeof(uint16_t), opts++) {
590 temp32 = cksum - *opts;
591 temp32 = (temp32 >> 16) + (temp32 & 65535);
592 cksum = temp32 & 65535;
595 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
596 m->m_pkthdr.csum_data = cksum;
600 cas_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
602 struct cas_softc *sc = xsc;
607 panic("%s: bad control buffer segment count", __func__);
608 sc->sc_cddma = segs[0].ds_addr;
612 cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
614 struct cas_softc *sc = xsc;
619 panic("%s: bad RX buffer segment count", __func__);
620 sc->sc_rxdsoft[sc->sc_rxdptr].rxds_paddr = segs[0].ds_addr;
626 struct cas_softc *sc = arg;
627 struct ifnet *ifp = sc->sc_ifp;
630 CAS_LOCK_ASSERT(sc, MA_OWNED);
633 * Unload collision and error counters.
635 ifp->if_collisions +=
636 CAS_READ_4(sc, CAS_MAC_NORM_COLL_CNT) +
637 CAS_READ_4(sc, CAS_MAC_FIRST_COLL_CNT);
638 v = CAS_READ_4(sc, CAS_MAC_EXCESS_COLL_CNT) +
639 CAS_READ_4(sc, CAS_MAC_LATE_COLL_CNT);
640 ifp->if_collisions += v;
641 ifp->if_oerrors += v;
643 CAS_READ_4(sc, CAS_MAC_RX_LEN_ERR_CNT) +
644 CAS_READ_4(sc, CAS_MAC_RX_ALIGN_ERR) +
645 CAS_READ_4(sc, CAS_MAC_RX_CRC_ERR_CNT) +
646 CAS_READ_4(sc, CAS_MAC_RX_CODE_VIOL);
649 * Then clear the hardware counters.
651 CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0);
652 CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0);
653 CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0);
654 CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0);
655 CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0);
656 CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0);
657 CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0);
658 CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0);
660 mii_tick(sc->sc_mii);
662 if (sc->sc_txfree != CAS_MAXTXFREE)
667 callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
671 cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set)
676 for (i = CAS_TRIES; i--; DELAY(100)) {
677 reg = CAS_READ_4(sc, r);
678 if ((reg & clr) == 0 && (reg & set) == set)
685 cas_reset(struct cas_softc *sc)
689 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
691 /* Disable all interrupts in order to avoid spurious ones. */
692 CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff);
698 * Do a full reset modulo the result of the last auto-negotiation
699 * when using the SERDES.
701 CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX |
702 ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
703 CAS_BARRIER(sc, CAS_RESET, 4,
704 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
706 if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0))
707 device_printf(sc->sc_dev, "cannot reset device\n");
711 cas_stop(struct ifnet *ifp)
713 struct cas_softc *sc = ifp->if_softc;
714 struct cas_txsoft *txs;
717 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
720 callout_stop(&sc->sc_tick_ch);
721 callout_stop(&sc->sc_rx_ch);
723 /* Disable all interrupts in order to avoid spurious ones. */
724 CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff);
730 * Release any queued transmit buffers.
732 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
733 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
734 if (txs->txs_ndescs != 0) {
735 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
736 BUS_DMASYNC_POSTWRITE);
737 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
738 if (txs->txs_mbuf != NULL) {
739 m_freem(txs->txs_mbuf);
740 txs->txs_mbuf = NULL;
743 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
747 * Mark the interface down and cancel the watchdog timer.
749 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
750 sc->sc_flags &= ~CAS_LINK;
751 sc->sc_wdog_timer = 0;
755 cas_reset_rx(struct cas_softc *sc)
759 * Resetting while DMA is in progress can cause a bus hang, so we
763 CAS_WRITE_4(sc, CAS_RX_CONF, 0);
764 CAS_BARRIER(sc, CAS_RX_CONF, 4,
765 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
766 if (!cas_bitwait(sc, CAS_RX_CONF, CAS_RX_CONF_RXDMA_EN, 0))
767 device_printf(sc->sc_dev, "cannot disable RX DMA\n");
769 /* Finally, reset the ERX. */
770 CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX |
771 ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
772 CAS_BARRIER(sc, CAS_RESET, 4,
773 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
774 if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0)) {
775 device_printf(sc->sc_dev, "cannot reset receiver\n");
782 cas_reset_tx(struct cas_softc *sc)
786 * Resetting while DMA is in progress can cause a bus hang, so we
790 CAS_WRITE_4(sc, CAS_TX_CONF, 0);
791 CAS_BARRIER(sc, CAS_TX_CONF, 4,
792 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
793 if (!cas_bitwait(sc, CAS_TX_CONF, CAS_TX_CONF_TXDMA_EN, 0))
794 device_printf(sc->sc_dev, "cannot disable TX DMA\n");
796 /* Finally, reset the ETX. */
797 CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_TX |
798 ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
799 CAS_BARRIER(sc, CAS_RESET, 4,
800 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
801 if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0)) {
802 device_printf(sc->sc_dev, "cannot reset transmitter\n");
809 cas_disable_rx(struct cas_softc *sc)
812 CAS_WRITE_4(sc, CAS_MAC_RX_CONF,
813 CAS_READ_4(sc, CAS_MAC_RX_CONF) & ~CAS_MAC_RX_CONF_EN);
814 CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4,
815 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
816 return (cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0));
820 cas_disable_tx(struct cas_softc *sc)
823 CAS_WRITE_4(sc, CAS_MAC_TX_CONF,
824 CAS_READ_4(sc, CAS_MAC_TX_CONF) & ~CAS_MAC_TX_CONF_EN);
825 CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4,
826 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
827 return (cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0));
831 cas_rxcompinit(struct cas_rx_comp *rxcomp)
834 rxcomp->crc_word1 = 0;
835 rxcomp->crc_word2 = 0;
837 htole64(CAS_SET(ETHER_HDR_LEN + sizeof(struct ip), CAS_RC3_CSO));
838 rxcomp->crc_word4 = htole64(CAS_RC4_ZERO);
842 cas_meminit(struct cas_softc *sc)
846 CAS_LOCK_ASSERT(sc, MA_OWNED);
849 * Initialize the transmit descriptor ring.
851 for (i = 0; i < CAS_NTXDESC; i++) {
852 sc->sc_txdescs[i].cd_flags = 0;
853 sc->sc_txdescs[i].cd_buf_ptr = 0;
855 sc->sc_txfree = CAS_MAXTXFREE;
860 * Initialize the receive completion ring.
862 for (i = 0; i < CAS_NRXCOMP; i++)
863 cas_rxcompinit(&sc->sc_rxcomps[i]);
867 * Initialize the first receive descriptor ring. We leave
868 * the second one zeroed as we don't actually use it.
870 for (i = 0; i < CAS_NRXDESC; i++)
871 CAS_INIT_RXDESC(sc, i, i);
874 CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
878 cas_descsize(u_int sz)
883 return (CAS_DESC_32);
885 return (CAS_DESC_64);
887 return (CAS_DESC_128);
889 return (CAS_DESC_256);
891 return (CAS_DESC_512);
893 return (CAS_DESC_1K);
895 return (CAS_DESC_2K);
897 return (CAS_DESC_4K);
899 return (CAS_DESC_8K);
901 printf("%s: invalid descriptor ring size %d\n", __func__, sz);
902 return (CAS_DESC_32);
907 cas_rxcompsize(u_int sz)
912 return (CAS_RX_CONF_COMP_128);
914 return (CAS_RX_CONF_COMP_256);
916 return (CAS_RX_CONF_COMP_512);
918 return (CAS_RX_CONF_COMP_1K);
920 return (CAS_RX_CONF_COMP_2K);
922 return (CAS_RX_CONF_COMP_4K);
924 return (CAS_RX_CONF_COMP_8K);
926 return (CAS_RX_CONF_COMP_16K);
928 return (CAS_RX_CONF_COMP_32K);
930 printf("%s: invalid dcompletion ring size %d\n", __func__, sz);
931 return (CAS_RX_CONF_COMP_128);
938 struct cas_softc *sc = xsc;
946 * Initialization of interface; set up initialization block
947 * and transmit/receive descriptor rings.
950 cas_init_locked(struct cas_softc *sc)
952 struct ifnet *ifp = sc->sc_ifp;
955 CAS_LOCK_ASSERT(sc, MA_OWNED);
957 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
961 CTR2(KTR_CAS, "%s: %s: calling stop", device_get_name(sc->sc_dev),
965 * Initialization sequence. The numbered steps below correspond
966 * to the sequence outlined in section 6.3.5.1 in the Ethernet
967 * Channel Engine manual (part of the PCIO manual).
968 * See also the STP2002-STQ document from Sun Microsystems.
971 /* step 1 & 2. Reset the Ethernet Channel. */
975 CTR2(KTR_CAS, "%s: %s: restarting", device_get_name(sc->sc_dev),
979 if ((sc->sc_flags & CAS_SERDES) == 0)
980 /* Re-initialize the MIF. */
983 /* step 3. Setup data structures in host memory. */
986 /* step 4. TX MAC registers & counters */
989 /* step 5. RX MAC registers & counters */
992 /* step 6 & 7. Program Ring Base Addresses. */
993 CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_HI,
994 (((uint64_t)CAS_CDTXDADDR(sc, 0)) >> 32));
995 CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_LO,
996 CAS_CDTXDADDR(sc, 0) & 0xffffffff);
998 CAS_WRITE_4(sc, CAS_RX_COMP_BASE_HI,
999 (((uint64_t)CAS_CDRXCADDR(sc, 0)) >> 32));
1000 CAS_WRITE_4(sc, CAS_RX_COMP_BASE_LO,
1001 CAS_CDRXCADDR(sc, 0) & 0xffffffff);
1003 CAS_WRITE_4(sc, CAS_RX_DESC_BASE_HI,
1004 (((uint64_t)CAS_CDRXDADDR(sc, 0)) >> 32));
1005 CAS_WRITE_4(sc, CAS_RX_DESC_BASE_LO,
1006 CAS_CDRXDADDR(sc, 0) & 0xffffffff);
1008 if ((sc->sc_flags & CAS_REG_PLUS) != 0) {
1009 CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_HI,
1010 (((uint64_t)CAS_CDRXD2ADDR(sc, 0)) >> 32));
1011 CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_LO,
1012 CAS_CDRXD2ADDR(sc, 0) & 0xffffffff);
1017 "loading TXDR %lx, RXCR %lx, RXDR %lx, RXD2R %lx, cddma %lx",
1018 CAS_CDTXDADDR(sc, 0), CAS_CDRXCADDR(sc, 0), CAS_CDRXDADDR(sc, 0),
1019 CAS_CDRXD2ADDR(sc, 0), sc->sc_cddma);
1022 /* step 8. Global Configuration & Interrupt Masks */
1024 /* Disable weighted round robin. */
1025 CAS_WRITE_4(sc, CAS_CAW, CAS_CAW_RR_DIS);
1028 * Enable infinite bursts for revisions without PCI issues if
1029 * applicable. Doing so greatly improves the TX performance on
1032 CAS_WRITE_4(sc, CAS_INF_BURST,
1033 #if !defined(__sparc64__)
1034 (sc->sc_flags & CAS_TABORT) == 0 ? CAS_INF_BURST_EN :
1038 /* Set up interrupts. */
1039 CAS_WRITE_4(sc, CAS_INTMASK,
1040 ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR |
1041 CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR |
1042 CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY |
1043 CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH |
1044 CAS_INTR_PCI_ERROR_INT
1046 | CAS_INTR_PCS_INT | CAS_INTR_MIF
1049 /* Don't clear top level interrupts when CAS_STATUS_ALIAS is read. */
1050 CAS_WRITE_4(sc, CAS_CLEAR_ALIAS, 0);
1051 CAS_WRITE_4(sc, CAS_MAC_RX_MASK, ~CAS_MAC_RX_OVERFLOW);
1052 CAS_WRITE_4(sc, CAS_MAC_TX_MASK,
1053 ~(CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR));
1055 CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK,
1056 ~(CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE |
1057 CAS_MAC_CTRL_NON_PAUSE));
1059 CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK,
1060 CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE |
1061 CAS_MAC_CTRL_NON_PAUSE);
1064 /* Enable PCI error interrupts. */
1065 CAS_WRITE_4(sc, CAS_ERROR_MASK,
1066 ~(CAS_ERROR_DTRTO | CAS_ERROR_OTHER | CAS_ERROR_DMAW_ZERO |
1067 CAS_ERROR_DMAR_ZERO | CAS_ERROR_RTRTO));
1069 /* Enable PCI error interrupts in BIM configuration. */
1070 CAS_WRITE_4(sc, CAS_BIM_CONF,
1071 CAS_BIM_CONF_DPAR_EN | CAS_BIM_CONF_RMA_EN | CAS_BIM_CONF_RTA_EN);
1074 * step 9. ETX Configuration: encode transmit descriptor ring size,
1075 * enable DMA and disable pre-interrupt writeback completion.
1077 v = cas_descsize(CAS_NTXDESC) << CAS_TX_CONF_DESC3_SHFT;
1078 CAS_WRITE_4(sc, CAS_TX_CONF, v | CAS_TX_CONF_TXDMA_EN |
1079 CAS_TX_CONF_RDPP_DIS | CAS_TX_CONF_PICWB_DIS);
1081 /* step 10. ERX Configuration */
1084 * Encode receive completion and descriptor ring sizes, set the
1087 v = cas_rxcompsize(CAS_NRXCOMP) << CAS_RX_CONF_COMP_SHFT;
1088 v |= cas_descsize(CAS_NRXDESC) << CAS_RX_CONF_DESC_SHFT;
1089 if ((sc->sc_flags & CAS_REG_PLUS) != 0)
1090 v |= cas_descsize(CAS_NRXDESC2) << CAS_RX_CONF_DESC2_SHFT;
1091 CAS_WRITE_4(sc, CAS_RX_CONF,
1092 v | (ETHER_ALIGN << CAS_RX_CONF_SOFF_SHFT));
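/*
 * The ETHER_ALIGN start offset programmed above keeps the IP header of
 * received frames 32-bit aligned within the RX buffers.
 */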
1094 /* Set the PAUSE thresholds. We use the maximum OFF threshold. */
1095 CAS_WRITE_4(sc, CAS_RX_PTHRS,
1096 (111 << CAS_RX_PTHRS_XOFF_SHFT) | (15 << CAS_RX_PTHRS_XON_SHFT));
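/*
 * RX interrupt blanking: hold off further RX_DONE interrupts until a few
 * packets have arrived or a short interval has passed, reducing the
 * interrupt rate under load.
 */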
1099 CAS_WRITE_4(sc, CAS_RX_BLANK,
1100 (15 << CAS_RX_BLANK_TIME_SHFT) | (5 << CAS_RX_BLANK_PKTS_SHFT));
1102 /* Set RX_COMP_AFULL threshold to half of the RX completions. */
1103 CAS_WRITE_4(sc, CAS_RX_AEMPTY_THRS,
1104 (CAS_NRXCOMP / 2) << CAS_RX_AEMPTY_COMP_SHFT);
1106 /* Initialize the RX page size register as appropriate for 8k. */
1107 CAS_WRITE_4(sc, CAS_RX_PSZ,
1108 (CAS_RX_PSZ_8K << CAS_RX_PSZ_SHFT) |
1109 (4 << CAS_RX_PSZ_MB_CNT_SHFT) |
1110 (CAS_RX_PSZ_MB_STRD_2K << CAS_RX_PSZ_MB_STRD_SHFT) |
1111 (CAS_RX_PSZ_MB_OFF_64 << CAS_RX_PSZ_MB_OFF_SHFT));
1113 /* Disable RX random early detection. */
1114 CAS_WRITE_4(sc, CAS_RX_RED, 0);
1116 /* Zero the RX reassembly DMA table. */
1117 for (v = 0; v <= CAS_RX_REAS_DMA_ADDR_LC; v++) {
1118 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_ADDR, v);
1119 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_LO, 0);
1120 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_MD, 0);
1121 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_HI, 0);
1124 /* Ensure the RX control FIFO and RX IPP FIFO addresses are zero. */
1125 CAS_WRITE_4(sc, CAS_RX_CTRL_FIFO, 0);
1126 CAS_WRITE_4(sc, CAS_RX_IPP_ADDR, 0);
1128 /* Finally, enable RX DMA. */
1129 CAS_WRITE_4(sc, CAS_RX_CONF,
1130 CAS_READ_4(sc, CAS_RX_CONF) | CAS_RX_CONF_RXDMA_EN);
1132 /* step 11. Configure Media. */
1134 /* step 12. RX_MAC Configuration Register */
1135 v = CAS_READ_4(sc, CAS_MAC_RX_CONF) & ~CAS_MAC_RX_CONF_STRPPAD;
1136 v |= CAS_MAC_RX_CONF_EN | CAS_MAC_RX_CONF_STRPFCS;
1137 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, 0);
1138 CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4,
1139 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1140 if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0))
1141 device_printf(sc->sc_dev, "cannot configure RX MAC\n");
1142 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v);
1144 /* step 13. TX_MAC Configuration Register */
1145 v = CAS_READ_4(sc, CAS_MAC_TX_CONF);
1146 v |= CAS_MAC_TX_CONF_EN;
1147 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, 0);
1148 CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4,
1149 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1150 if (!cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0))
1151 device_printf(sc->sc_dev, "cannot configure TX MAC\n");
1152 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, v);
1154 /* step 14. Issue Transmit Pending command. */
1156 /* step 15. Give the receiver a swift kick. */
1157 CAS_WRITE_4(sc, CAS_RX_KICK, CAS_NRXDESC - 4);
1158 CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, 0);
1159 if ((sc->sc_flags & CAS_REG_PLUS) != 0)
1160 CAS_WRITE_4(sc, CAS_RX_KICK2, CAS_NRXDESC2 - 4);
1162 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1163 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1165 mii_mediachg(sc->sc_mii);
1167 /* Start the one second timer. */
1168 sc->sc_wdog_timer = 0;
1169 callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
1173 cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head)
1175 bus_dma_segment_t txsegs[CAS_NTXSEGS];
1176 struct cas_txsoft *txs;
1180 int error, nexttx, nsegs, offset, seg;
1182 CAS_LOCK_ASSERT(sc, MA_OWNED);
1184 /* Get a work queue entry. */
1185 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1186 /* Ran out of descriptors. */
1191 if (((*m_head)->m_pkthdr.csum_flags & CAS_CSUM_FEATURES) != 0) {
1192 if (M_WRITABLE(*m_head) == 0) {
1193 m = m_dup(*m_head, M_DONTWAIT);
1199 offset = sizeof(struct ether_header);
1200 m = m_pullup(*m_head, offset + sizeof(struct ip));
1205 ip = (struct ip *)(mtod(m, caddr_t) + offset);
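/*
 * The TX descriptor flags encode both the offset at which the hardware
 * starts checksumming (the beginning of the TCP/UDP header) and the
 * offset at which it stuffs the result (that start offset plus the
 * stack-provided csum_data, i.e. the checksum field within the header).
 */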
1206 offset += (ip->ip_hl << 2);
1207 cflags = (offset << CAS_TD_CKSUM_START_SHFT) |
1208 ((offset + m->m_pkthdr.csum_data) <<
1209 CAS_TD_CKSUM_STUFF_SHFT) | CAS_TD_CKSUM_EN;
1213 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1214 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
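/* The mbuf chain has too many segments; defragment it and retry the load. */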
1215 if (error == EFBIG) {
1216 m = m_collapse(*m_head, M_DONTWAIT, CAS_NTXSEGS);
1223 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
1224 txs->txs_dmamap, *m_head, txsegs, &nsegs,
1231 } else if (error != 0)
1233 /* If nsegs is wrong then the stack is corrupt. */
1234 KASSERT(nsegs <= CAS_NTXSEGS,
1235 ("%s: too many DMA segments (%d)", __func__, nsegs));
1243 * Ensure we have enough descriptors free to describe
1244 * the packet. Note, we always reserve one descriptor
1245 * at the end of the ring as a termination point, in
1246 * order to prevent wrap-around.
1248 if (nsegs > sc->sc_txfree - 1) {
1249 txs->txs_ndescs = 0;
1250 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1254 txs->txs_ndescs = nsegs;
1255 txs->txs_firstdesc = sc->sc_txnext;
1256 nexttx = txs->txs_firstdesc;
1257 for (seg = 0; seg < nsegs; seg++, nexttx = CAS_NEXTTX(nexttx)) {
1260 "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
1261 __func__, seg, nexttx, txsegs[seg].ds_len,
1262 txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr));
1264 sc->sc_txdescs[nexttx].cd_buf_ptr =
1265 htole64(txsegs[seg].ds_addr);
1266 KASSERT(txsegs[seg].ds_len <
1267 CAS_TD_BUF_LEN_MASK >> CAS_TD_BUF_LEN_SHFT,
1268 ("%s: segment size too large!", __func__));
1269 sc->sc_txdescs[nexttx].cd_flags =
1270 htole64(txsegs[seg].ds_len << CAS_TD_BUF_LEN_SHFT);
1271 txs->txs_lastdesc = nexttx;
1274 /* Set EOF on the last descriptor. */
1276 CTR3(KTR_CAS, "%s: end of frame at segment %d, TX %d",
1277 __func__, seg, nexttx);
1279 sc->sc_txdescs[txs->txs_lastdesc].cd_flags |=
1280 htole64(CAS_TD_END_OF_FRAME);
1282 /* Lastly set SOF on the first descriptor. */
1284 CTR3(KTR_CAS, "%s: start of frame at segment %d, TX %d",
1285 __func__, seg, nexttx);
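/*
 * Keep the TX completion interrupt rate down: CAS_TD_INT_ME is only
 * requested on a frame once sc_txwin indicates that roughly two thirds
 * of the TX ring have been consumed since the last such request.
 */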
1287 if ((sc->sc_txwin += nsegs) > CAS_MAXTXFREE * 2 / 3) {
1289 sc->sc_txdescs[txs->txs_firstdesc].cd_flags |=
1290 htole64(cflags | CAS_TD_START_OF_FRAME | CAS_TD_INT_ME);
1292 sc->sc_txdescs[txs->txs_firstdesc].cd_flags |=
1293 htole64(cflags | CAS_TD_START_OF_FRAME);
1295 /* Sync the DMA map. */
1296 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1297 BUS_DMASYNC_PREWRITE);
1300 CTR4(KTR_CAS, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
1301 __func__, txs->txs_firstdesc, txs->txs_lastdesc,
1304 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1305 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1306 txs->txs_mbuf = *m_head;
1308 sc->sc_txnext = CAS_NEXTTX(txs->txs_lastdesc);
1309 sc->sc_txfree -= txs->txs_ndescs;
1315 cas_init_regs(struct cas_softc *sc)
1318 const u_char *laddr = IF_LLADDR(sc->sc_ifp);
1320 CAS_LOCK_ASSERT(sc, MA_OWNED);
1322 /* These registers are not cleared on reset. */
1323 if ((sc->sc_flags & CAS_INITED) == 0) {
1325 CAS_WRITE_4(sc, CAS_MAC_IPG0, 0);
1326 CAS_WRITE_4(sc, CAS_MAC_IPG1, 8);
1327 CAS_WRITE_4(sc, CAS_MAC_IPG2, 4);
1329 /* min frame length */
1330 CAS_WRITE_4(sc, CAS_MAC_MIN_FRAME, ETHER_MIN_LEN);
1331 /* max frame length and max burst size */
1332 CAS_WRITE_4(sc, CAS_MAC_MAX_BF,
1333 ((ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN) <<
1334 CAS_MAC_MAX_BF_FRM_SHFT) |
1335 (0x2000 << CAS_MAC_MAX_BF_BST_SHFT));
1337 /* more magic values */
1338 CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7);
1339 CAS_WRITE_4(sc, CAS_MAC_JAM_SIZE, 0x4);
1340 CAS_WRITE_4(sc, CAS_MAC_ATTEMPT_LIMIT, 0x10);
1341 CAS_WRITE_4(sc, CAS_MAC_CTRL_TYPE, 0x8808);
1343 /* random number seed */
1344 CAS_WRITE_4(sc, CAS_MAC_RANDOM_SEED,
1345 ((laddr[5] << 8) | laddr[4]) & 0x3ff);
1347 /* secondary MAC addresses: 0:0:0:0:0:0 */
1348 for (i = CAS_MAC_ADDR3; i <= CAS_MAC_ADDR41;
1349 i += CAS_MAC_ADDR4 - CAS_MAC_ADDR3)
1350 CAS_WRITE_4(sc, i, 0);
1352 /* MAC control address: 01:80:c2:00:00:01 */
1353 CAS_WRITE_4(sc, CAS_MAC_ADDR42, 0x0001);
1354 CAS_WRITE_4(sc, CAS_MAC_ADDR43, 0xc200);
1355 CAS_WRITE_4(sc, CAS_MAC_ADDR44, 0x0180);
1357 /* MAC filter address: 0:0:0:0:0:0 */
1358 CAS_WRITE_4(sc, CAS_MAC_AFILTER0, 0);
1359 CAS_WRITE_4(sc, CAS_MAC_AFILTER1, 0);
1360 CAS_WRITE_4(sc, CAS_MAC_AFILTER2, 0);
1361 CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK1_2, 0);
1362 CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK0, 0);
1364 /* Zero the hash table. */
1365 for (i = CAS_MAC_HASH0; i <= CAS_MAC_HASH15;
1366 i += CAS_MAC_HASH1 - CAS_MAC_HASH0)
1367 CAS_WRITE_4(sc, i, 0);
1369 sc->sc_flags |= CAS_INITED;
1372 /* Counters need to be zeroed. */
1373 CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0);
1374 CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0);
1375 CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0);
1376 CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0);
1377 CAS_WRITE_4(sc, CAS_MAC_DEFER_TMR_CNT, 0);
1378 CAS_WRITE_4(sc, CAS_MAC_PEAK_ATTEMPTS, 0);
1379 CAS_WRITE_4(sc, CAS_MAC_RX_FRAME_COUNT, 0);
1380 CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0);
1381 CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0);
1382 CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0);
1383 CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0);
1385 /* Set XOFF PAUSE time. */
1386 CAS_WRITE_4(sc, CAS_MAC_SPC, 0x1BF0 << CAS_MAC_SPC_TIME_SHFT);
1388 /* Set the station address. */
1389 CAS_WRITE_4(sc, CAS_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
1390 CAS_WRITE_4(sc, CAS_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
1391 CAS_WRITE_4(sc, CAS_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);
1393 /* Enable MII outputs. */
1394 CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, CAS_MAC_XIF_CONF_TX_OE);
1398 cas_tx_task(void *arg, int pending __unused)
1402 ifp = (struct ifnet *)arg;
1407 cas_txkick(struct cas_softc *sc)
1411 * Update the TX kick register. This register has to point to the
1412 * descriptor after the last valid one and for optimum performance
1413 * should be incremented in multiples of 4 (the DMA engine fetches/
1414 * updates descriptors in batches of 4).
1417 CTR3(KTR_CAS, "%s: %s: kicking TX %d",
1418 device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
1420 CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1421 CAS_WRITE_4(sc, CAS_TX_KICK3, sc->sc_txnext);
1425 cas_start(struct ifnet *ifp)
1427 struct cas_softc *sc = ifp->if_softc;
1433 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1434 IFF_DRV_RUNNING || (sc->sc_flags & CAS_LINK) == 0) {
1439 if (sc->sc_txfree < CAS_MAXTXFREE / 4)
1443 CTR4(KTR_CAS, "%s: %s: txfree %d, txnext %d",
1444 device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
1449 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
1450 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1453 if (cas_load_txmbuf(sc, &m) != 0) {
1456 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1457 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1460 if ((sc->sc_txnext % 4) == 0) {
1473 CTR2(KTR_CAS, "%s: packets enqueued, OWN on %d",
1474 device_get_name(sc->sc_dev), sc->sc_txnext);
1477 /* Set a watchdog timer in case the chip flakes out. */
1478 sc->sc_wdog_timer = 5;
1480 CTR3(KTR_CAS, "%s: %s: watchdog %d",
1481 device_get_name(sc->sc_dev), __func__,
1490 cas_tint(struct cas_softc *sc)
1492 struct ifnet *ifp = sc->sc_ifp;
1493 struct cas_txsoft *txs;
1499 CAS_LOCK_ASSERT(sc, MA_OWNED);
1501 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
1505 * Go through our TX list and free mbufs for those
1506 * frames that have been transmitted.
1509 CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1510 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1512 if ((ifp->if_flags & IFF_DEBUG) != 0) {
1513 printf(" txsoft %p transmit chain:\n", txs);
1514 for (i = txs->txs_firstdesc;; i = CAS_NEXTTX(i)) {
1515 printf("descriptor %d: ", i);
1516 printf("cd_flags: 0x%016llx\t",
1518 sc->sc_txdescs[i].cd_flags));
1519 printf("cd_buf_ptr: 0x%016llx\n",
1521 sc->sc_txdescs[i].cd_buf_ptr));
1522 if (i == txs->txs_lastdesc)
1529 * In theory, we could harvest some descriptors before
1530 * the ring is empty, but that's a bit complicated.
1532 * CAS_TX_COMPn points to the last descriptor
1535 txlast = CAS_READ_4(sc, CAS_TX_COMP3);
1537 CTR4(KTR_CAS, "%s: txs->txs_firstdesc = %d, "
1538 "txs->txs_lastdesc = %d, txlast = %d",
1539 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
1541 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1542 if ((txlast >= txs->txs_firstdesc) &&
1543 (txlast <= txs->txs_lastdesc))
1546 /* Ick -- this command wraps. */
1547 if ((txlast >= txs->txs_firstdesc) ||
1548 (txlast <= txs->txs_lastdesc))
1553 CTR1(KTR_CAS, "%s: releasing a descriptor", __func__);
1555 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1557 sc->sc_txfree += txs->txs_ndescs;
1559 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1560 BUS_DMASYNC_POSTWRITE);
1561 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1562 if (txs->txs_mbuf != NULL) {
1563 m_freem(txs->txs_mbuf);
1564 txs->txs_mbuf = NULL;
1567 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1574 CTR5(KTR_CAS, "%s: CAS_TX_SM1 %x CAS_TX_SM2 %x CAS_TX_DESC_BASE %llx "
1576 __func__, CAS_READ_4(sc, CAS_TX_SM1), CAS_READ_4(sc, CAS_TX_SM2),
1577 ((long long)CAS_READ_4(sc, CAS_TX_DESC3_BASE_HI) << 32) |
1578 CAS_READ_4(sc, CAS_TX_DESC3_BASE_LO),
1579 CAS_READ_4(sc, CAS_TX_COMP3));
1583 /* We freed some descriptors, so reset IFF_DRV_OACTIVE. */
1584 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1585 if (STAILQ_EMPTY(&sc->sc_txdirtyq))
1586 sc->sc_wdog_timer = 0;
1590 CTR3(KTR_CAS, "%s: %s: watchdog %d",
1591 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1596 cas_rint_timeout(void *arg)
1598 struct cas_softc *sc = arg;
1600 CAS_LOCK_ASSERT(sc, MA_OWNED);
1606 cas_rint(struct cas_softc *sc)
1608 struct cas_rxdsoft *rxds, *rxds2;
1609 struct ifnet *ifp = sc->sc_ifp;
1610 struct mbuf *m, *m2;
1611 uint64_t word1, word2, word3, word4;
1613 u_int idx, idx2, len, off, skip;
1615 CAS_LOCK_ASSERT(sc, MA_OWNED);
1617 callout_stop(&sc->sc_rx_ch);
1620 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
1623 #define PRINTWORD(n, delimiter) \
1624 printf("word ## n: 0x%016llx%c", (long long)word ## n, delimiter)
1626 #define SKIPASSERT(n) \
1627 KASSERT(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n == 0, \
1628 ("%s: word ## n not 0", __func__))
1630 #define WORDTOH(n) \
1631 word ## n = le64toh(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n)
1634 * Read the completion head register once. This limits
1635 * how long the following loop can execute.
1637 rxhead = CAS_READ_4(sc, CAS_RX_COMP_HEAD);
1639 CTR4(KTR_CAS, "%s: sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d",
1640 __func__, sc->sc_rxcptr, sc->sc_rxdptr, rxhead);
1643 CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1644 for (; sc->sc_rxcptr != rxhead;
1645 sc->sc_rxcptr = CAS_NEXTRXCOMP(sc->sc_rxcptr)) {
1661 if ((ifp->if_flags & IFF_DEBUG) != 0) {
1662 printf(" completion %d: ", sc->sc_rxcptr);
1670 if (__predict_false(
1671 (word1 & CAS_RC1_TYPE_MASK) == CAS_RC1_TYPE_HW ||
1672 (word4 & CAS_RC4_ZERO) != 0)) {
1674 * The descriptor is still marked as owned, although
1675 * it is supposed to have completed. This has been
1676 * observed on some machines. Just exiting here
1677 * might leave the packet sitting around until another
1678 * one arrives to trigger a new interrupt, which is
1679 * generally undesirable, so set up a timeout.
1681 callout_reset(&sc->sc_rx_ch, CAS_RXOWN_TICKS,
1682 cas_rint_timeout, sc);
1686 if (__predict_false(
1687 (word4 & (CAS_RC4_BAD | CAS_RC4_LEN_MMATCH)) != 0)) {
1689 device_printf(sc->sc_dev,
1690 "receive error: CRC error\n");
1694 KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 ||
1695 CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0,
1696 ("%s: data and header present", __func__));
1697 KASSERT((word1 & CAS_RC1_SPLIT_PKT) == 0 ||
1698 CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0,
1699 ("%s: split and header present", __func__));
1700 KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 ||
1701 (word1 & CAS_RC1_RELEASE_HDR) == 0,
1702 ("%s: data present but header release", __func__));
1703 KASSERT(CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0 ||
1704 (word1 & CAS_RC1_RELEASE_DATA) == 0,
1705 ("%s: header present but data release", __func__));
1707 if ((len = CAS_GET(word2, CAS_RC2_HDR_SIZE)) != 0) {
1708 idx = CAS_GET(word2, CAS_RC2_HDR_INDEX);
1709 off = CAS_GET(word2, CAS_RC2_HDR_OFF);
1711 CTR4(KTR_CAS, "%s: hdr at idx %d, off %d, len %d",
1712 __func__, idx, off, len);
1714 rxds = &sc->sc_rxdsoft[idx];
1715 MGETHDR(m, M_DONTWAIT, MT_DATA);
1717 refcount_acquire(&rxds->rxds_refcount);
1718 bus_dmamap_sync(sc->sc_rdmatag,
1719 rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD);
1720 #if __FreeBSD_version < 800016
1721 MEXTADD(m, (caddr_t)rxds->rxds_buf +
1722 off * 256 + ETHER_ALIGN, len, cas_free,
1723 rxds, M_RDONLY, EXT_NET_DRV);
1725 MEXTADD(m, (caddr_t)rxds->rxds_buf +
1726 off * 256 + ETHER_ALIGN, len, cas_free,
1727 sc, (void *)(uintptr_t)idx,
1728 M_RDONLY, EXT_NET_DRV);
1730 if ((m->m_flags & M_EXT) == 0) {
1736 m->m_pkthdr.rcvif = ifp;
1737 m->m_pkthdr.len = m->m_len = len;
1739 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1740 cas_rxcksum(m, CAS_GET(word4,
1744 (*ifp->if_input)(ifp, m);
1749 if ((word1 & CAS_RC1_RELEASE_HDR) != 0 &&
1750 refcount_release(&rxds->rxds_refcount) != 0)
1751 cas_add_rxdesc(sc, idx);
1752 } else if ((len = CAS_GET(word1, CAS_RC1_DATA_SIZE)) != 0) {
1753 idx = CAS_GET(word1, CAS_RC1_DATA_INDEX);
1754 off = CAS_GET(word1, CAS_RC1_DATA_OFF);
1756 CTR4(KTR_CAS, "%s: data at idx %d, off %d, len %d",
1757 __func__, idx, off, len);
1759 rxds = &sc->sc_rxdsoft[idx];
1760 MGETHDR(m, M_DONTWAIT, MT_DATA);
1762 refcount_acquire(&rxds->rxds_refcount);
1764 m->m_len = min(CAS_PAGE_SIZE - off, len);
1765 bus_dmamap_sync(sc->sc_rdmatag,
1766 rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD);
1767 #if __FreeBSD_version < 800016
1768 MEXTADD(m, (caddr_t)rxds->rxds_buf + off,
1769 m->m_len, cas_free, rxds, M_RDONLY,
1772 MEXTADD(m, (caddr_t)rxds->rxds_buf + off,
1773 m->m_len, cas_free, sc,
1774 (void *)(uintptr_t)idx, M_RDONLY,
1777 if ((m->m_flags & M_EXT) == 0) {
1785 if ((word1 & CAS_RC1_SPLIT_PKT) != 0) {
1786 KASSERT((word1 & CAS_RC1_RELEASE_NEXT) != 0,
1787 ("%s: split but no release next",
1790 idx2 = CAS_GET(word2, CAS_RC2_NEXT_INDEX);
1792 CTR2(KTR_CAS, "%s: split at idx %d",
1795 rxds2 = &sc->sc_rxdsoft[idx2];
1797 MGET(m2, M_DONTWAIT, MT_DATA);
1800 &rxds2->rxds_refcount);
1801 m2->m_len = len - m->m_len;
1805 BUS_DMASYNC_POSTREAD);
1806 #if __FreeBSD_version < 800016
1808 (caddr_t)rxds2->rxds_buf,
1809 m2->m_len, cas_free,
1814 (caddr_t)rxds2->rxds_buf,
1815 m2->m_len, cas_free, sc,
1816 (void *)(uintptr_t)idx2,
1817 M_RDONLY, EXT_NET_DRV);
1819 if ((m2->m_flags & M_EXT) ==
1828 else if (m != NULL) {
1834 m->m_pkthdr.rcvif = ifp;
1835 m->m_pkthdr.len = len;
1837 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1838 cas_rxcksum(m, CAS_GET(word4,
1842 (*ifp->if_input)(ifp, m);
1847 if ((word1 & CAS_RC1_RELEASE_DATA) != 0 &&
1848 refcount_release(&rxds->rxds_refcount) != 0)
1849 cas_add_rxdesc(sc, idx);
1850 if ((word1 & CAS_RC1_SPLIT_PKT) != 0 &&
1851 refcount_release(&rxds2->rxds_refcount) != 0)
1852 cas_add_rxdesc(sc, idx2);
1855 skip = CAS_GET(word1, CAS_RC1_SKIP);
1858 cas_rxcompinit(&sc->sc_rxcomps[sc->sc_rxcptr]);
1859 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1862 CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1863 CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, sc->sc_rxcptr);
1870 CTR4(KTR_CAS, "%s: done sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d",
1871 __func__, sc->sc_rxcptr, sc->sc_rxdptr,
1872 CAS_READ_4(sc, CAS_RX_COMP_HEAD));
1877 cas_free(void *arg1, void *arg2)
1879 struct cas_rxdsoft *rxds;
1880 struct cas_softc *sc;
1883 #if __FreeBSD_version < 800016
1886 idx = rxds->rxds_idx;
1889 idx = (uintptr_t)arg2;
1890 rxds = &sc->sc_rxdsoft[idx];
1892 if (refcount_release(&rxds->rxds_refcount) == 0)
1896 * NB: this function can be called via m_freem(9) within
1899 if ((locked = CAS_LOCK_OWNED(sc)) == 0)
1901 cas_add_rxdesc(sc, idx);
1907 cas_add_rxdesc(struct cas_softc *sc, u_int idx)
1910 CAS_LOCK_ASSERT(sc, MA_OWNED);
1912 bus_dmamap_sync(sc->sc_rdmatag, sc->sc_rxdsoft[idx].rxds_dmamap,
1913 BUS_DMASYNC_PREREAD);
1914 CAS_UPDATE_RXDESC(sc, sc->sc_rxdptr, idx);
1915 sc->sc_rxdptr = CAS_NEXTRXDESC(sc->sc_rxdptr);
1918 * Update the RX kick register. This register has to point to the
1919 * descriptor after the last valid one (before the current batch)
1920 * and for optimum performance should be incremented in multiples
1921 * of 4 (the DMA engine fetches/updates descriptors in batches of 4).
1923 if ((sc->sc_rxdptr % 4) == 0) {
1924 CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1925 CAS_WRITE_4(sc, CAS_RX_KICK,
1926 (sc->sc_rxdptr + CAS_NRXDESC - 4) & CAS_NRXDESC_MASK);
1931 cas_eint(struct cas_softc *sc, u_int status)
1933 struct ifnet *ifp = sc->sc_ifp;
1935 CAS_LOCK_ASSERT(sc, MA_OWNED);
1939 device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
1940 if ((status & CAS_INTR_PCI_ERROR_INT) != 0) {
1941 status = CAS_READ_4(sc, CAS_ERROR_STATUS);
1942 printf(", PCI bus error 0x%x", status);
1943 if ((status & CAS_ERROR_OTHER) != 0) {
1944 status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2);
1945 printf(", PCI status 0x%x", status);
1946 pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2);
1951 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1952 cas_init_locked(sc);
1953 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1954 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
1960 struct cas_softc *sc = v;
1962 if (__predict_false((CAS_READ_4(sc, CAS_STATUS_ALIAS) &
1963 CAS_INTR_SUMMARY) == 0))
1964 return (FILTER_STRAY);
1966 /* Disable interrupts. */
1967 CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff);
1968 taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
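/*
 * cas_intr_task() re-enables the interrupts masked here once it has
 * processed the status register.
 */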
1970 return (FILTER_HANDLED);
1974 cas_intr_task(void *arg, int pending __unused)
1976 struct cas_softc *sc = arg;
1977 struct ifnet *ifp = sc->sc_ifp;
1978 uint32_t status, status2;
1980 CAS_LOCK_ASSERT(sc, MA_NOTOWNED);
1982 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1985 status = CAS_READ_4(sc, CAS_STATUS);
1986 if (__predict_false((status & CAS_INTR_SUMMARY) == 0))
1991 CTR4(KTR_CAS, "%s: %s: cplt %x, status %x",
1992 device_get_name(sc->sc_dev), __func__,
1993 (status >> CAS_STATUS_TX_COMP3_SHFT), (u_int)status);
1996 * PCS interrupts must be cleared, otherwise no traffic is passed!
1998 if ((status & CAS_INTR_PCS_INT) != 0) {
2000 CAS_READ_4(sc, CAS_PCS_INTR_STATUS) |
2001 CAS_READ_4(sc, CAS_PCS_INTR_STATUS);
2002 if ((status2 & CAS_PCS_INTR_LINK) != 0)
2003 device_printf(sc->sc_dev,
2004 "%s: PCS link status changed\n", __func__);
2006 if ((status & CAS_MAC_CTRL_STATUS) != 0) {
2007 status2 = CAS_READ_4(sc, CAS_MAC_CTRL_STATUS);
2008 if ((status2 & CAS_MAC_CTRL_PAUSE_RCVD) != 0)
2009 device_printf(sc->sc_dev,
2010 "%s: PAUSE received (PAUSE time %d slots)\n",
2012 (status2 & CAS_MAC_CTRL_STATUS_PT_MASK) >>
2013 CAS_MAC_CTRL_STATUS_PT_SHFT);
2014 if ((status2 & CAS_MAC_CTRL_PAUSE) != 0)
2015 device_printf(sc->sc_dev,
2016 "%s: transited to PAUSE state\n", __func__);
2017 if ((status2 & CAS_MAC_CTRL_NON_PAUSE) != 0)
2018 device_printf(sc->sc_dev,
2019 "%s: transited to non-PAUSE state\n", __func__);
2021 if ((status & CAS_INTR_MIF) != 0)
2022 device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
2025 if (__predict_false((status &
2026 (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR |
2027 CAS_INTR_RX_LEN_MMATCH | CAS_INTR_PCI_ERROR_INT)) != 0)) {
2028 cas_eint(sc, status);
2033 if (__predict_false(status & CAS_INTR_TX_MAC_INT)) {
2034 status2 = CAS_READ_4(sc, CAS_MAC_TX_STATUS);
2036 (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR)) != 0)
2038 else if ((status2 & ~CAS_MAC_TX_FRAME_XMTD) != 0)
2039 device_printf(sc->sc_dev,
2040 "MAC TX fault, status %x\n", status2);
2043 if (__predict_false(status & CAS_INTR_RX_MAC_INT)) {
2044 status2 = CAS_READ_4(sc, CAS_MAC_RX_STATUS);
2045 if ((status2 & CAS_MAC_RX_OVERFLOW) != 0)
2047 else if ((status2 & ~CAS_MAC_RX_FRAME_RCVD) != 0)
2048 device_printf(sc->sc_dev,
2049 "MAC RX fault, status %x\n", status2);
2053 (CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL |
2054 CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0) {
2057 if (__predict_false((status &
2058 (CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL |
2059 CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0))
2060 device_printf(sc->sc_dev,
2061 "RX fault, status %x\n", status);
2066 (CAS_INTR_TX_INT_ME | CAS_INTR_TX_ALL | CAS_INTR_TX_DONE)) != 0)
2069 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2072 } else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2073 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
2076 status = CAS_READ_4(sc, CAS_STATUS_ALIAS);
2077 if (__predict_false((status & CAS_INTR_SUMMARY) != 0)) {
2078 taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
2083 /* Re-enable interrupts. */
2084 CAS_WRITE_4(sc, CAS_INTMASK,
2085 ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR |
2086 CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR |
2087 CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY |
2088 CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH |
2089 CAS_INTR_PCI_ERROR_INT
2091 | CAS_INTR_PCS_INT | CAS_INTR_MIF
2097 cas_watchdog(struct cas_softc *sc)
2099 struct ifnet *ifp = sc->sc_ifp;
2101 CAS_LOCK_ASSERT(sc, MA_OWNED);
2105 "%s: CAS_RX_CONF %x CAS_MAC_RX_STATUS %x CAS_MAC_RX_CONF %x",
2106 __func__, CAS_READ_4(sc, CAS_RX_CONF),
2107 CAS_READ_4(sc, CAS_MAC_RX_STATUS),
2108 CAS_READ_4(sc, CAS_MAC_RX_CONF));
2110 "%s: CAS_TX_CONF %x CAS_MAC_TX_STATUS %x CAS_MAC_TX_CONF %x",
2111 __func__, CAS_READ_4(sc, CAS_TX_CONF),
2112 CAS_READ_4(sc, CAS_MAC_TX_STATUS),
2113 CAS_READ_4(sc, CAS_MAC_TX_CONF));
2116 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
2119 if ((sc->sc_flags & CAS_LINK) != 0)
2120 device_printf(sc->sc_dev, "device timeout\n");
2121 else if (bootverbose)
2122 device_printf(sc->sc_dev, "device timeout (no link)\n");
2125 /* Try to get more packets going. */
2126 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2127 cas_init_locked(sc);
2128 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2129 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
2133 cas_mifinit(struct cas_softc *sc)
2136 /* Configure the MIF in frame mode. */
2137 CAS_WRITE_4(sc, CAS_MIF_CONF,
2138 CAS_READ_4(sc, CAS_MIF_CONF) & ~CAS_MIF_CONF_BB_MODE);
2139 CAS_BARRIER(sc, CAS_MIF_CONF, 4,
2140 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
2146 * The MII interface supports at least three different operating modes:
2148 * Bitbang mode is implemented using data, clock and output enable registers.
2150 * Frame mode is implemented by loading a complete frame into the frame
2151 * register and polling the valid bit for completion.
2153 * Polling mode uses the frame register but completion is indicated by
2158 cas_mii_readreg(device_t dev, int phy, int reg)
2160 struct cas_softc *sc;
2164 #ifdef CAS_DEBUG_PHY
2165 printf("%s: phy %d reg %d\n", __func__, phy, reg);
2168 sc = device_get_softc(dev);
2169 if ((sc->sc_flags & CAS_SERDES) != 0) {
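/*
 * With SERDES there is no MII PHY to access; reads of the generic MII
 * registers are emulated by mapping them to the corresponding PCS
 * registers.
 */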
2175 reg = CAS_PCS_STATUS;
2184 reg = CAS_PCS_ANLPAR;
2187 return (EXTSR_1000XFDX | EXTSR_1000XHDX);
2189 device_printf(sc->sc_dev,
2190 "%s: unhandled register %d\n", __func__, reg);
2193 return (CAS_READ_4(sc, reg));
2196 /* Construct the frame command. */
2197 v = CAS_MIF_FRAME_READ |
2198 (phy << CAS_MIF_FRAME_PHY_SHFT) |
2199 (reg << CAS_MIF_FRAME_REG_SHFT);
2201 CAS_WRITE_4(sc, CAS_MIF_FRAME, v);
2202 CAS_BARRIER(sc, CAS_MIF_FRAME, 4,
2203 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
2204 for (n = 0; n < 100; n++) {
2206 v = CAS_READ_4(sc, CAS_MIF_FRAME);
2207 if (v & CAS_MIF_FRAME_TA_LSB)
2208 return (v & CAS_MIF_FRAME_DATA);
2211 device_printf(sc->sc_dev, "%s: timed out\n", __func__);
2216 cas_mii_writereg(device_t dev, int phy, int reg, int val)
2218 struct cas_softc *sc;
2222 #ifdef CAS_DEBUG_PHY
2223 printf("%s: phy %d reg %d val %x\n", phy, reg, val, __func__);
2226 sc = device_get_softc(dev);
2227 if ((sc->sc_flags & CAS_SERDES) != 0) {
2230 reg = CAS_PCS_STATUS;
2234 if ((val & CAS_PCS_CTRL_RESET) == 0)
2236 CAS_WRITE_4(sc, CAS_PCS_CTRL, val);
2237 CAS_BARRIER(sc, CAS_PCS_CTRL, 4,
2238 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
2239 if (!cas_bitwait(sc, CAS_PCS_CTRL,
2240 CAS_PCS_CTRL_RESET, 0))
2241 device_printf(sc->sc_dev,
2242 "cannot reset PCS\n");
2245 CAS_WRITE_4(sc, CAS_PCS_CONF, 0);
2246 CAS_BARRIER(sc, CAS_PCS_CONF, 4,
2247 BUS_SPACE_BARRIER_WRITE);
2248 CAS_WRITE_4(sc, CAS_PCS_ANAR, val);
2249 CAS_BARRIER(sc, CAS_PCS_ANAR, 4,
2250 BUS_SPACE_BARRIER_WRITE);
2251 CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL,
2252 CAS_PCS_SERDES_CTRL_ESD);
2253 CAS_BARRIER(sc, CAS_PCS_CONF, 4,
2254 BUS_SPACE_BARRIER_WRITE);
2255 CAS_WRITE_4(sc, CAS_PCS_CONF,
2257 CAS_BARRIER(sc, CAS_PCS_CONF, 4,
2258 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
2261 reg = CAS_PCS_ANLPAR;
2264 device_printf(sc->sc_dev,
2265 "%s: unhandled register %d\n", __func__, reg);
2268 CAS_WRITE_4(sc, reg, val);
2269 CAS_BARRIER(sc, reg, 4,
2270 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
2274 /* Construct the frame command. */
2275 v = CAS_MIF_FRAME_WRITE |
2276 (phy << CAS_MIF_FRAME_PHY_SHFT) |
2277 (reg << CAS_MIF_FRAME_REG_SHFT) |
2278 (val & CAS_MIF_FRAME_DATA);
2280 CAS_WRITE_4(sc, CAS_MIF_FRAME, v);
2281 CAS_BARRIER(sc, CAS_MIF_FRAME, 4,
2282 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
2283 for (n = 0; n < 100; n++) {
2285 v = CAS_READ_4(sc, CAS_MIF_FRAME);
2286 if (v & CAS_MIF_FRAME_TA_LSB)
2290 device_printf(sc->sc_dev, "%s: timed out\n", __func__);
2295 cas_mii_statchg(device_t dev)
2297 struct cas_softc *sc;
2300 uint32_t rxcfg, txcfg, v;
2302 sc = device_get_softc(dev);
2305 CAS_LOCK_ASSERT(sc, MA_OWNED);
2308 if ((ifp->if_flags & IFF_DEBUG) != 0)
2309 device_printf(sc->sc_dev, "%s: status change\n", __func__);
2312 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
2313 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
2314 sc->sc_flags |= CAS_LINK;
2316 sc->sc_flags &= ~CAS_LINK;
2318 switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
2330 * The configuration done here corresponds to steps F) and
2331 * G) and, as far as enabling of the RX and TX MACs goes, also step H)
2332 * of the initialization sequence outlined in section 11.2.1 of
2333 * the Cassini+ ASIC Specification.
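 *
 * Concretely, the TX and RX MACs are first disabled and cas_bitwait()
 * is used to poll their enable bits until the hardware has actually
 * stopped; only then are the new TX/RX and pause configurations
 * written, and the MACs are re-enabled again at the end of this
 * function once the interface is running and a link is present.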
2336 rxcfg = CAS_READ_4(sc, CAS_MAC_RX_CONF);
2337 rxcfg &= ~(CAS_MAC_RX_CONF_EN | CAS_MAC_RX_CONF_CARR);
2338 txcfg = CAS_MAC_TX_CONF_EN_IPG0 | CAS_MAC_TX_CONF_NGU |
2339 CAS_MAC_TX_CONF_NGUL;
2340 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
2341 txcfg |= CAS_MAC_TX_CONF_ICARR | CAS_MAC_TX_CONF_ICOLLIS;
2342 else if (gigabit != 0) {
2343 rxcfg |= CAS_MAC_RX_CONF_CARR;
2344 txcfg |= CAS_MAC_TX_CONF_CARR;
2346 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, 0);
2347 CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4,
2348 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
2349 if (!cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0))
2350 device_printf(sc->sc_dev, "cannot disable TX MAC\n");
2351 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, txcfg);
2352 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, 0);
2353 CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4,
2354 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
2355 if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0))
2356 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
2357 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, rxcfg);
2359 v = CAS_READ_4(sc, CAS_MAC_CTRL_CONF) &
2360 ~(CAS_MAC_CTRL_CONF_TXP | CAS_MAC_CTRL_CONF_RXP);
2361 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2362 IFM_ETH_RXPAUSE) != 0)
2363 v |= CAS_MAC_CTRL_CONF_RXP;
2364 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2365 IFM_ETH_TXPAUSE) != 0)
2366 v |= CAS_MAC_CTRL_CONF_TXP;
2367 CAS_WRITE_4(sc, CAS_MAC_CTRL_CONF, v);
2370 * All supported chips have a bug that causes an incorrect checksum
2371 * to be calculated when letting them strip the FCS in half-
2372 * duplex mode. In theory we could disable FCS stripping and
2373 * manually adjust the checksum accordingly. It seems to make
2374 * more sense to optimize for the common case and just disable
2375 * hardware checksumming in half-duplex mode though.
2377 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) {
2378 ifp->if_capenable &= ~IFCAP_HWCSUM;
2379 ifp->if_hwassist = 0;
2380 } else if ((sc->sc_flags & CAS_NO_CSUM) == 0) {
2381 ifp->if_capenable = ifp->if_capabilities;
2382 ifp->if_hwassist = CAS_CSUM_FEATURES;
2385 if (sc->sc_variant == CAS_SATURN) {
2386 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
2387 /* silicon bug workaround */
2388 CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x41);
2390 CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7);
2393 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
2395 CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME,
2396 CAS_MAC_SLOT_TIME_CARR);
2398 CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME,
2399 CAS_MAC_SLOT_TIME_NORM);
2401 /* XIF Configuration */
2402 v = CAS_MAC_XIF_CONF_TX_OE | CAS_MAC_XIF_CONF_LNKLED;
2403 if ((sc->sc_flags & CAS_SERDES) == 0) {
2404 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
2405 v |= CAS_MAC_XIF_CONF_NOECHO;
2406 v |= CAS_MAC_XIF_CONF_BUF_OE;
2409 v |= CAS_MAC_XIF_CONF_GMII;
2410 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
2411 v |= CAS_MAC_XIF_CONF_FDXLED;
2412 CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, v);
2414 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2415 (sc->sc_flags & CAS_LINK) != 0) {
2416 CAS_WRITE_4(sc, CAS_MAC_TX_CONF,
2417 txcfg | CAS_MAC_TX_CONF_EN);
2418 CAS_WRITE_4(sc, CAS_MAC_RX_CONF,
2419 rxcfg | CAS_MAC_RX_CONF_EN);
2424 cas_mediachange(struct ifnet *ifp)
2426 struct cas_softc *sc = ifp->if_softc;
2429 /* XXX add support for serial media. */
2432 error = mii_mediachg(sc->sc_mii);
2438 cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2440 struct cas_softc *sc = ifp->if_softc;
2443 if ((ifp->if_flags & IFF_UP) == 0) {
2448 mii_pollstat(sc->sc_mii);
2449 ifmr->ifm_active = sc->sc_mii->mii_media_active;
2450 ifmr->ifm_status = sc->sc_mii->mii_media_status;
2455 cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2457 struct cas_softc *sc = ifp->if_softc;
2458 struct ifreq *ifr = (struct ifreq *)data;
2465 if ((ifp->if_flags & IFF_UP) != 0) {
2466 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2467 ((ifp->if_flags ^ sc->sc_ifflags) &
2468 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
2471 cas_init_locked(sc);
2472 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2474 sc->sc_ifflags = ifp->if_flags;
2479 if ((sc->sc_flags & CAS_NO_CSUM) != 0) {
2484 ifp->if_capenable = ifr->ifr_reqcap;
2485 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2486 ifp->if_hwassist = CAS_CSUM_FEATURES;
2488 ifp->if_hwassist = 0;
2494 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2499 if ((ifr->ifr_mtu < ETHERMIN) ||
2500 (ifr->ifr_mtu > ETHERMTU_JUMBO))
2503 ifp->if_mtu = ifr->ifr_mtu;
2507 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
2510 error = ether_ioctl(ifp, cmd, data);
2518 cas_setladrf(struct cas_softc *sc)
2520 struct ifnet *ifp = sc->sc_ifp;
2521 struct ifmultiaddr *inm;
2526 CAS_LOCK_ASSERT(sc, MA_OWNED);
2528 /* Get the current RX configuration. */
2529 v = CAS_READ_4(sc, CAS_MAC_RX_CONF);
2532 * Turn off promiscuous mode, promiscuous group mode (all multicast),
2533 * and hash filter. Depending on the case, the right bit will be
2536 v &= ~(CAS_MAC_RX_CONF_PROMISC | CAS_MAC_RX_CONF_HFILTER |
2537 CAS_MAC_RX_CONF_PGRP);
2539 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v);
2540 CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4,
2541 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
2542 if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_HFILTER, 0))
2543 device_printf(sc->sc_dev, "cannot disable RX hash filter\n");
2545 if ((ifp->if_flags & IFF_PROMISC) != 0) {
2546 v |= CAS_MAC_RX_CONF_PROMISC;
2549 if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
2550 v |= CAS_MAC_RX_CONF_PGRP;
2555 * Set up multicast address filter by passing all multicast
2556 * addresses through a crc generator, and then using the high
2557 * order 8 bits as an index into the 256 bit logical address
2558 * filter. The high order 4 bits selects the word, while the
2559 * other 4 bits select the bit within the word (where bit 0
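 *
 * For example (an arbitrary value chosen for illustration): if the top
 * 8 CRC bits are 0xa5, then 0xa5 >> 4 == 0xa selects hash[10] and
 * 0xa5 & 15 == 5 selects bit (15 - 5) == 10 within that 16-bit word,
 * matching the hash[crc >> 4] |= 1 << (15 - (crc & 15)) statement below.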
2563 /* Clear the hash table. */
2564 memset(hash, 0, sizeof(hash));
2566 if_maddr_rlock(ifp);
2567 TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
2568 if (inm->ifma_addr->sa_family != AF_LINK)
2570 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
2571 inm->ifma_addr), ETHER_ADDR_LEN);
2573 /* We just want the 8 most significant bits. */
2576 /* Set the corresponding bit in the filter. */
2577 hash[crc >> 4] |= 1 << (15 - (crc & 15));
2579 if_maddr_runlock(ifp);
2581 v |= CAS_MAC_RX_CONF_HFILTER;
2583 /* Now load the hash table into the chip (if we are using it). */
2584 for (i = 0; i < 16; i++)
2586 CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
2590 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v);
2593 static int cas_pci_attach(device_t dev);
2594 static int cas_pci_detach(device_t dev);
2595 static int cas_pci_probe(device_t dev);
2596 static int cas_pci_resume(device_t dev);
2597 static int cas_pci_suspend(device_t dev);
2599 static device_method_t cas_pci_methods[] = {
2600 /* Device interface */
2601 DEVMETHOD(device_probe, cas_pci_probe),
2602 DEVMETHOD(device_attach, cas_pci_attach),
2603 DEVMETHOD(device_detach, cas_pci_detach),
2604 DEVMETHOD(device_suspend, cas_pci_suspend),
2605 DEVMETHOD(device_resume, cas_pci_resume),
2606 /* Use the suspend handler here; it is all that is required. */
2607 DEVMETHOD(device_shutdown, cas_pci_suspend),
2610 DEVMETHOD(bus_print_child, bus_generic_print_child),
2611 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
2614 DEVMETHOD(miibus_readreg, cas_mii_readreg),
2615 DEVMETHOD(miibus_writereg, cas_mii_writereg),
2616 DEVMETHOD(miibus_statchg, cas_mii_statchg),
2621 static driver_t cas_pci_driver = {
2624 sizeof(struct cas_softc)
2627 DRIVER_MODULE(cas, pci, cas_pci_driver, cas_devclass, 0, 0);
2628 DRIVER_MODULE(miibus, cas, miibus_driver, miibus_devclass, 0, 0);
2629 MODULE_DEPEND(cas, pci, 1, 1, 1);
2631 static const struct cas_pci_dev {
2635 const char *cpd_desc;
2636 } const cas_pci_devlist[] = {
2637 { 0x0035100b, 0x0, CAS_SATURN, "NS DP83065 Saturn Gigabit Ethernet" },
2638 { 0xabba108e, 0x10, CAS_CASPLUS, "Sun Cassini+ Gigabit Ethernet" },
2639 { 0xabba108e, 0x0, CAS_CAS, "Sun Cassini Gigabit Ethernet" },
2644 cas_pci_probe(device_t dev)
2648 for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) {
2649 if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid &&
2650 pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) {
2651 device_set_desc(dev, cas_pci_devlist[i].cpd_desc);
2652 return (BUS_PROBE_DEFAULT);
2659 static struct resource_spec cas_pci_res_spec[] = {
2660 { SYS_RES_IRQ, 0, RF_SHAREABLE | RF_ACTIVE }, /* CAS_RES_INTR */
2661 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, /* CAS_RES_MEM */
2665 #define CAS_LOCAL_MAC_ADDRESS "local-mac-address"
2666 #define CAS_PHY_INTERFACE "phy-interface"
2667 #define CAS_PHY_TYPE "phy-type"
2668 #define CAS_PHY_TYPE_PCS "pcs"
2671 cas_pci_attach(device_t dev)
2673 char buf[sizeof(CAS_LOCAL_MAC_ADDRESS)];
2674 struct cas_softc *sc;
2676 #if !(defined(__powerpc__) || defined(__sparc64__))
2677 u_char enaddr[4][ETHER_ADDR_LEN];
2678 u_int j, k, lma, pcs[4], phy;
2681 sc = device_get_softc(dev);
2682 sc->sc_variant = CAS_UNKNOWN;
2683 for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) {
2684 if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid &&
2685 pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) {
2686 sc->sc_variant = cas_pci_devlist[i].cpd_variant;
2690 if (sc->sc_variant == CAS_UNKNOWN) {
2691 device_printf(dev, "unknown adaptor\n");
2695 pci_enable_busmaster(dev);
2698 if (sc->sc_variant == CAS_CAS && pci_get_revid(dev) < 0x02)
2699 /* Hardware checksumming may hang TX. */
2700 sc->sc_flags |= CAS_NO_CSUM;
2701 if (sc->sc_variant == CAS_CASPLUS || sc->sc_variant == CAS_SATURN)
2702 sc->sc_flags |= CAS_REG_PLUS;
2703 if (sc->sc_variant == CAS_CAS ||
2704 (sc->sc_variant == CAS_CASPLUS && pci_get_revid(dev) < 0x11))
2705 sc->sc_flags |= CAS_TABORT;
2707 device_printf(dev, "flags=0x%x\n", sc->sc_flags);
2709 if (bus_alloc_resources(dev, cas_pci_res_spec, sc->sc_res)) {
2710 device_printf(dev, "failed to allocate resources\n");
2711 bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
2715 CAS_LOCK_INIT(sc, device_get_nameunit(dev));
2717 #if defined(__powerpc__) || defined(__sparc64__)
2718 OF_getetheraddr(dev, sc->sc_enaddr);
2719 if (OF_getprop(ofw_bus_get_node(dev), CAS_PHY_INTERFACE, buf,
2720 sizeof(buf)) > 0 || OF_getprop(ofw_bus_get_node(dev),
2721 CAS_PHY_TYPE, buf, sizeof(buf)) > 0) {
2722 buf[sizeof(buf) - 1] = '\0';
2723 if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0)
2724 sc->sc_flags |= CAS_SERDES;
2728 * Dig out VPD (vital product data) and read the MAC address as well
2729 * as the PHY type. The VPD resides in the PCI Expansion ROM (PCI
2730 * FCode) and can't be accessed via the PCI capability pointer.
2731 * SUNW,pci-ce and SUNW,pci-qge use the Enhanced VPD format described
2732 * in the freely available US Patent 7149820.
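 *
 * In short, the code below walks: the Expansion ROM header (0xaa55
 * signature, with a pointer to the PCI data structure at offset 0x18),
 * the PCI data structure ("PCIR" signature plus vendor/device IDs,
 * with a pointer to the VPD at offset 0x08) and finally the large VPD
 * resources, looking for 'Z' (Enhanced VPD) keywords whose 'I'
 * (instance) properties carry the "local-mac-address" and
 * "phy-interface"/"phy-type" values.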
2735 #define PCI_ROMHDR_SIZE 0x1c
2736 #define PCI_ROMHDR_SIG 0x00
2737 #define PCI_ROMHDR_SIG_MAGIC 0xaa55 /* little endian */
2738 #define PCI_ROMHDR_PTR_DATA 0x18
2739 #define PCI_ROM_SIZE 0x18
2740 #define PCI_ROM_SIG 0x00
2741 #define PCI_ROM_SIG_MAGIC 0x52494350 /* "PCIR", endian reversed */
2743 #define PCI_ROM_VENDOR 0x04
2744 #define PCI_ROM_DEVICE 0x06
2745 #define PCI_ROM_PTR_VPD 0x08
2746 #define PCI_VPDRES_BYTE0 0x00
2747 #define PCI_VPDRES_ISLARGE(x) ((x) & 0x80)
2748 #define PCI_VPDRES_LARGE_NAME(x) ((x) & 0x7f)
2749 #define PCI_VPDRES_LARGE_LEN_LSB 0x01
2750 #define PCI_VPDRES_LARGE_LEN_MSB 0x02
2751 #define PCI_VPDRES_LARGE_SIZE 0x03
2752 #define PCI_VPDRES_TYPE_ID_STRING 0x02 /* large */
2753 #define PCI_VPDRES_TYPE_VPD 0x10 /* large */
2754 #define PCI_VPD_KEY0 0x00
2755 #define PCI_VPD_KEY1 0x01
2756 #define PCI_VPD_LEN 0x02
2757 #define PCI_VPD_SIZE 0x03
2759 #define CAS_ROM_READ_1(sc, offs) \
2760 CAS_READ_1((sc), CAS_PCI_ROM_OFFSET + (offs))
2761 #define CAS_ROM_READ_2(sc, offs) \
2762 CAS_READ_2((sc), CAS_PCI_ROM_OFFSET + (offs))
2763 #define CAS_ROM_READ_4(sc, offs) \
2764 CAS_READ_4((sc), CAS_PCI_ROM_OFFSET + (offs))
2767 memset(enaddr, 0, sizeof(enaddr));
2768 memset(pcs, 0, sizeof(pcs));
2770 /* Enable PCI Expansion ROM access. */
2771 CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN,
2772 CAS_BIM_LDEV_OEN_PAD | CAS_BIM_LDEV_OEN_PROM);
2774 /* Read PCI Expansion ROM header. */
2775 if (CAS_ROM_READ_2(sc, PCI_ROMHDR_SIG) != PCI_ROMHDR_SIG_MAGIC ||
2776 (i = CAS_ROM_READ_2(sc, PCI_ROMHDR_PTR_DATA)) <
2778 device_printf(dev, "unexpected PCI Expansion ROM header\n");
2782 /* Read PCI Expansion ROM data. */
2783 if (CAS_ROM_READ_4(sc, i + PCI_ROM_SIG) != PCI_ROM_SIG_MAGIC ||
2784 CAS_ROM_READ_2(sc, i + PCI_ROM_VENDOR) != pci_get_vendor(dev) ||
2785 CAS_ROM_READ_2(sc, i + PCI_ROM_DEVICE) != pci_get_device(dev) ||
2786 (j = CAS_ROM_READ_2(sc, i + PCI_ROM_PTR_VPD)) <
2788 device_printf(dev, "unexpected PCI Expansion ROM data\n");
2794 if (PCI_VPDRES_ISLARGE(CAS_ROM_READ_1(sc,
2795 j + PCI_VPDRES_BYTE0)) == 0) {
2796 device_printf(dev, "no large PCI VPD\n");
2800 i = (CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_MSB) << 8) |
2801 CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_LSB);
2802 switch (PCI_VPDRES_LARGE_NAME(CAS_ROM_READ_1(sc,
2803 j + PCI_VPDRES_BYTE0))) {
2804 case PCI_VPDRES_TYPE_ID_STRING:
2805 /* Skip identifier string. */
2806 j += PCI_VPDRES_LARGE_SIZE + i;
2808 case PCI_VPDRES_TYPE_VPD:
2809 for (j += PCI_VPDRES_LARGE_SIZE; i > 0;
2810 i -= PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN),
2811 j += PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN)) {
2812 if (CAS_ROM_READ_1(sc, j + PCI_VPD_KEY0) != 'Z')
2813 /* no Enhanced VPD */
2815 if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE) != 'I')
2816 /* no instance property */
2818 if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) == 'B') {
2820 if (CAS_ROM_READ_1(sc,
2821 j + PCI_VPD_SIZE + 4) != ETHER_ADDR_LEN)
2823 bus_read_region_1(sc->sc_res[CAS_RES_MEM],
2824 CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5,
2826 buf[sizeof(buf) - 1] = '\0';
2827 if (strcmp(buf, CAS_LOCAL_MAC_ADDRESS) != 0)
2829 bus_read_region_1(sc->sc_res[CAS_RES_MEM],
2830 CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE +
2831 5 + sizeof(CAS_LOCAL_MAC_ADDRESS),
2832 enaddr[lma], sizeof(enaddr[lma]));
2834 if (lma == 4 && phy == 4)
2836 } else if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) ==
2839 if (CAS_ROM_READ_1(sc,
2840 j + PCI_VPD_SIZE + 4) !=
2841 sizeof(CAS_PHY_TYPE_PCS))
2843 bus_read_region_1(sc->sc_res[CAS_RES_MEM],
2844 CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5,
2846 buf[sizeof(buf) - 1] = '\0';
2847 if (strcmp(buf, CAS_PHY_INTERFACE) == 0)
2848 k = sizeof(CAS_PHY_INTERFACE);
2849 else if (strcmp(buf, CAS_PHY_TYPE) == 0)
2850 k = sizeof(CAS_PHY_TYPE);
2853 bus_read_region_1(sc->sc_res[CAS_RES_MEM],
2854 CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE +
2855 5 + k, buf, sizeof(buf));
2856 buf[sizeof(buf) - 1] = '\0';
2857 if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0)
2860 if (lma == 4 && phy == 4)
2866 device_printf(dev, "unexpected PCI VPD\n");
2871 CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN, 0);
2874 device_printf(dev, "could not determine Ethernet address\n");
2878 if (lma > 1 && pci_get_slot(dev) < sizeof(enaddr) / sizeof(*enaddr))
2879 i = pci_get_slot(dev);
2880 memcpy(sc->sc_enaddr, enaddr[i], ETHER_ADDR_LEN);
2883 device_printf(dev, "could not determine PHY type\n");
2887 if (phy > 1 && pci_get_slot(dev) < sizeof(pcs) / sizeof(*pcs))
2888 i = pci_get_slot(dev);
2890 sc->sc_flags |= CAS_SERDES;
2893 if (cas_attach(sc) != 0) {
2894 device_printf(dev, "could not be attached\n");
2898 if (bus_setup_intr(dev, sc->sc_res[CAS_RES_INTR], INTR_TYPE_NET |
2899 INTR_MPSAFE, cas_intr, NULL, sc, &sc->sc_ih) != 0) {
2900 device_printf(dev, "failed to set up interrupt\n");
2907 CAS_LOCK_DESTROY(sc);
2908 bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
2913 cas_pci_detach(device_t dev)
2915 struct cas_softc *sc;
2917 sc = device_get_softc(dev);
2918 bus_teardown_intr(dev, sc->sc_res[CAS_RES_INTR], sc->sc_ih);
2920 CAS_LOCK_DESTROY(sc);
2921 bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
2926 cas_pci_suspend(device_t dev)
2929 cas_suspend(device_get_softc(dev));
2934 cas_pci_resume(device_t dev)
2937 cas_resume(device_get_softc(dev));