1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */
4 * Copyright (c) 1997, 1998, 1999, 2000
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
57 * The XaQti XMAC II datasheet,
58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 * The SysKonnect GEnesis manual, http://www.syskonnect.com
61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
72 * The SysKonnect gigabit ethernet adapters consist of two main
73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75 * components and a PHY while the GEnesis controller provides a PCI
76 * interface with DMA support. Each card may have between 512K and
77 * 2MB of SRAM on board depending on the configuration.
79 * The SysKonnect GEnesis controller can have either one or two XMAC
80 * chips connected to it, allowing single or dual port NIC configurations.
81 * SysKonnect has the distinction of being the only vendor on the market
82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84 * XMAC registers. This driver takes advantage of these features to allow
85 * both XMACs to operate as independent interfaces.
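/*
 * Rough block diagram of a dual port card (an illustrative sketch based
 * on the description above, not taken from the datasheet):
 *
 *	PCI bus -- GEnesis controller -- RAM buffer (512K-2MB SRAM)
 *	               |-- XMAC II + PHY -- port A
 *	               |-- XMAC II + PHY -- port B
 */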
88 #include <sys/param.h>
89 #include <sys/systm.h>
91 #include <sys/endian.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/module.h>
96 #include <sys/socket.h>
97 #include <sys/sockio.h>
98 #include <sys/queue.h>
99 #include <sys/sysctl.h>
102 #include <net/ethernet.h>
104 #include <net/if_arp.h>
105 #include <net/if_dl.h>
106 #include <net/if_media.h>
107 #include <net/if_types.h>
108 #include <net/if_vlan_var.h>
110 #include <netinet/in.h>
111 #include <netinet/in_systm.h>
112 #include <netinet/ip.h>
114 #include <machine/bus.h>
115 #include <machine/in_cksum.h>
116 #include <machine/resource.h>
117 #include <sys/rman.h>
119 #include <dev/mii/mii.h>
120 #include <dev/mii/miivar.h>
121 #include <dev/mii/brgphyreg.h>
123 #include <dev/pci/pcireg.h>
124 #include <dev/pci/pcivar.h>
127 #define SK_USEIOSPACE
130 #include <dev/sk/if_skreg.h>
131 #include <dev/sk/xmaciireg.h>
132 #include <dev/sk/yukonreg.h>
134 MODULE_DEPEND(sk, pci, 1, 1, 1);
135 MODULE_DEPEND(sk, ether, 1, 1, 1);
136 MODULE_DEPEND(sk, miibus, 1, 1, 1);
138 /* "device miibus" required. See GENERIC if you get errors here. */
139 #include "miibus_if.h"
142 static const char rcsid[] =
146 static struct sk_type sk_devs[] = {
150 "SysKonnect Gigabit Ethernet (V1.0)"
155 "SysKonnect Gigabit Ethernet (V2.0)"
160 "Marvell Gigabit Ethernet"
164 DEVICEID_BELKIN_5005,
165 "Belkin F5D5005 Gigabit Ethernet"
170 "3Com 3C940 Gigabit Ethernet"
174 DEVICEID_LINKSYS_EG1032,
175 "Linksys EG1032 Gigabit Ethernet"
179 DEVICEID_DLINK_DGE530T_A1,
180 "D-Link DGE-530T Gigabit Ethernet"
184 DEVICEID_DLINK_DGE530T_B1,
185 "D-Link DGE-530T Gigabit Ethernet"
190 static int skc_probe(device_t);
191 static int skc_attach(device_t);
192 static int skc_detach(device_t);
193 static int skc_shutdown(device_t);
194 static int skc_suspend(device_t);
195 static int skc_resume(device_t);
196 static int sk_detach(device_t);
197 static int sk_probe(device_t);
198 static int sk_attach(device_t);
199 static void sk_tick(void *);
200 static void sk_yukon_tick(void *);
201 static void sk_intr(void *);
202 static void sk_intr_xmac(struct sk_if_softc *);
203 static void sk_intr_bcom(struct sk_if_softc *);
204 static void sk_intr_yukon(struct sk_if_softc *);
205 static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t);
206 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
207 static void sk_rxeof(struct sk_if_softc *);
208 static void sk_jumbo_rxeof(struct sk_if_softc *);
209 static void sk_txeof(struct sk_if_softc *);
210 static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *);
211 static int sk_encap(struct sk_if_softc *, struct mbuf **);
212 static void sk_start(struct ifnet *);
213 static void sk_start_locked(struct ifnet *);
214 static int sk_ioctl(struct ifnet *, u_long, caddr_t);
215 static void sk_init(void *);
216 static void sk_init_locked(struct sk_if_softc *);
217 static void sk_init_xmac(struct sk_if_softc *);
218 static void sk_init_yukon(struct sk_if_softc *);
219 static void sk_stop(struct sk_if_softc *);
220 static void sk_watchdog(void *);
221 static int sk_ifmedia_upd(struct ifnet *);
222 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
223 static void sk_reset(struct sk_softc *);
224 static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
225 static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
226 static int sk_newbuf(struct sk_if_softc *, int);
227 static int sk_jumbo_newbuf(struct sk_if_softc *, int);
228 static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
229 static int sk_dma_alloc(struct sk_if_softc *);
230 static int sk_dma_jumbo_alloc(struct sk_if_softc *);
231 static void sk_dma_free(struct sk_if_softc *);
232 static void sk_dma_jumbo_free(struct sk_if_softc *);
233 static int sk_init_rx_ring(struct sk_if_softc *);
234 static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
235 static void sk_init_tx_ring(struct sk_if_softc *);
236 static u_int32_t sk_win_read_4(struct sk_softc *, int);
237 static u_int16_t sk_win_read_2(struct sk_softc *, int);
238 static u_int8_t sk_win_read_1(struct sk_softc *, int);
239 static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
240 static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
241 static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
243 static int sk_miibus_readreg(device_t, int, int);
244 static int sk_miibus_writereg(device_t, int, int, int);
245 static void sk_miibus_statchg(device_t);
247 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
248 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
250 static void sk_xmac_miibus_statchg(struct sk_if_softc *);
252 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
253 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
255 static void sk_marv_miibus_statchg(struct sk_if_softc *);
257 static uint32_t sk_xmchash(const uint8_t *);
258 static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
259 static void sk_rxfilter(struct sk_if_softc *);
260 static void sk_rxfilter_genesis(struct sk_if_softc *);
261 static void sk_rxfilter_yukon(struct sk_if_softc *);
263 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
264 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
267 static int jumbo_disable = 0;
268 TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);
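/*
 * Usage note (illustrative): setting hw.skc.jumbo_disable="1" in
 * loader.conf skips allocation of the 9K jumbo receive buffer pool;
 * sk_dma_jumbo_alloc() below honors this tunable.
 */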
271 * The SK-NET GENESIS appears to support only a very simple Tx checksum
272 * offload capability, and I believe it can generate a zero checksum
273 * value for UDP packets on Tx because the hardware cannot differentiate
274 * UDP packets from TCP packets. A zero UDP checksum is invalid, as it
275 * means the sender performed no checksum computation. To be safe, I
276 * have disabled UDP checksum offload for the moment. Alternatively,
277 * we could introduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum
280 #define SK_CSUM_FEATURES (CSUM_TCP)
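/*
 * A minimal sketch (under "notyet", not wired into the driver) of the
 * LINK0-style toggle mentioned above, loosely following hme(4). The
 * helper name sk_csum_features() and the use of IFF_LINK0 here are
 * illustrative assumptions, not part of this driver.
 */
#ifdef notyet
static __inline int
sk_csum_features(struct ifnet *ifp)
{
	/* Let the administrator explicitly opt in to UDP Tx offload. */
	if (ifp->if_flags & IFF_LINK0)
		return (CSUM_TCP | CSUM_UDP);
	return (SK_CSUM_FEATURES);
}
#endif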
283 * Note that we have newbus methods for both the GEnesis controller
284 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
285 * the miibus code is a child of the XMACs. We need to do it this way
286 * so that the miibus drivers can access the PHY registers on the
287 * right PHY. It's not quite what I had in mind, but it's the only
288 * design that achieves the desired effect.
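/*
 * The resulting newbus hierarchy looks like this (illustrative):
 *
 *	pci
 *	 \-- skc	(GEnesis/Yukon controller)
 *	      \-- sk	(one instance per MAC/port)
 *	           \-- miibus
 *	                \-- PHY driver (e.g. brgphy)
 */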
290 static device_method_t skc_methods[] = {
291 /* Device interface */
292 DEVMETHOD(device_probe, skc_probe),
293 DEVMETHOD(device_attach, skc_attach),
294 DEVMETHOD(device_detach, skc_detach),
295 DEVMETHOD(device_suspend, skc_suspend),
296 DEVMETHOD(device_resume, skc_resume),
297 DEVMETHOD(device_shutdown, skc_shutdown),
300 DEVMETHOD(bus_print_child, bus_generic_print_child),
301 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
306 static driver_t skc_driver = {
309 sizeof(struct sk_softc)
312 static devclass_t skc_devclass;
314 static device_method_t sk_methods[] = {
315 /* Device interface */
316 DEVMETHOD(device_probe, sk_probe),
317 DEVMETHOD(device_attach, sk_attach),
318 DEVMETHOD(device_detach, sk_detach),
319 DEVMETHOD(device_shutdown, bus_generic_shutdown),
322 DEVMETHOD(bus_print_child, bus_generic_print_child),
323 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
326 DEVMETHOD(miibus_readreg, sk_miibus_readreg),
327 DEVMETHOD(miibus_writereg, sk_miibus_writereg),
328 DEVMETHOD(miibus_statchg, sk_miibus_statchg),
333 static driver_t sk_driver = {
336 sizeof(struct sk_if_softc)
339 static devclass_t sk_devclass;
341 DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, 0, 0);
342 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
343 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
345 static struct resource_spec sk_res_spec_io[] = {
346 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
347 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
351 static struct resource_spec sk_res_spec_mem[] = {
352 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
353 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
357 #define SK_SETBIT(sc, reg, x) \
358 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
360 #define SK_CLRBIT(sc, reg, x) \
361 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
363 #define SK_WIN_SETBIT_4(sc, reg, x) \
364 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
366 #define SK_WIN_CLRBIT_4(sc, reg, x) \
367 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
369 #define SK_WIN_SETBIT_2(sc, reg, x) \
370 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
372 #define SK_WIN_CLRBIT_2(sc, reg, x) \
373 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
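/*
 * Usage sketch for the helpers above (illustrative): direct CSRs are
 * touched with the CSR_ and SK_SETBIT/SK_CLRBIT macros, while windowed
 * registers go through the sk_win_ accessors, which bank-switch via
 * SK_RAP:
 *
 *	SK_CLRBIT(sc, SK_CSR, SK_CSR_SW_RESET);
 *	SK_WIN_SETBIT_4(sc, SK_IMMR, SK_ISR_RX1_EOF);
 */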
376 sk_win_read_4(sc, reg)
381 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
382 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
384 return(CSR_READ_4(sc, reg));
389 sk_win_read_2(sc, reg)
394 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
395 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
397 return(CSR_READ_2(sc, reg));
402 sk_win_read_1(sc, reg)
407 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
408 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
410 return(CSR_READ_1(sc, reg));
415 sk_win_write_4(sc, reg, val)
421 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
422 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
424 CSR_WRITE_4(sc, reg, val);
430 sk_win_write_2(sc, reg, val)
436 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
437 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
439 CSR_WRITE_2(sc, reg, val);
445 sk_win_write_1(sc, reg, val)
451 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
452 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
454 CSR_WRITE_1(sc, reg, val);
460 sk_miibus_readreg(dev, phy, reg)
464 struct sk_if_softc *sc_if;
467 sc_if = device_get_softc(dev);
469 SK_IF_MII_LOCK(sc_if);
470 switch (sc_if->sk_softc->sk_type) {
472 v = sk_xmac_miibus_readreg(sc_if, phy, reg);
477 v = sk_marv_miibus_readreg(sc_if, phy, reg);
483 SK_IF_MII_UNLOCK(sc_if);
489 sk_miibus_writereg(dev, phy, reg, val)
493 struct sk_if_softc *sc_if;
496 sc_if = device_get_softc(dev);
498 SK_IF_MII_LOCK(sc_if);
499 switch (sc_if->sk_softc->sk_type) {
501 v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
506 v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
512 SK_IF_MII_UNLOCK(sc_if);
518 sk_miibus_statchg(dev)
521 struct sk_if_softc *sc_if;
523 sc_if = device_get_softc(dev);
525 SK_IF_MII_LOCK(sc_if);
526 switch (sc_if->sk_softc->sk_type) {
528 sk_xmac_miibus_statchg(sc_if);
533 sk_marv_miibus_statchg(sc_if);
536 SK_IF_MII_UNLOCK(sc_if);
542 sk_xmac_miibus_readreg(sc_if, phy, reg)
543 struct sk_if_softc *sc_if;
548 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
551 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
552 SK_XM_READ_2(sc_if, XM_PHY_DATA);
553 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
554 for (i = 0; i < SK_TIMEOUT; i++) {
556 if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
557 XM_MMUCMD_PHYDATARDY)
561 if (i == SK_TIMEOUT) {
562 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
567 i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
573 sk_xmac_miibus_writereg(sc_if, phy, reg, val)
574 struct sk_if_softc *sc_if;
579 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
580 for (i = 0; i < SK_TIMEOUT; i++) {
581 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
585 if (i == SK_TIMEOUT) {
586 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
590 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
591 for (i = 0; i < SK_TIMEOUT; i++) {
593 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
597 if_printf(sc_if->sk_ifp, "phy write timed out\n");
603 sk_xmac_miibus_statchg(sc_if)
604 struct sk_if_softc *sc_if;
606 struct mii_data *mii;
608 mii = device_get_softc(sc_if->sk_miibus);
611 * If this is a GMII PHY, manually set the XMAC's
612 * duplex mode accordingly.
614 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
615 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
616 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
618 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
624 sk_marv_miibus_readreg(sc_if, phy, reg)
625 struct sk_if_softc *sc_if;
632 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
633 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
637 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
638 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
640 for (i = 0; i < SK_TIMEOUT; i++) {
642 val = SK_YU_READ_2(sc_if, YUKON_SMICR);
643 if (val & YU_SMICR_READ_VALID)
647 if (i == SK_TIMEOUT) {
648 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
652 val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
658 sk_marv_miibus_writereg(sc_if, phy, reg, val)
659 struct sk_if_softc *sc_if;
664 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
665 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
666 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
668 for (i = 0; i < SK_TIMEOUT; i++) {
670 if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
674 if_printf(sc_if->sk_ifp, "phy write timed out\n");
680 sk_marv_miibus_statchg(sc_if)
681 struct sk_if_softc *sc_if;
694 /* Compute CRC for the address value. */
695 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
697 return (~crc & ((1 << HASH_BITS) - 1));
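/*
 * Worked example (illustrative): the inverted little-endian CRC32 is
 * masked down to HASH_BITS bits, selecting one of the 64 hash-table
 * bits (hashes[0]/hashes[1]) that sk_rxfilter_genesis() later writes
 * to XM_MAR0/XM_MAR2.
 */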
701 sk_setfilt(sc_if, addr, slot)
702 struct sk_if_softc *sc_if;
708 base = XM_RXFILT_ENTRY(slot);
710 SK_XM_WRITE_2(sc_if, base, addr[0]);
711 SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
712 SK_XM_WRITE_2(sc_if, base + 4, addr[2]);
719 struct sk_if_softc *sc_if;
723 SK_IF_LOCK_ASSERT(sc_if);
725 sc = sc_if->sk_softc;
726 if (sc->sk_type == SK_GENESIS)
727 sk_rxfilter_genesis(sc_if);
729 sk_rxfilter_yukon(sc_if);
733 sk_rxfilter_genesis(sc_if)
734 struct sk_if_softc *sc_if;
736 struct ifnet *ifp = sc_if->sk_ifp;
737 u_int32_t hashes[2] = { 0, 0 }, mode;
739 struct ifmultiaddr *ifma;
740 u_int16_t dummy[] = { 0, 0, 0 };
741 u_int16_t maddr[(ETHER_ADDR_LEN+1)/2];
743 SK_IF_LOCK_ASSERT(sc_if);
745 mode = SK_XM_READ_4(sc_if, XM_MODE);
746 mode &= ~(XM_MODE_RX_PROMISC | XM_MODE_RX_USE_HASH |
747 XM_MODE_RX_USE_PERFECT);
748 /* First, zot all the existing perfect filters. */
749 for (i = 1; i < XM_RXFILT_MAX; i++)
750 sk_setfilt(sc_if, dummy, i);
752 /* Now program new ones. */
753 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
754 if (ifp->if_flags & IFF_ALLMULTI)
755 mode |= XM_MODE_RX_USE_HASH;
756 if (ifp->if_flags & IFF_PROMISC)
757 mode |= XM_MODE_RX_PROMISC;
758 hashes[0] = 0xFFFFFFFF;
759 hashes[1] = 0xFFFFFFFF;
763 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
765 if (ifma->ifma_addr->sa_family != AF_LINK)
768 * Program the first XM_RXFILT_MAX multicast groups
769 * into the perfect filter.
771 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
772 maddr, ETHER_ADDR_LEN);
773 if (i < XM_RXFILT_MAX) {
774 sk_setfilt(sc_if, maddr, i);
775 mode |= XM_MODE_RX_USE_PERFECT;
779 h = sk_xmchash((const uint8_t *)maddr);
781 hashes[0] |= (1 << h);
783 hashes[1] |= (1 << (h - 32));
784 mode |= XM_MODE_RX_USE_HASH;
786 if_maddr_runlock(ifp);
789 SK_XM_WRITE_4(sc_if, XM_MODE, mode);
790 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
791 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
795 sk_rxfilter_yukon(sc_if)
796 struct sk_if_softc *sc_if;
799 u_int32_t crc, hashes[2] = { 0, 0 }, mode;
800 struct ifmultiaddr *ifma;
802 SK_IF_LOCK_ASSERT(sc_if);
805 mode = SK_YU_READ_2(sc_if, YUKON_RCR);
806 if (ifp->if_flags & IFF_PROMISC)
807 mode &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN);
808 else if (ifp->if_flags & IFF_ALLMULTI) {
809 mode |= YU_RCR_UFLEN | YU_RCR_MUFLEN;
810 hashes[0] = 0xFFFFFFFF;
811 hashes[1] = 0xFFFFFFFF;
813 mode |= YU_RCR_UFLEN;
815 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
816 if (ifma->ifma_addr->sa_family != AF_LINK)
818 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
819 ifma->ifma_addr), ETHER_ADDR_LEN);
820 /* Just want the 6 least significant bits. */
822 /* Set the corresponding bit in the hash table. */
823 hashes[crc >> 5] |= 1 << (crc & 0x1f);
825 if_maddr_runlock(ifp);
826 if (hashes[0] != 0 || hashes[1] != 0)
827 mode |= YU_RCR_MUFLEN;
830 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
831 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
832 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
833 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
834 SK_YU_WRITE_2(sc_if, YUKON_RCR, mode);
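/*
 * Worked example (illustrative): the 6-bit hash selects bit
 * (crc & 0x1f) of hashes[crc >> 5]; the two 32-bit words are then
 * split across the four 16-bit YUKON_MCAH1..MCAH4 registers above.
 */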
838 sk_init_rx_ring(sc_if)
839 struct sk_if_softc *sc_if;
841 struct sk_ring_data *rd;
843 u_int32_t csum_start;
846 sc_if->sk_cdata.sk_rx_cons = 0;
848 csum_start = (ETHER_HDR_LEN + sizeof(struct ip)) << 16 |
850 rd = &sc_if->sk_rdata;
851 bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
852 for (i = 0; i < SK_RX_RING_CNT; i++) {
853 if (sk_newbuf(sc_if, i) != 0)
855 if (i == (SK_RX_RING_CNT - 1))
856 addr = SK_RX_RING_ADDR(sc_if, 0);
858 addr = SK_RX_RING_ADDR(sc_if, i + 1);
859 rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
860 rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
863 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
864 sc_if->sk_cdata.sk_rx_ring_map,
865 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
871 sk_init_jumbo_rx_ring(sc_if)
872 struct sk_if_softc *sc_if;
874 struct sk_ring_data *rd;
876 u_int32_t csum_start;
879 sc_if->sk_cdata.sk_jumbo_rx_cons = 0;
881 csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
883 rd = &sc_if->sk_rdata;
884 bzero(rd->sk_jumbo_rx_ring,
885 sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
886 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
887 if (sk_jumbo_newbuf(sc_if, i) != 0)
889 if (i == (SK_JUMBO_RX_RING_CNT - 1))
890 addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
892 addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
893 rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
894 rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
897 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
898 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
899 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
905 sk_init_tx_ring(sc_if)
906 struct sk_if_softc *sc_if;
908 struct sk_ring_data *rd;
909 struct sk_txdesc *txd;
913 STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
914 STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);
916 sc_if->sk_cdata.sk_tx_prod = 0;
917 sc_if->sk_cdata.sk_tx_cons = 0;
918 sc_if->sk_cdata.sk_tx_cnt = 0;
920 rd = &sc_if->sk_rdata;
921 bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
922 for (i = 0; i < SK_TX_RING_CNT; i++) {
923 if (i == (SK_TX_RING_CNT - 1))
924 addr = SK_TX_RING_ADDR(sc_if, 0);
926 addr = SK_TX_RING_ADDR(sc_if, i + 1);
927 rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
928 txd = &sc_if->sk_cdata.sk_txdesc[i];
929 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
932 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
933 sc_if->sk_cdata.sk_tx_ring_map,
934 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
938 sk_discard_rxbuf(sc_if, idx)
939 struct sk_if_softc *sc_if;
942 struct sk_rx_desc *r;
943 struct sk_rxdesc *rxd;
947 r = &sc_if->sk_rdata.sk_rx_ring[idx];
948 rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
950 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
954 sk_discard_jumbo_rxbuf(sc_if, idx)
955 struct sk_if_softc *sc_if;
958 struct sk_rx_desc *r;
959 struct sk_rxdesc *rxd;
962 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
963 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
965 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
969 sk_newbuf(sc_if, idx)
970 struct sk_if_softc *sc_if;
973 struct sk_rx_desc *r;
974 struct sk_rxdesc *rxd;
976 bus_dma_segment_t segs[1];
980 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
983 m->m_len = m->m_pkthdr.len = MCLBYTES;
984 m_adj(m, ETHER_ALIGN);
986 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
987 sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
991 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
993 rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
994 if (rxd->rx_m != NULL) {
995 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
996 BUS_DMASYNC_POSTREAD);
997 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
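/*
 * The new mbuf was loaded into the spare DMA map above; swap the spare
 * with this slot's map so a failed load can never leave the ring slot
 * unmapped.
 */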
999 map = rxd->rx_dmamap;
1000 rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
1001 sc_if->sk_cdata.sk_rx_sparemap = map;
1002 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
1003 BUS_DMASYNC_PREREAD);
1005 r = &sc_if->sk_rdata.sk_rx_ring[idx];
1006 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
1007 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
1008 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
1014 sk_jumbo_newbuf(sc_if, idx)
1015 struct sk_if_softc *sc_if;
1018 struct sk_rx_desc *r;
1019 struct sk_rxdesc *rxd;
1021 bus_dma_segment_t segs[1];
1025 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1028 if ((m->m_flags & M_EXT) == 0) {
1032 m->m_pkthdr.len = m->m_len = MJUM9BYTES;
1034 * Adjust alignment so packet payload begins on a
1035 * longword boundary. Mandatory for Alpha, useful on
1038 m_adj(m, ETHER_ALIGN);
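/*
 * (ETHER_ALIGN is 2: offsetting the 14-byte Ethernet header by two
 * bytes lands the IP header on a 32-bit boundary.)
 */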
1040 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
1041 sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1045 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1047 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
1048 if (rxd->rx_m != NULL) {
1049 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
1050 BUS_DMASYNC_POSTREAD);
1051 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
1054 map = rxd->rx_dmamap;
1055 rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
1056 sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
1057 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
1058 BUS_DMASYNC_PREREAD);
1060 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
1061 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
1062 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
1063 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
1069 * Set media options.
1075 struct sk_if_softc *sc_if = ifp->if_softc;
1076 struct mii_data *mii;
1078 mii = device_get_softc(sc_if->sk_miibus);
1086 * Report current media status.
1089 sk_ifmedia_sts(ifp, ifmr)
1091 struct ifmediareq *ifmr;
1093 struct sk_if_softc *sc_if;
1094 struct mii_data *mii;
1096 sc_if = ifp->if_softc;
1097 mii = device_get_softc(sc_if->sk_miibus);
1100 ifmr->ifm_active = mii->mii_media_active;
1101 ifmr->ifm_status = mii->mii_media_status;
1107 sk_ioctl(ifp, command, data)
1112 struct sk_if_softc *sc_if = ifp->if_softc;
1113 struct ifreq *ifr = (struct ifreq *) data;
1115 struct mii_data *mii;
1120 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU)
1122 else if (ifp->if_mtu != ifr->ifr_mtu) {
1123 if (sc_if->sk_jumbo_disable != 0 &&
1124 ifr->ifr_mtu > SK_MAX_FRAMELEN)
1128 ifp->if_mtu = ifr->ifr_mtu;
1129 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1130 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1131 sk_init_locked(sc_if);
1133 SK_IF_UNLOCK(sc_if);
1139 if (ifp->if_flags & IFF_UP) {
1140 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1141 if ((ifp->if_flags ^ sc_if->sk_if_flags)
1142 & (IFF_PROMISC | IFF_ALLMULTI))
1145 sk_init_locked(sc_if);
1147 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1150 sc_if->sk_if_flags = ifp->if_flags;
1151 SK_IF_UNLOCK(sc_if);
1156 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1158 SK_IF_UNLOCK(sc_if);
1162 mii = device_get_softc(sc_if->sk_miibus);
1163 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1167 if (sc_if->sk_softc->sk_type == SK_GENESIS) {
1168 SK_IF_UNLOCK(sc_if);
1171 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1172 if (mask & IFCAP_HWCSUM) {
1173 ifp->if_capenable ^= IFCAP_HWCSUM;
1174 if (IFCAP_HWCSUM & ifp->if_capenable &&
1175 IFCAP_HWCSUM & ifp->if_capabilities)
1176 ifp->if_hwassist = SK_CSUM_FEATURES;
1178 ifp->if_hwassist = 0;
1180 SK_IF_UNLOCK(sc_if);
1183 error = ether_ioctl(ifp, command, data);
1191 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1192 * IDs against our list and return a device name if we find a match.
1198 struct sk_type *t = sk_devs;
1200 while (t->sk_name != NULL) {
1201 if ((pci_get_vendor(dev) == t->sk_vid) &&
1202 (pci_get_device(dev) == t->sk_did)) {
1204 * Only attach to rev. 2 of the Linksys EG1032 adapter.
1205 * Rev. 3 is supported by re(4).
1207 if ((t->sk_vid == VENDORID_LINKSYS) &&
1208 (t->sk_did == DEVICEID_LINKSYS_EG1032) &&
1209 (pci_get_subdevice(dev) !=
1210 SUBDEVICEID_LINKSYS_EG1032_REV2)) {
1214 device_set_desc(dev, t->sk_name);
1215 return (BUS_PROBE_DEFAULT);
1224 * Force the GEnesis into reset, then bring it out of reset.
1228 struct sk_softc *sc;
1231 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1232 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1233 if (SK_YUKON_FAMILY(sc->sk_type))
1234 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1237 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1239 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1240 if (SK_YUKON_FAMILY(sc->sk_type))
1241 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1243 if (sc->sk_type == SK_GENESIS) {
1244 /* Configure packet arbiter */
1245 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1246 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1247 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1248 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1249 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1252 /* Enable RAM interface */
1253 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1256 * Configure interrupt moderation. The moderation timer
1257 * defers interrupts specified in the interrupt moderation
1258 * timer mask based on the timeout specified in the interrupt
1259 * moderation timer init register. Each bit in the timer
1260 * register represents one tick, so to specify a timeout in
1261 * microseconds, we have to multiply by the correct number of
1262 * ticks-per-microsecond.
1264 switch (sc->sk_type) {
1266 sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
1269 sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
1273 device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
1275 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
1277 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1278 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1279 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
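/*
 * Worked example (illustrative): with sk_int_mod at 100 microseconds
 * on a GEnesis part, SK_IM_USECS() above yields
 * 100 * SK_IMTIMER_TICKS_GENESIS ticks for SK_IMTIMERINIT; Yukon parts
 * use SK_IMTIMER_TICKS_YUKON since their timer runs at a different
 * clock rate.
 */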
1288 struct sk_softc *sc;
1290 sc = device_get_softc(device_get_parent(dev));
1293 * Not much to do here. We always know there will be
1294 * at least one XMAC present, and if there are two,
1295 * skc_attach() will create a second device instance
1298 switch (sc->sk_type) {
1300 device_set_desc(dev, "XaQti Corp. XMAC II");
1305 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1309 return (BUS_PROBE_DEFAULT);
1313 * Each XMAC chip is attached as a separate logical IP interface.
1314 * Single port cards will have only one logical interface of course.
1320 struct sk_softc *sc;
1321 struct sk_if_softc *sc_if;
1330 sc_if = device_get_softc(dev);
1331 sc = device_get_softc(device_get_parent(dev));
1332 port = *(int *)device_get_ivars(dev);
1334 sc_if->sk_if_dev = dev;
1335 sc_if->sk_port = port;
1336 sc_if->sk_softc = sc;
1337 sc->sk_if[port] = sc_if;
1338 if (port == SK_PORT_A)
1339 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1340 if (port == SK_PORT_B)
1341 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1343 callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
1344 callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);
1346 if (sk_dma_alloc(sc_if) != 0) {
1350 sk_dma_jumbo_alloc(sc_if);
1352 ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
1354 device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
1358 ifp->if_softc = sc_if;
1359 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1360 ifp->if_mtu = ETHERMTU;
1361 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1363 * SK_GENESIS has a bug in checksum offload (noted in the Linux driver).
1365 if (sc_if->sk_softc->sk_type != SK_GENESIS) {
1366 ifp->if_capabilities = IFCAP_HWCSUM;
1367 ifp->if_hwassist = SK_CSUM_FEATURES;
1369 ifp->if_capabilities = 0;
1370 ifp->if_hwassist = 0;
1372 ifp->if_capenable = ifp->if_capabilities;
1373 ifp->if_ioctl = sk_ioctl;
1374 ifp->if_start = sk_start;
1376 ifp->if_watchdog = NULL;
1377 ifp->if_init = sk_init;
1378 IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1379 ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
1380 IFQ_SET_READY(&ifp->if_snd);
1383 * Get station address for this interface. Note that
1384 * dual port cards actually come with three station
1385 * addresses: one for each port, plus an extra. The
1386 * extra one is used by the SysKonnect driver software
1387 * as a 'virtual' station address for when both ports
1388 * are operating in failover mode. Currently we don't
1389 * use this extra address.
1392 for (i = 0; i < ETHER_ADDR_LEN; i++)
1394 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1397 * Set up RAM buffer addresses. The NIC will have a certain
1398 * amount of SRAM on it, somewhere between 512K and 2MB. We
1399 * need to divide this up a) between the transmitter and
1400 * receiver and b) between the two XMACs, if this is a
1401 * dual port NIC. Our algorithm is to divide up the memory
1402 * evenly so that everyone gets a fair share.
1404 * Just to be contrary, Yukon2 appears to have separate memory
1407 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1408 u_int32_t chunk, val;
1410 chunk = sc->sk_ramsize / 2;
1411 val = sc->sk_rboff / sizeof(u_int64_t);
1412 sc_if->sk_rx_ramstart = val;
1413 val += (chunk / sizeof(u_int64_t));
1414 sc_if->sk_rx_ramend = val - 1;
1415 sc_if->sk_tx_ramstart = val;
1416 val += (chunk / sizeof(u_int64_t));
1417 sc_if->sk_tx_ramend = val - 1;
1419 u_int32_t chunk, val;
1421 chunk = sc->sk_ramsize / 4;
1422 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1424 sc_if->sk_rx_ramstart = val;
1425 val += (chunk / sizeof(u_int64_t));
1426 sc_if->sk_rx_ramend = val - 1;
1427 sc_if->sk_tx_ramstart = val;
1428 val += (chunk / sizeof(u_int64_t));
1429 sc_if->sk_tx_ramend = val - 1;
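/*
 * Worked example (illustrative): a dual port card with 1MB of SRAM
 * gives chunk = 256K, so each port receives a 256K Rx region followed
 * by a 256K Tx region, with all start/end values expressed in 8-byte
 * (sizeof(u_int64_t)) units as the RAM buffer registers expect.
 */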
1432 /* Read and save PHY type and set PHY address */
1433 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1434 if (!SK_YUKON_FAMILY(sc->sk_type)) {
1435 switch (sc_if->sk_phytype) {
1436 case SK_PHYTYPE_XMAC:
1437 sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1439 case SK_PHYTYPE_BCOM:
1440 sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1443 device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
1446 SK_IF_UNLOCK(sc_if);
1450 if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
1451 sc->sk_pmd != 'S') {
1452 /* not initialized, punt */
1453 sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
1454 sc->sk_coppertype = 1;
1457 sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1459 if (!(sc->sk_coppertype))
1460 sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
1464 * Call MI attach routine. Can't hold locks when calling into ether_*.
1466 SK_IF_UNLOCK(sc_if);
1467 ether_ifattach(ifp, eaddr);
1471 * The hardware should be ready for VLAN_MTU by default:
1472 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
1473 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
1476 ifp->if_capabilities |= IFCAP_VLAN_MTU;
1477 ifp->if_capenable |= IFCAP_VLAN_MTU;
1479 * Tell the upper layer(s) we support long frames.
1480 * Must appear after the call to ether_ifattach() because
1481 * ether_ifattach() sets ifi_hdrlen to the default value.
1483 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1488 switch (sc->sk_type) {
1490 sk_init_xmac(sc_if);
1495 sk_init_yukon(sc_if);
1499 SK_IF_UNLOCK(sc_if);
1500 if (mii_phy_probe(dev, &sc_if->sk_miibus,
1501 sk_ifmedia_upd, sk_ifmedia_sts)) {
1502 device_printf(sc_if->sk_if_dev, "no PHY found!\n");
1503 ether_ifdetach(ifp);
1510 /* Access should be ok even though lock has been dropped */
1511 sc->sk_if[port] = NULL;
1519 * Attach the interface. Allocate softc structures, do ifmedia
1520 * setup and ethernet/BPF attach.
1526 struct sk_softc *sc;
1527 int error = 0, *port;
1529 const char *pname = NULL;
1532 sc = device_get_softc(dev);
1535 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1537 mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
1539 * Map control/status registers.
1541 pci_enable_busmaster(dev);
1543 /* Allocate resources */
1544 #ifdef SK_USEIOSPACE
1545 sc->sk_res_spec = sk_res_spec_io;
1547 sc->sk_res_spec = sk_res_spec_mem;
1549 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
1551 if (sc->sk_res_spec == sk_res_spec_mem)
1552 sc->sk_res_spec = sk_res_spec_io;
1554 sc->sk_res_spec = sk_res_spec_mem;
1555 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
1557 device_printf(dev, "couldn't allocate %s resources\n",
1558 sc->sk_res_spec == sk_res_spec_mem ? "memory" :
1564 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1565 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;
1567 /* Bail out if chip is not recognized. */
1568 if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
1569 device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
1570 sc->sk_type, sc->sk_rev);
1575 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1576 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1577 OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
1578 &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
1579 "SK interrupt moderation");
1581 /* Pull in device tunables. */
1582 sc->sk_int_mod = SK_IM_DEFAULT;
1583 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1584 "int_mod", &sc->sk_int_mod);
1586 if (sc->sk_int_mod < SK_IM_MIN ||
1587 sc->sk_int_mod > SK_IM_MAX) {
1588 device_printf(dev, "int_mod value out of range; "
1589 "using default: %d\n", SK_IM_DEFAULT);
1590 sc->sk_int_mod = SK_IM_DEFAULT;
1594 /* Reset the adapter. */
1597 skrs = sk_win_read_1(sc, SK_EPROM0);
1598 if (sc->sk_type == SK_GENESIS) {
1599 /* Read and save RAM size and RAMbuffer offset */
1601 case SK_RAMSIZE_512K_64:
1602 sc->sk_ramsize = 0x80000;
1603 sc->sk_rboff = SK_RBOFF_0;
1605 case SK_RAMSIZE_1024K_64:
1606 sc->sk_ramsize = 0x100000;
1607 sc->sk_rboff = SK_RBOFF_80000;
1609 case SK_RAMSIZE_1024K_128:
1610 sc->sk_ramsize = 0x100000;
1611 sc->sk_rboff = SK_RBOFF_0;
1613 case SK_RAMSIZE_2048K_128:
1614 sc->sk_ramsize = 0x200000;
1615 sc->sk_rboff = SK_RBOFF_0;
1618 device_printf(dev, "unknown ram size: %d\n", skrs);
1622 } else { /* SK_YUKON_FAMILY */
1624 sc->sk_ramsize = 0x20000;
1626 sc->sk_ramsize = skrs * (1<<12);
1627 sc->sk_rboff = SK_RBOFF_0;
1630 /* Read and save physical media type */
1631 sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);
1633 if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
1634 sc->sk_coppertype = 1;
1636 sc->sk_coppertype = 0;
1638 /* Determine whether to use the VPD PN for the name or just make one
1639 * up; the Marvell Yukon VPD PN is frequently bogus. */
1640 switch (pci_get_device(dev)) {
1641 case DEVICEID_SK_V1:
1642 case DEVICEID_BELKIN_5005:
1643 case DEVICEID_3COM_3C940:
1644 case DEVICEID_LINKSYS_EG1032:
1645 case DEVICEID_DLINK_DGE530T_A1:
1646 case DEVICEID_DLINK_DGE530T_B1:
1647 /* Stay with VPD PN. */
1648 (void) pci_get_vpd_ident(dev, &pname);
1650 case DEVICEID_SK_V2:
1651 /* YUKON VPD PN might bear no resemblance to reality. */
1652 switch (sc->sk_type) {
1654 /* Stay with VPD PN. */
1655 (void) pci_get_vpd_ident(dev, &pname);
1658 pname = "Marvell Yukon Gigabit Ethernet";
1661 pname = "Marvell Yukon Lite Gigabit Ethernet";
1664 pname = "Marvell Yukon LP Gigabit Ethernet";
1667 pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
1671 /* Yukon Lite Rev. A0 needs a special test. */
1672 if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1676 /* Save flash address register before testing. */
1677 far = sk_win_read_4(sc, SK_EP_ADDR);
1679 sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
1680 testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);
1682 if (testbyte != 0x00) {
1683 /* Yukon Lite Rev. A0 detected. */
1684 sc->sk_type = SK_YUKON_LITE;
1685 sc->sk_rev = SK_YUKON_LITE_REV_A0;
1686 /* Restore flash address register. */
1687 sk_win_write_4(sc, SK_EP_ADDR, far);
1692 device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
1693 "chipver=%02x, rev=%x\n",
1694 pci_get_vendor(dev), pci_get_device(dev),
1695 sc->sk_type, sc->sk_rev);
1700 if (sc->sk_type == SK_YUKON_LITE) {
1701 switch (sc->sk_rev) {
1702 case SK_YUKON_LITE_REV_A0:
1705 case SK_YUKON_LITE_REV_A1:
1708 case SK_YUKON_LITE_REV_A3:
1719 /* Announce the product name and more VPD data if available. */
1721 device_printf(dev, "%s rev. %s(0x%x)\n",
1722 pname, revstr, sc->sk_rev);
1725 device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type);
1726 device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev);
1727 device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
1728 device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
1731 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1732 if (sc->sk_devs[SK_PORT_A] == NULL) {
1733 device_printf(dev, "failed to add child for PORT_A\n");
1737 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1739 device_printf(dev, "failed to allocate memory for "
1740 "ivars of PORT_A\n");
1745 device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1747 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1748 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1749 if (sc->sk_devs[SK_PORT_B] == NULL) {
1750 device_printf(dev, "failed to add child for PORT_B\n");
1754 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1756 device_printf(dev, "failed to allocate memory for "
1757 "ivars of PORT_B\n");
1762 device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1765 /* Turn on the 'driver is loaded' LED. */
1766 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1768 error = bus_generic_attach(dev);
1770 device_printf(dev, "failed to attach port(s)\n");
1774 /* Hook interrupt last to avoid having to lock softc */
1775 error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
1776 NULL, sk_intr, sc, &sc->sk_intrhand);
1779 device_printf(dev, "couldn't set up irq\n");
1791 * Shutdown hardware and free up resources. This can be called any
1792 * time after the mutex has been initialized. It is called in both
1793 * the error case in attach and the normal detach case so it needs
1794 * to be careful about only freeing resources that have actually been
1801 struct sk_if_softc *sc_if;
1804 sc_if = device_get_softc(dev);
1805 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1806 ("sk mutex not initialized in sk_detach"));
1809 ifp = sc_if->sk_ifp;
1810 /* These should only be active if attach_xmac succeeded */
1811 if (device_is_attached(dev)) {
1813 /* Can't hold locks while calling detach */
1814 SK_IF_UNLOCK(sc_if);
1815 callout_drain(&sc_if->sk_tick_ch);
1816 callout_drain(&sc_if->sk_watchdog_ch);
1817 ether_ifdetach(ifp);
1823 * We're generally called from skc_detach() which is using
1824 * device_delete_child() to get to here. It's already trashed
1825 * miibus for us, so don't do it here or we'll panic.
1828 if (sc_if->sk_miibus != NULL)
1829 device_delete_child(dev, sc_if->sk_miibus);
1831 bus_generic_detach(dev);
1832 sk_dma_jumbo_free(sc_if);
1834 SK_IF_UNLOCK(sc_if);
1843 struct sk_softc *sc;
1845 sc = device_get_softc(dev);
1846 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
1848 if (device_is_alive(dev)) {
1849 if (sc->sk_devs[SK_PORT_A] != NULL) {
1850 free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
1851 device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1853 if (sc->sk_devs[SK_PORT_B] != NULL) {
1854 free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
1855 device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1857 bus_generic_detach(dev);
1860 if (sc->sk_intrhand)
1861 bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
1862 bus_release_resources(dev, sc->sk_res_spec, sc->sk_res);
1864 mtx_destroy(&sc->sk_mii_mtx);
1865 mtx_destroy(&sc->sk_mtx);
1870 struct sk_dmamap_arg {
1871 bus_addr_t sk_busaddr;
1875 sk_dmamap_cb(arg, segs, nseg, error)
1877 bus_dma_segment_t *segs;
1881 struct sk_dmamap_arg *ctx;
1887 ctx->sk_busaddr = segs[0].ds_addr;
1891 * Allocate jumbo buffer storage. The SysKonnect adapters support
1892 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1893 * use them in their drivers. In order for us to use them, we need
1894 * large 9K receive buffers; standard mbuf clusters, however, are only
1895 * 2048 bytes in size. Consequently, we need to allocate and manage
1896 * our own jumbo buffer pool. Fortunately, this does not require an
1897 * excessive amount of additional code.
1901 struct sk_if_softc *sc_if;
1903 struct sk_dmamap_arg ctx;
1904 struct sk_txdesc *txd;
1905 struct sk_rxdesc *rxd;
1908 /* create parent tag */
1911 * This driver should use BUS_SPACE_MAXADDR for the lowaddr argument
1912 * in bus_dma_tag_create(9), as the NIC should support DAC mode.
1913 * However, bz@ reported that it does not work on amd64 with > 4GB of
1914 * RAM. Until we have more clues about the breakage, disable DAC mode
1915 * by limiting DMA addresses to the 32-bit address space.
1917 error = bus_dma_tag_create(
1918 bus_get_dma_tag(sc_if->sk_if_dev),/* parent */
1919 1, 0, /* algnmnt, boundary */
1920 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1921 BUS_SPACE_MAXADDR, /* highaddr */
1922 NULL, NULL, /* filter, filterarg */
1923 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1925 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1927 NULL, NULL, /* lockfunc, lockarg */
1928 &sc_if->sk_cdata.sk_parent_tag);
1930 device_printf(sc_if->sk_if_dev,
1931 "failed to create parent DMA tag\n");
1935 /* create tag for Tx ring */
1936 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1937 SK_RING_ALIGN, 0, /* algnmnt, boundary */
1938 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1939 BUS_SPACE_MAXADDR, /* highaddr */
1940 NULL, NULL, /* filter, filterarg */
1941 SK_TX_RING_SZ, /* maxsize */
1943 SK_TX_RING_SZ, /* maxsegsize */
1945 NULL, NULL, /* lockfunc, lockarg */
1946 &sc_if->sk_cdata.sk_tx_ring_tag);
1948 device_printf(sc_if->sk_if_dev,
1949 "failed to allocate Tx ring DMA tag\n");
1953 /* create tag for Rx ring */
1954 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1955 SK_RING_ALIGN, 0, /* algnmnt, boundary */
1956 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1957 BUS_SPACE_MAXADDR, /* highaddr */
1958 NULL, NULL, /* filter, filterarg */
1959 SK_RX_RING_SZ, /* maxsize */
1961 SK_RX_RING_SZ, /* maxsegsize */
1963 NULL, NULL, /* lockfunc, lockarg */
1964 &sc_if->sk_cdata.sk_rx_ring_tag);
1966 device_printf(sc_if->sk_if_dev,
1967 "failed to allocate Rx ring DMA tag\n");
1971 /* create tag for Tx buffers */
1972 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1973 1, 0, /* algnmnt, boundary */
1974 BUS_SPACE_MAXADDR, /* lowaddr */
1975 BUS_SPACE_MAXADDR, /* highaddr */
1976 NULL, NULL, /* filter, filterarg */
1977 MCLBYTES * SK_MAXTXSEGS, /* maxsize */
1978 SK_MAXTXSEGS, /* nsegments */
1979 MCLBYTES, /* maxsegsize */
1981 NULL, NULL, /* lockfunc, lockarg */
1982 &sc_if->sk_cdata.sk_tx_tag);
1984 device_printf(sc_if->sk_if_dev,
1985 "failed to allocate Tx DMA tag\n");
1989 /* create tag for Rx buffers */
1990 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1991 1, 0, /* algnmnt, boundary */
1992 BUS_SPACE_MAXADDR, /* lowaddr */
1993 BUS_SPACE_MAXADDR, /* highaddr */
1994 NULL, NULL, /* filter, filterarg */
1995 MCLBYTES, /* maxsize */
1997 MCLBYTES, /* maxsegsize */
1999 NULL, NULL, /* lockfunc, lockarg */
2000 &sc_if->sk_cdata.sk_rx_tag);
2002 device_printf(sc_if->sk_if_dev,
2003 "failed to allocate Rx DMA tag\n");
2007 /* allocate DMA'able memory and load the DMA map for Tx ring */
2008 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
2009 (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2010 &sc_if->sk_cdata.sk_tx_ring_map);
2012 device_printf(sc_if->sk_if_dev,
2013 "failed to allocate DMA'able memory for Tx ring\n");
2018 error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
2019 sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
2020 SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2022 device_printf(sc_if->sk_if_dev,
2023 "failed to load DMA'able memory for Tx ring\n");
2026 sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;
2028 /* allocate DMA'able memory and load the DMA map for Rx ring */
2029 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
2030 (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2031 &sc_if->sk_cdata.sk_rx_ring_map);
2033 device_printf(sc_if->sk_if_dev,
2034 "failed to allocate DMA'able memory for Rx ring\n");
2039 error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
2040 sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
2041 SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2043 device_printf(sc_if->sk_if_dev,
2044 "failed to load DMA'able memory for Rx ring\n");
2047 sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;
2049 /* create DMA maps for Tx buffers */
2050 for (i = 0; i < SK_TX_RING_CNT; i++) {
2051 txd = &sc_if->sk_cdata.sk_txdesc[i];
2053 txd->tx_dmamap = NULL;
2054 error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
2057 device_printf(sc_if->sk_if_dev,
2058 "failed to create Tx dmamap\n");
2063 /* create DMA maps for Rx buffers */
2064 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2065 &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
2066 device_printf(sc_if->sk_if_dev,
2067 "failed to create spare Rx dmamap\n");
2070 for (i = 0; i < SK_RX_RING_CNT; i++) {
2071 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2073 rxd->rx_dmamap = NULL;
2074 error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2077 device_printf(sc_if->sk_if_dev,
2078 "failed to create Rx dmamap\n");
2088 sk_dma_jumbo_alloc(sc_if)
2089 struct sk_if_softc *sc_if;
2091 struct sk_dmamap_arg ctx;
2092 struct sk_rxdesc *jrxd;
2095 if (jumbo_disable != 0) {
2096 device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n");
2097 sc_if->sk_jumbo_disable = 1;
2100 /* create tag for jumbo Rx ring */
2101 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2102 SK_RING_ALIGN, 0, /* algnmnt, boundary */
2103 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2104 BUS_SPACE_MAXADDR, /* highaddr */
2105 NULL, NULL, /* filter, filterarg */
2106 SK_JUMBO_RX_RING_SZ, /* maxsize */
2108 SK_JUMBO_RX_RING_SZ, /* maxsegsize */
2110 NULL, NULL, /* lockfunc, lockarg */
2111 &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2113 device_printf(sc_if->sk_if_dev,
2114 "failed to allocate jumbo Rx ring DMA tag\n");
2118 /* create tag for jumbo Rx buffers */
2119 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2120 1, 0, /* algnmnt, boundary */
2121 BUS_SPACE_MAXADDR, /* lowaddr */
2122 BUS_SPACE_MAXADDR, /* highaddr */
2123 NULL, NULL, /* filter, filterarg */
2124 MJUM9BYTES, /* maxsize */
2126 MJUM9BYTES, /* maxsegsize */
2128 NULL, NULL, /* lockfunc, lockarg */
2129 &sc_if->sk_cdata.sk_jumbo_rx_tag);
2131 device_printf(sc_if->sk_if_dev,
2132 "failed to allocate jumbo Rx DMA tag\n");
2136 /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
2137 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2138 (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring,
2139 BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2141 device_printf(sc_if->sk_if_dev,
2142 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2147 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2148 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2149 sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
2150 &ctx, BUS_DMA_NOWAIT);
2152 device_printf(sc_if->sk_if_dev,
2153 "failed to load DMA'able memory for jumbo Rx ring\n");
2156 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
2158 /* create DMA maps for jumbo Rx buffers */
2159 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2160 &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
2161 device_printf(sc_if->sk_if_dev,
2162 "failed to create spare jumbo Rx dmamap\n");
2165 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2166 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2168 jrxd->rx_dmamap = NULL;
2169 error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2172 device_printf(sc_if->sk_if_dev,
2173 "failed to create jumbo Rx dmamap\n");
2181 sk_dma_jumbo_free(sc_if);
2182 device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to "
2183 "resource shortage\n");
2184 sc_if->sk_jumbo_disable = 1;
2190 struct sk_if_softc *sc_if;
2192 struct sk_txdesc *txd;
2193 struct sk_rxdesc *rxd;
2197 if (sc_if->sk_cdata.sk_tx_ring_tag) {
2198 if (sc_if->sk_cdata.sk_tx_ring_map)
2199 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
2200 sc_if->sk_cdata.sk_tx_ring_map);
2201 if (sc_if->sk_cdata.sk_tx_ring_map &&
2202 sc_if->sk_rdata.sk_tx_ring)
2203 bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
2204 sc_if->sk_rdata.sk_tx_ring,
2205 sc_if->sk_cdata.sk_tx_ring_map);
2206 sc_if->sk_rdata.sk_tx_ring = NULL;
2207 sc_if->sk_cdata.sk_tx_ring_map = NULL;
2208 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
2209 sc_if->sk_cdata.sk_tx_ring_tag = NULL;
2212 if (sc_if->sk_cdata.sk_rx_ring_tag) {
2213 if (sc_if->sk_cdata.sk_rx_ring_map)
2214 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
2215 sc_if->sk_cdata.sk_rx_ring_map);
2216 if (sc_if->sk_cdata.sk_rx_ring_map &&
2217 sc_if->sk_rdata.sk_rx_ring)
2218 bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
2219 sc_if->sk_rdata.sk_rx_ring,
2220 sc_if->sk_cdata.sk_rx_ring_map);
2221 sc_if->sk_rdata.sk_rx_ring = NULL;
2222 sc_if->sk_cdata.sk_rx_ring_map = NULL;
2223 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
2224 sc_if->sk_cdata.sk_rx_ring_tag = NULL;
2227 if (sc_if->sk_cdata.sk_tx_tag) {
2228 for (i = 0; i < SK_TX_RING_CNT; i++) {
2229 txd = &sc_if->sk_cdata.sk_txdesc[i];
2230 if (txd->tx_dmamap) {
2231 bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
2233 txd->tx_dmamap = NULL;
2236 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
2237 sc_if->sk_cdata.sk_tx_tag = NULL;
2240 if (sc_if->sk_cdata.sk_rx_tag) {
2241 for (i = 0; i < SK_RX_RING_CNT; i++) {
2242 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2243 if (rxd->rx_dmamap) {
2244 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2246 rxd->rx_dmamap = NULL;
2249 if (sc_if->sk_cdata.sk_rx_sparemap) {
2250 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2251 sc_if->sk_cdata.sk_rx_sparemap);
2252 sc_if->sk_cdata.sk_rx_sparemap = NULL;
2254 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
2255 sc_if->sk_cdata.sk_rx_tag = NULL;
2258 if (sc_if->sk_cdata.sk_parent_tag) {
2259 bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
2260 sc_if->sk_cdata.sk_parent_tag = NULL;
2265 sk_dma_jumbo_free(sc_if)
2266 struct sk_if_softc *sc_if;
2268 struct sk_rxdesc *jrxd;
2272 if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
2273 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map)
2274 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2275 sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2276 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map &&
2277 sc_if->sk_rdata.sk_jumbo_rx_ring)
2278 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2279 sc_if->sk_rdata.sk_jumbo_rx_ring,
2280 sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2281 sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
2282 sc_if->sk_cdata.sk_jumbo_rx_ring_map = NULL;
2283 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2284 sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
2287 /* jumbo Rx buffers */
2288 if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
2289 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2290 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2291 if (jrxd->rx_dmamap) {
2293 sc_if->sk_cdata.sk_jumbo_rx_tag,
2295 jrxd->rx_dmamap = NULL;
2298 if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
2299 bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
2300 sc_if->sk_cdata.sk_jumbo_rx_sparemap);
2301 sc_if->sk_cdata.sk_jumbo_rx_sparemap = NULL;
2303 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
2304 sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
2309 sk_txcksum(ifp, m, f)
2312 struct sk_tx_desc *f;
2318 offset = sizeof(struct ip) + ETHER_HDR_LEN;
2319 for (; m && m->m_len == 0; m = m->m_next)
2321 if (m == NULL || m->m_len < ETHER_HDR_LEN) {
2322 if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
2323 /* checksum may be corrupted */
2326 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
2327 if (m->m_len != ETHER_HDR_LEN) {
2328 if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
2330 /* checksum may be corrupted */
2333 for (m = m->m_next; m && m->m_len == 0; m = m->m_next)
2336 offset = sizeof(struct ip) + ETHER_HDR_LEN;
2337 /* checksum may be corrupted */
2340 ip = mtod(m, struct ip *);
2342 p = mtod(m, u_int8_t *);
2344 ip = (struct ip *)p;
2346 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
2349 f->sk_csum_startval = 0;
2350 f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) |
2354 static int
2355 sk_encap(sc_if, m_head)
2356 struct sk_if_softc *sc_if;
2357 struct mbuf **m_head;
2358 {
2359 struct sk_txdesc *txd;
2360 struct sk_tx_desc *f = NULL;
2361 struct mbuf *m;
2362 bus_dma_segment_t txsegs[SK_MAXTXSEGS];
2363 u_int32_t cflags, frag, si, sk_ctl;
2364 int error, i, nseg;
2366 SK_IF_LOCK_ASSERT(sc_if);
2368 if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
2369 return (ENOBUFS);
2371 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2372 txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2373 if (error == EFBIG) {
2374 m = m_defrag(*m_head, M_DONTWAIT);
2375 if (m == NULL) {
2376 m_freem(*m_head);
2377 *m_head = NULL;
2378 return (ENOMEM);
2379 }
2380 *m_head = m;
2381 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2382 txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2383 if (error != 0) {
2384 m_freem(*m_head);
2385 *m_head = NULL;
2386 return (error);
2387 }
2388 } else if (error != 0)
2389 return (error);
2395 if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
2396 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2397 return (ENOBUFS);
2398 }
2400 m = *m_head;
2401 if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0)
2402 cflags = SK_OPCODE_CSUM;
2403 else
2404 cflags = SK_OPCODE_DEFAULT;
2405 si = frag = sc_if->sk_cdata.sk_tx_prod;
2406 for (i = 0; i < nseg; i++) {
2407 f = &sc_if->sk_rdata.sk_tx_ring[frag];
2408 f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
2409 f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
2410 sk_ctl = txsegs[i].ds_len | cflags;
2411 if (i == 0) {
2412 if (cflags == SK_OPCODE_CSUM)
2413 sk_txcksum(sc_if->sk_ifp, m, f);
2414 sk_ctl |= SK_TXCTL_FIRSTFRAG;
2415 } else
2416 sk_ctl |= SK_TXCTL_OWN;
2417 f->sk_ctl = htole32(sk_ctl);
2418 sc_if->sk_cdata.sk_tx_cnt++;
2419 SK_INC(frag, SK_TX_RING_CNT);
2420 }
2421 sc_if->sk_cdata.sk_tx_prod = frag;
2423 /* set EOF on the last descriptor */
2424 frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
2425 f = &sc_if->sk_rdata.sk_tx_ring[frag];
2426 f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);
2428 /* turn over ownership of the first descriptor to the NIC */
2429 f = &sc_if->sk_rdata.sk_tx_ring[si];
2430 f->sk_ctl |= htole32(SK_TXCTL_OWN);
2432 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
2433 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
2434 txd->tx_m = m;
2436 /* sync descriptors */
2437 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2438 BUS_DMASYNC_PREWRITE);
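/*
 * A brief note on the two syncs here: the PREWRITE sync on the mbuf map
 * above flushes the frame data to memory before the NIC reads it, while
 * the PREREAD | PREWRITE sync on the ring map below covers the
 * descriptor ring, which the CPU both writes (new descriptors) and
 * later reads back (ownership bits cleared by the NIC, see sk_txeof()).
 */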
2439 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2440 sc_if->sk_cdata.sk_tx_ring_map,
2441 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2443 return (0);
2444 }
2446 static void
2447 sk_start(ifp)
2448 struct ifnet *ifp;
2449 {
2450 struct sk_if_softc *sc_if;
2452 sc_if = ifp->if_softc;
2454 SK_IF_LOCK(sc_if);
2455 sk_start_locked(ifp);
2456 SK_IF_UNLOCK(sc_if);
2457 }
2461 static void
2462 sk_start_locked(ifp)
2463 struct ifnet *ifp;
2464 {
2465 struct sk_softc *sc;
2466 struct sk_if_softc *sc_if;
2467 struct mbuf *m_head;
2468 int enq;
2470 sc_if = ifp->if_softc;
2471 sc = sc_if->sk_softc;
2473 SK_IF_LOCK_ASSERT(sc_if);
2475 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2476 sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
2477 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2478 if (m_head == NULL)
2479 break;
2481 /*
2482 * Pack the data into the transmit ring. If we
2483 * don't have room, set the OACTIVE flag and wait
2484 * for the NIC to drain the ring.
2485 */
2486 if (sk_encap(sc_if, &m_head)) {
2487 if (m_head == NULL)
2488 break;
2489 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2490 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2491 break;
2492 }
2494 enq++;
2495 /*
2496 * If there's a BPF listener, bounce a copy of this frame
2497 * to him.
2498 */
2499 BPF_MTAP(ifp, m_head);
2500 }
2502 if (enq > 0) {
2503 /* Transmit */
2504 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2506 /* Set a timeout in case the chip goes out to lunch. */
2507 sc_if->sk_watchdog_timer = 5;
2508 }
2509 }
2511 static void
2512 sk_watchdog(arg)
2513 void *arg;
2514 {
2516 struct sk_if_softc *sc_if;
2517 struct ifnet *ifp;
2519 ifp = arg;
2520 sc_if = ifp->if_softc;
2522 SK_IF_LOCK_ASSERT(sc_if);
2524 if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer)
2525 goto done;
2527 /*
2528 * Reclaim descriptors first, as there is a possibility of losing
2529 * Tx completion interrupts.
2530 */
2531 sk_txeof(sc_if);
2532 if (sc_if->sk_cdata.sk_tx_cnt != 0) {
2533 if_printf(sc_if->sk_ifp, "watchdog timeout\n");
2534 ifp->if_oerrors++;
2535 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2536 sk_init_locked(sc_if);
2537 }
2539 done:
2540 callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
2541 }
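/*
 * Note: the watchdog is driven by a callout that re-arms itself every
 * second; a transmit is only treated as hung when the per-interface
 * counter armed in sk_start_locked() counts down to zero and sk_txeof()
 * still cannot reclaim all outstanding descriptors.
 */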
2545 static int
2546 sk_shutdown(dev)
2547 device_t dev;
2548 {
2549 struct sk_softc *sc;
2551 sc = device_get_softc(dev);
2552 SK_LOCK(sc);
2554 /* Turn off the 'driver is loaded' LED. */
2555 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2557 /*
2558 * Reset the GEnesis controller. Doing this should also
2559 * assert the resets on the attached XMAC(s).
2560 */
2561 sk_reset(sc);
2562 SK_UNLOCK(sc);
2564 return (0);
2565 }
2567 static int
2568 sk_suspend(dev)
2569 device_t dev;
2570 {
2571 struct sk_softc *sc;
2572 struct sk_if_softc *sc_if0, *sc_if1;
2573 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2575 sc = device_get_softc(dev);
2577 SK_LOCK(sc);
2579 sc_if0 = sc->sk_if[SK_PORT_A];
2580 sc_if1 = sc->sk_if[SK_PORT_B];
2581 if (sc_if0 != NULL)
2582 ifp0 = sc_if0->sk_ifp;
2583 if (sc_if1 != NULL)
2584 ifp1 = sc_if1->sk_ifp;
2585 if (ifp0 != NULL)
2586 sk_stop(sc_if0);
2587 if (ifp1 != NULL)
2588 sk_stop(sc_if1);
2589 sc->sk_suspended = 1;
2591 SK_UNLOCK(sc);
2593 return (0);
2594 }
2596 static int
2597 sk_resume(dev)
2598 device_t dev;
2599 {
2600 struct sk_softc *sc;
2601 struct sk_if_softc *sc_if0, *sc_if1;
2602 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2604 sc = device_get_softc(dev);
2606 SK_LOCK(sc);
2608 sc_if0 = sc->sk_if[SK_PORT_A];
2609 sc_if1 = sc->sk_if[SK_PORT_B];
2610 if (sc_if0 != NULL)
2611 ifp0 = sc_if0->sk_ifp;
2612 if (sc_if1 != NULL)
2613 ifp1 = sc_if1->sk_ifp;
2614 if (ifp0 != NULL && ifp0->if_flags & IFF_UP)
2615 sk_init_locked(sc_if0);
2616 if (ifp1 != NULL && ifp1->if_flags & IFF_UP)
2617 sk_init_locked(sc_if1);
2618 sc->sk_suspended = 0;
2620 SK_UNLOCK(sc);
2622 return (0);
2623 }
2625 /*
2626 * According to the data sheet for the SK-NET GENESIS, the hardware can
2627 * compute two Rx checksums at the same time (each checksum start position
2628 * is programmed in the Rx descriptors). However, TCP/UDP checksumming does
2629 * not appear to work, at least on my Yukon hardware. I tried every possible
2630 * way to get a correct checksum value but could not get one. So TCP/UDP
2631 * checksum offload is disabled for the moment and only IP checksum offload
2632 * is implemented.
2633 * As the normal IP header size is 20 bytes, I can't expect this to give an
2634 * increase in throughput; however, it does not seem to hurt performance in
2635 * my testing. If there is more detailed information on the checksum secrets
2636 * of the hardware in question, please contact yongari@FreeBSD.org so that
2637 * TCP/UDP checksum offload support can be added.
2638 */
2639 static __inline void
2640 sk_rxcksum(ifp, m, csum)
2641 struct ifnet *ifp;
2642 struct mbuf *m;
2643 u_int32_t csum;
2644 {
2645 struct ether_header *eh;
2646 struct ip *ip;
2647 int32_t hlen, len, pktlen;
2648 u_int16_t csum1, csum2, ipcsum;
2650 pktlen = m->m_pkthdr.len;
2651 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
2652 return;
2653 eh = mtod(m, struct ether_header *);
2654 if (eh->ether_type != htons(ETHERTYPE_IP))
2655 return;
2656 ip = (struct ip *)(eh + 1);
2657 if (ip->ip_v != IPVERSION)
2658 return;
2659 hlen = ip->ip_hl << 2;
2660 pktlen -= sizeof(struct ether_header);
2661 if (hlen < sizeof(struct ip))
2662 return;
2663 if (ntohs(ip->ip_len) < hlen)
2664 return;
2665 if (ntohs(ip->ip_len) != pktlen)
2666 return;
2668 csum1 = htons(csum & 0xffff);
2669 csum2 = htons((csum >> 16) & 0xffff);
2670 ipcsum = in_addword(csum1, ~csum2 & 0xffff);
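/*
 * A sketch of what happens above (assuming the two hardware sums start
 * at the IP header and at the payload following it, as programmed in
 * the Rx descriptors): adding the first sum to the one's complement of
 * the second effectively subtracts the payload sum, leaving just the
 * sum over the IP header; if that equals 0xffff, the header checksum
 * is valid.
 */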
2671 /* checksum fixup for IP options */
2672 len = hlen - sizeof(struct ip);
2673 if (len > 0) {
2674 /*
2675 * If the second checksum value were correct, we could compute the
2676 * IP checksum with simple math even in the presence of IP options.
2677 * Unfortunately the second checksum value comes back wrong, so we
2678 * can't verify the checksum from it (it seems some magic is needed
2679 * to obtain the correct value). If the second checksum value were
2680 * correct, it would also give us the TCP/UDP checksum here, though
2681 * that would still need a pseudo-header checksum calculation due
2682 * to hardware limitations.
2683 */
2684 return;
2685 }
2686 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2687 if (ipcsum == 0xffff)
2688 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2689 }
2691 static __inline int
2692 sk_rxvalid(sc, stat, len)
2693 struct sk_softc *sc;
2694 u_int32_t stat, len;
2695 {
2697 if (sc->sk_type == SK_GENESIS) {
2698 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
2699 XM_RXSTAT_BYTES(stat) != len)
2700 return (0);
2701 } else {
2702 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
2703 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
2704 YU_RXSTAT_JABBER)) != 0 ||
2705 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
2706 YU_RXSTAT_BYTES(stat) != len)
2707 return (0);
2708 }
2710 return (1);
2711 }
2713 static void
2714 sk_rxeof(sc_if)
2715 struct sk_if_softc *sc_if;
2716 {
2717 struct sk_softc *sc;
2718 struct mbuf *m;
2719 struct ifnet *ifp;
2720 struct sk_rx_desc *cur_rx;
2721 struct sk_rxdesc *rxd;
2722 int cons, prog;
2723 u_int32_t csum, rxstat, sk_ctl;
2725 sc = sc_if->sk_softc;
2726 ifp = sc_if->sk_ifp;
2728 SK_IF_LOCK_ASSERT(sc_if);
2730 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2731 sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);
2733 prog = 0;
2734 for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
2735 prog++, SK_INC(cons, SK_RX_RING_CNT)) {
2736 cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
2737 sk_ctl = le32toh(cur_rx->sk_ctl);
2738 if ((sk_ctl & SK_RXCTL_OWN) != 0)
2739 break;
2740 rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
2741 rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2743 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2744 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2745 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2746 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2747 SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
2748 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2749 ifp->if_ierrors++;
2750 sk_discard_rxbuf(sc_if, cons);
2751 continue;
2752 }
2754 m = rxd->rx_m;
2755 csum = le32toh(cur_rx->sk_csum);
2756 if (sk_newbuf(sc_if, cons) != 0) {
2757 ifp->if_iqdrops++;
2758 /* reuse old buffer */
2759 sk_discard_rxbuf(sc_if, cons);
2760 continue;
2761 }
2762 m->m_pkthdr.rcvif = ifp;
2763 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2764 ifp->if_ipackets++;
2765 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2766 sk_rxcksum(ifp, m, csum);
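/*
 * The interface lock is dropped around the call up the stack so that
 * if_input() can run (and possibly re-enter the driver) without
 * holding the driver lock; it is re-acquired before the ring state is
 * touched again.
 */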
2767 SK_IF_UNLOCK(sc_if);
2768 (*ifp->if_input)(ifp, m);
2769 SK_IF_LOCK(sc_if);
2770 }
2772 if (prog > 0) {
2773 sc_if->sk_cdata.sk_rx_cons = cons;
2774 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2775 sc_if->sk_cdata.sk_rx_ring_map,
2776 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2777 }
2778 }
2780 static void
2781 sk_jumbo_rxeof(sc_if)
2782 struct sk_if_softc *sc_if;
2783 {
2784 struct sk_softc *sc;
2785 struct mbuf *m;
2786 struct ifnet *ifp;
2787 struct sk_rx_desc *cur_rx;
2788 struct sk_rxdesc *jrxd;
2789 int cons, prog;
2790 u_int32_t csum, rxstat, sk_ctl;
2792 sc = sc_if->sk_softc;
2793 ifp = sc_if->sk_ifp;
2795 SK_IF_LOCK_ASSERT(sc_if);
2797 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2798 sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);
2800 prog = 0;
2801 for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
2802 prog < SK_JUMBO_RX_RING_CNT;
2803 prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
2804 cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
2805 sk_ctl = le32toh(cur_rx->sk_ctl);
2806 if ((sk_ctl & SK_RXCTL_OWN) != 0)
2807 break;
2808 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
2809 rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2811 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2812 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2813 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2814 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2815 SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
2816 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2817 ifp->if_ierrors++;
2818 sk_discard_jumbo_rxbuf(sc_if, cons);
2819 continue;
2820 }
2822 m = jrxd->rx_m;
2823 csum = le32toh(cur_rx->sk_csum);
2824 if (sk_jumbo_newbuf(sc_if, cons) != 0) {
2825 ifp->if_iqdrops++;
2826 /* reuse old buffer */
2827 sk_discard_jumbo_rxbuf(sc_if, cons);
2828 continue;
2829 }
2830 m->m_pkthdr.rcvif = ifp;
2831 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2832 ifp->if_ipackets++;
2833 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2834 sk_rxcksum(ifp, m, csum);
2835 SK_IF_UNLOCK(sc_if);
2836 (*ifp->if_input)(ifp, m);
2837 SK_IF_LOCK(sc_if);
2838 }
2840 if (prog > 0) {
2841 sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
2842 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2843 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2844 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2845 }
2846 }
2848 static void
2849 sk_txeof(sc_if)
2850 struct sk_if_softc *sc_if;
2851 {
2852 struct sk_softc *sc;
2853 struct sk_txdesc *txd;
2854 struct sk_tx_desc *cur_tx;
2855 struct ifnet *ifp;
2856 u_int32_t idx, sk_ctl;
2858 sc = sc_if->sk_softc;
2859 ifp = sc_if->sk_ifp;
2861 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2862 if (txd == NULL)
2863 return;
2864 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2865 sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
2866 /*
2867 * Go through our tx ring and free mbufs for those
2868 * frames that have been sent.
2869 */
2870 for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
2871 if (sc_if->sk_cdata.sk_tx_cnt <= 0)
2872 break;
2873 cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
2874 sk_ctl = le32toh(cur_tx->sk_ctl);
2875 if (sk_ctl & SK_TXCTL_OWN)
2876 break;
2877 sc_if->sk_cdata.sk_tx_cnt--;
2878 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2879 if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
2880 continue;
2881 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2882 BUS_DMASYNC_POSTWRITE);
2883 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2885 ifp->if_opackets++;
2886 m_freem(txd->tx_m);
2887 txd->tx_m = NULL;
2888 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
2889 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
2890 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2891 }
2892 sc_if->sk_cdata.sk_tx_cons = idx;
2893 sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
2895 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2896 sc_if->sk_cdata.sk_tx_ring_map,
2897 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2898 }
2900 static void
2901 sk_tick(xsc_if)
2902 void *xsc_if;
2903 {
2904 struct sk_if_softc *sc_if;
2905 struct mii_data *mii;
2906 struct ifnet *ifp;
2907 int i;
2909 sc_if = xsc_if;
2910 ifp = sc_if->sk_ifp;
2911 mii = device_get_softc(sc_if->sk_miibus);
2913 if (!(ifp->if_flags & IFF_UP))
2914 return;
2916 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2917 sk_intr_bcom(sc_if);
2918 return;
2919 }
2921 /*
2922 * According to SysKonnect, the correct way to verify that
2923 * the link has come back up is to poll bit 0 of the GPIO
2924 * register three times. This pin has the signal from the
2925 * link_sync pin connected to it; if we read the same link
2926 * state 3 times in a row, we know the link is up.
2927 */
2928 for (i = 0; i < 3; i++) {
2929 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
2930 break;
2931 }
2933 if (i != 3) {
2934 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
2935 return;
2936 }
2938 /* Turn the GP0 interrupt back on. */
2939 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2940 SK_XM_READ_2(sc_if, XM_ISR);
2941 sk_init_xmac(sc_if);
2942 callout_stop(&sc_if->sk_tick_ch);
2943 }
2945 static void
2946 sk_yukon_tick(xsc_if)
2947 void *xsc_if;
2948 {
2949 struct sk_if_softc *sc_if;
2950 struct mii_data *mii;
2952 sc_if = xsc_if;
2953 mii = device_get_softc(sc_if->sk_miibus);
2954 mii_tick(mii);
2956 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
2957 }
2959 static void
2960 sk_intr_bcom(sc_if)
2961 struct sk_if_softc *sc_if;
2962 {
2963 struct mii_data *mii;
2964 struct ifnet *ifp;
2965 int status;
2966 mii = device_get_softc(sc_if->sk_miibus);
2967 ifp = sc_if->sk_ifp;
2969 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2971 /*
2972 * Read the PHY interrupt register to make sure
2973 * we clear any pending interrupts.
2974 */
2975 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
2977 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2978 sk_init_xmac(sc_if);
2979 return;
2980 }
2982 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
2983 int lstat;
2984 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
2985 BRGPHY_MII_AUXSTS);
2987 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
2988 mii_mediachg(mii);
2989 /* Turn off the link LED. */
2990 SK_IF_WRITE_1(sc_if, 0,
2991 SK_LINKLED1_CTL, SK_LINKLED_OFF);
2992 sc_if->sk_link = 0;
2993 } else if (status & BRGPHY_ISR_LNK_CHG) {
2994 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2995 BRGPHY_MII_IMR, 0xFF00);
2996 mii_tick(mii);
2997 sc_if->sk_link = 1;
2998 /* Turn on the link LED. */
2999 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3000 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
3001 SK_LINKLED_BLINK_OFF);
3002 } else {
3003 mii_tick(mii);
3004 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3005 }
3006 }
3008 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3010 return;
3011 }
3013 static void
3014 sk_intr_xmac(sc_if)
3015 struct sk_if_softc *sc_if;
3016 {
3017 struct sk_softc *sc;
3018 u_int16_t status;
3020 sc = sc_if->sk_softc;
3021 status = SK_XM_READ_2(sc_if, XM_ISR);
3023 /*
3024 * Link has gone down. Start MII tick timeout to
3025 * watch for link resync.
3026 */
3027 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
3028 if (status & XM_ISR_GP0_SET) {
3029 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
3030 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3031 }
3033 if (status & XM_ISR_AUTONEG_DONE) {
3034 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3035 }
3036 }
3038 if (status & XM_IMR_TX_UNDERRUN)
3039 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
3041 if (status & XM_IMR_RX_OVERRUN)
3042 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
3044 status = SK_XM_READ_2(sc_if, XM_ISR);
3045 }
3049 static void
3050 sk_intr_yukon(sc_if)
3051 struct sk_if_softc *sc_if;
3052 {
3053 u_int8_t status;
3055 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
3056 /* RX overrun */
3057 if ((status & SK_GMAC_INT_RX_OVER) != 0) {
3058 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3059 SK_RFCTL_RX_FIFO_OVER);
3060 }
3061 /* TX underrun */
3062 if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
3063 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
3064 SK_TFCTL_TX_FIFO_UNDER);
3065 }
3066 }
3068 static void
3069 sk_intr(xsc)
3070 void *xsc;
3071 {
3072 struct sk_softc *sc = xsc;
3073 struct sk_if_softc *sc_if0, *sc_if1;
3074 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
3075 u_int32_t status;
3077 SK_LOCK(sc);
3079 status = CSR_READ_4(sc, SK_ISSR);
3080 if (status == 0 || status == 0xffffffff || sc->sk_suspended)
3081 goto done_locked;
3083 sc_if0 = sc->sk_if[SK_PORT_A];
3084 sc_if1 = sc->sk_if[SK_PORT_B];
3086 if (sc_if0 != NULL)
3087 ifp0 = sc_if0->sk_ifp;
3088 if (sc_if1 != NULL)
3089 ifp1 = sc_if1->sk_ifp;
3091 for (; (status &= sc->sk_intrmask) != 0;) {
3092 /* Handle receive interrupts first. */
3093 if (status & SK_ISR_RX1_EOF) {
3094 if (ifp0->if_mtu > SK_MAX_FRAMELEN)
3095 sk_jumbo_rxeof(sc_if0);
3096 else
3097 sk_rxeof(sc_if0);
3098 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
3099 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3100 }
3101 if (status & SK_ISR_RX2_EOF) {
3102 if (ifp1->if_mtu > SK_MAX_FRAMELEN)
3103 sk_jumbo_rxeof(sc_if1);
3104 else
3105 sk_rxeof(sc_if1);
3106 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
3107 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3108 }
3110 /* Then transmit interrupts. */
3111 if (status & SK_ISR_TX1_S_EOF) {
3112 sk_txeof(sc_if0);
3113 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
3114 }
3115 if (status & SK_ISR_TX2_S_EOF) {
3116 sk_txeof(sc_if1);
3117 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
3118 }
3120 /* Then MAC interrupts. */
3121 if (status & SK_ISR_MAC1 &&
3122 ifp0->if_drv_flags & IFF_DRV_RUNNING) {
3123 if (sc->sk_type == SK_GENESIS)
3124 sk_intr_xmac(sc_if0);
3125 else
3126 sk_intr_yukon(sc_if0);
3127 }
3129 if (status & SK_ISR_MAC2 &&
3130 ifp1->if_drv_flags & IFF_DRV_RUNNING) {
3131 if (sc->sk_type == SK_GENESIS)
3132 sk_intr_xmac(sc_if1);
3133 else
3134 sk_intr_yukon(sc_if1);
3135 }
3137 if (status & SK_ISR_EXTERNAL_REG) {
3138 if (ifp0 != NULL &&
3139 sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
3140 sk_intr_bcom(sc_if0);
3141 if (ifp1 != NULL &&
3142 sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
3143 sk_intr_bcom(sc_if1);
3144 }
3145 status = CSR_READ_4(sc, SK_ISSR);
3146 }
3148 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
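/*
 * With interrupts re-enabled, kick the transmit queues below in case
 * the stack queued frames while the interrupt handler was running.
 */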
3150 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3151 sk_start_locked(ifp0);
3152 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3153 sk_start_locked(ifp1);
3155 done_locked:
3156 SK_UNLOCK(sc);
3157 }
3159 static void
3160 sk_init_xmac(sc_if)
3161 struct sk_if_softc *sc_if;
3162 {
3163 struct sk_softc *sc;
3164 struct ifnet *ifp;
3165 u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2];
3166 struct sk_bcom_hack bhack[] = {
3167 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
3168 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
3169 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
3170 { 0, 0 } };
3172 SK_IF_LOCK_ASSERT(sc_if);
3174 sc = sc_if->sk_softc;
3175 ifp = sc_if->sk_ifp;
3177 /* Unreset the XMAC. */
3178 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
3179 DELAY(1000);
3181 /* Reset the XMAC's internal state. */
3182 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3183 DELAY(1000);
3184 /* Save the XMAC II revision */
3185 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
3187 /*
3188 * Perform additional initialization for external PHYs,
3189 * namely for the 1000baseTX cards that use the XMAC's
3190 * GMII mode.
3191 */
3192 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3193 int i = 0;
3194 u_int32_t val;
3196 /* Take PHY out of reset. */
3197 val = sk_win_read_4(sc, SK_GPIO);
3198 if (sc_if->sk_port == SK_PORT_A)
3199 val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
3200 else
3201 val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
3202 sk_win_write_4(sc, SK_GPIO, val);
3204 /* Enable GMII mode on the XMAC. */
3205 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
3207 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3208 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
3209 DELAY(10000);
3210 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3211 BRGPHY_MII_IMR, 0xFFF0);
3213 /*
3214 * Early versions of the BCM5400 apparently have
3215 * a bug that requires them to have their reserved
3216 * registers initialized to some magic values. I don't
3217 * know what the numbers do, I'm just the messenger.
3218 */
3219 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
3220 == 0x6041) {
3221 while(bhack[i].reg) {
3222 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3223 bhack[i].reg, bhack[i].val);
3224 i++;
3225 }
3226 }
3227 }
3229 /* Set station address */
3230 bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
3231 SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
3232 SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
3233 SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
3234 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
3236 if (ifp->if_flags & IFF_BROADCAST) {
3237 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3238 } else {
3239 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3240 }
3242 /* We don't need the FCS appended to the packet. */
3243 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
3245 /* We want short frames padded to 60 bytes. */
3246 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
3248 /*
3249 * Enable the reception of all error frames. This is
3250 * a necessary evil due to the design of the XMAC. The
3251 * XMAC's receive FIFO is only 8K in size, however jumbo
3252 * frames can be up to 9000 bytes in length. When bad
3253 * frame filtering is enabled, the XMAC's RX FIFO operates
3254 * in 'store and forward' mode. For this to work, the
3255 * entire frame has to fit into the FIFO, but that means
3256 * that jumbo frames larger than 8192 bytes will be
3257 * truncated. Disabling all bad frame filtering causes
3258 * the RX FIFO to operate in streaming mode, in which
3259 * case the XMAC will start transferring frames out of the
3260 * RX FIFO as soon as the FIFO threshold is reached.
3261 */
3262 if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3263 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
3264 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
3265 XM_MODE_RX_INRANGELEN);
3266 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3267 } else
3268 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3270 /*
3271 * Bump up the transmit threshold. This helps hold off transmit
3272 * underruns when we're blasting traffic from both ports at once.
3273 */
3274 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
3276 /* Set Rx filter */
3277 sk_rxfilter_genesis(sc_if);
3279 /* Clear and enable interrupts */
3280 SK_XM_READ_2(sc_if, XM_ISR);
3281 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
3282 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
3283 else
3284 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3286 /* Configure MAC arbiter */
3287 switch(sc_if->sk_xmac_rev) {
3288 case XM_XMAC_REV_B2:
3289 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
3290 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
3291 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
3292 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
3293 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
3294 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
3295 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
3296 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
3297 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3298 break;
3299 case XM_XMAC_REV_C1:
3300 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
3301 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
3302 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
3303 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
3304 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
3305 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
3306 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
3307 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
3308 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3309 break;
3310 default:
3311 break;
3312 }
3313 sk_win_write_2(sc, SK_MACARB_CTL,
3314 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
3316 sc_if->sk_link = 1;
3318 return;
3319 }
3321 static void
3322 sk_init_yukon(sc_if)
3323 struct sk_if_softc *sc_if;
3324 {
3325 u_int32_t phy, v;
3326 u_int16_t reg;
3327 struct sk_softc *sc;
3328 struct ifnet *ifp;
3329 int i;
3331 SK_IF_LOCK_ASSERT(sc_if);
3333 sc = sc_if->sk_softc;
3334 ifp = sc_if->sk_ifp;
3336 if (sc->sk_type == SK_YUKON_LITE &&
3337 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3338 /*
3339 * Workaround code for COMA mode, set PHY reset.
3340 * Otherwise it will not correctly take the chip out of
3341 * powerdown (COMA).
3342 */
3343 v = sk_win_read_4(sc, SK_GPIO);
3344 v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
3345 sk_win_write_4(sc, SK_GPIO, v);
3346 }
3348 /* GMAC and GPHY Reset */
3349 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
3350 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
3351 DELAY(1000);
3353 if (sc->sk_type == SK_YUKON_LITE &&
3354 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3355 /*
3356 * Workaround code for COMA mode, clear PHY reset.
3357 */
3358 v = sk_win_read_4(sc, SK_GPIO);
3359 v |= SK_GPIO_DIR9;
3360 v &= ~SK_GPIO_DAT9;
3361 sk_win_write_4(sc, SK_GPIO, v);
3362 }
3364 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
3365 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
3367 if (sc->sk_coppertype)
3368 phy |= SK_GPHY_COPPER;
3369 else
3370 phy |= SK_GPHY_FIBER;
3372 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
3373 DELAY(1000);
3374 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
3375 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
3376 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
3378 /* unused read of the interrupt source register */
3379 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
3381 reg = SK_YU_READ_2(sc_if, YUKON_PAR);
3383 /* MIB Counter Clear Mode set */
3384 reg |= YU_PAR_MIB_CLR;
3385 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3387 /* MIB Counter Clear Mode clear */
3388 reg &= ~YU_PAR_MIB_CLR;
3389 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3391 /* receive control reg */
3392 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
3394 /* transmit parameter register */
3395 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
3396 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
3398 /* serial mode register */
3399 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
3400 if (ifp->if_mtu > SK_MAX_FRAMELEN)
3401 reg |= YU_SMR_MFL_JUMBO;
3402 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
3404 /* Setup Yukon's address */
3405 for (i = 0; i < 3; i++) {
3406 /* Write Source Address 1 (unicast filter) */
3407 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
3408 IF_LLADDR(sc_if->sk_ifp)[i * 2] |
3409 IF_LLADDR(sc_if->sk_ifp)[i * 2 + 1] << 8);
3410 }
3412 for (i = 0; i < 3; i++) {
3413 reg = sk_win_read_2(sc_if->sk_softc,
3414 SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
3415 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
3416 }
3418 /* Set Rx filter */
3419 sk_rxfilter_yukon(sc_if);
3421 /* enable interrupt mask for counter overflows */
3422 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
3423 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
3424 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
3426 /* Configure RX MAC FIFO Flush Mask */
3427 v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
3428 YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
3429 YU_RXSTAT_JABBER;
3430 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);
3432 /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
3433 if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
3434 v = SK_TFCTL_OPERATION_ON;
3435 else
3436 v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
3437 /* Configure RX MAC FIFO */
3438 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
3439 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);
3441 /* Increase flush threshold to 64 bytes */
3442 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
3443 SK_RFCTL_FIFO_THRESHOLD + 1);
3445 /* Configure TX MAC FIFO */
3446 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
3447 SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
3448 }
3450 /*
3451 * Note that to properly initialize any part of the GEnesis chip,
3452 * you first have to take it out of reset mode.
3453 */
3454 static void
3455 sk_init(xsc)
3456 void *xsc;
3457 {
3458 struct sk_if_softc *sc_if = xsc;
3460 SK_IF_LOCK(sc_if);
3461 sk_init_locked(sc_if);
3462 SK_IF_UNLOCK(sc_if);
3463 }
3467 static void
3468 sk_init_locked(sc_if)
3469 struct sk_if_softc *sc_if;
3470 {
3471 struct sk_softc *sc;
3472 struct ifnet *ifp;
3473 struct mii_data *mii;
3474 u_int16_t reg;
3475 u_int32_t imr;
3476 int error;
3478 SK_IF_LOCK_ASSERT(sc_if);
3480 ifp = sc_if->sk_ifp;
3481 sc = sc_if->sk_softc;
3482 mii = device_get_softc(sc_if->sk_miibus);
3484 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3485 return;
3487 /* Cancel pending I/O and free all RX/TX buffers. */
3488 sk_stop(sc_if);
3490 if (sc->sk_type == SK_GENESIS) {
3491 /* Configure LINK_SYNC LED */
3492 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
3493 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3494 SK_LINKLED_LINKSYNC_ON);
3496 /* Configure RX LED */
3497 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
3498 SK_RXLEDCTL_COUNTER_START);
3500 /* Configure TX LED */
3501 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
3502 SK_TXLEDCTL_COUNTER_START);
3503 }
3505 /*
3506 * Configure descriptor poll timer.
3507 *
3508 * The SK-NET GENESIS data sheet says that there is a possibility of
3509 * losing the Start transmit command due to CPU/cache-related interim
3510 * storage problems under certain conditions. The document recommends
3511 * a polling mechanism to send a Start transmit command to initiate
3512 * transfer of ready descriptors regularly. To cope with this issue,
3513 * sk(4) enables the descriptor poll timer to initiate descriptor
3514 * processing periodically, as defined by SK_DPT_TIMER_MAX. However,
3515 * sk(4) still issues SK_TXBMU_TX_START to the Tx BMU to get fast
3516 * execution of Tx commands instead of waiting for the next descriptor
3517 * polling time. The same rule may apply to the Rx side too, but it
3518 * seems that it is not needed at the moment.
3519 * Since sk(4) uses descriptor polling as a last resort, there is no
3520 * need to set a smaller polling time than the maximum allowable one.
3521 */
3522 SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);
3524 /* Configure I2C registers */
3526 /* Configure XMAC(s) */
3527 switch (sc->sk_type) {
3528 case SK_GENESIS:
3529 sk_init_xmac(sc_if);
3530 break;
3531 case SK_YUKON:
3532 case SK_YUKON_LITE:
3533 case SK_YUKON_LP:
3534 sk_init_yukon(sc_if);
3535 break;
3536 }
3537 mii_mediachg(mii);
3539 if (sc->sk_type == SK_GENESIS) {
3540 /* Configure MAC FIFOs */
3541 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
3542 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
3543 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
3545 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
3546 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
3547 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
3548 }
3550 /* Configure transmit arbiter(s) */
3551 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
3552 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
3554 /* Configure RAMbuffers */
3555 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
3556 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
3557 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
3558 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
3559 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
3560 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
3562 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
3563 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
3564 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
3565 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
3566 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
3567 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
3568 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
3570 /* Configure BMUs */
3571 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
3572 if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3573 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3574 SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3575 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3576 SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3577 } else {
3578 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3579 SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
3580 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3581 SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
3582 }
3584 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
3585 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
3586 SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
3587 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
3588 SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));
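/*
 * Note: the ring base addresses are handed to the BMU as two 32-bit
 * halves (SK_ADDR_LO/SK_ADDR_HI), since the queue address registers
 * take a 64-bit starting address.
 */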
3590 /* Init descriptors */
3591 if (ifp->if_mtu > SK_MAX_FRAMELEN)
3592 error = sk_init_jumbo_rx_ring(sc_if);
3593 else
3594 error = sk_init_rx_ring(sc_if);
3595 if (error != 0) {
3596 device_printf(sc_if->sk_if_dev,
3597 "initialization failed: no memory for rx buffers\n");
3601 sk_init_tx_ring(sc_if);
3603 /* Set interrupt moderation if changed via sysctl. */
3604 imr = sk_win_read_4(sc, SK_IMTIMERINIT);
3605 if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
3606 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
3607 sc->sk_int_ticks));
3608 if (bootverbose)
3609 device_printf(sc_if->sk_if_dev,
3610 "interrupt moderation is %d us.\n",
3611 sc->sk_int_mod);
3612 }
3614 /* Configure interrupt handling */
3615 CSR_READ_4(sc, SK_ISSR);
3616 if (sc_if->sk_port == SK_PORT_A)
3617 sc->sk_intrmask |= SK_INTRS1;
3618 else
3619 sc->sk_intrmask |= SK_INTRS2;
3621 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
3623 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3625 /* Start BMUs. */
3626 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
3628 switch(sc->sk_type) {
3629 case SK_GENESIS:
3630 /* Enable XMACs TX and RX state machines */
3631 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
3632 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3633 break;
3634 case SK_YUKON:
3635 case SK_YUKON_LITE:
3636 case SK_YUKON_LP:
3637 reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
3638 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
3639 #if 0
3640 /* XXX disable 100Mbps and full duplex mode? */
3641 reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
3642 #endif
3643 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
3644 }
3646 /* Activate descriptor polling timer */
3647 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
3648 /* start transfer of Tx descriptors */
3649 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
3651 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3652 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3654 switch (sc->sk_type) {
3655 case SK_YUKON:
3656 case SK_YUKON_LITE:
3657 case SK_YUKON_LP:
3658 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
3659 break;
3660 }
3662 callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
3663 }
3667 static void
3668 sk_stop(sc_if)
3669 struct sk_if_softc *sc_if;
3670 {
3671 int i;
3672 struct sk_softc *sc;
3673 struct sk_txdesc *txd;
3674 struct sk_rxdesc *rxd;
3675 struct sk_rxdesc *jrxd;
3676 struct ifnet *ifp;
3677 u_int32_t val;
3679 SK_IF_LOCK_ASSERT(sc_if);
3680 sc = sc_if->sk_softc;
3681 ifp = sc_if->sk_ifp;
3683 callout_stop(&sc_if->sk_tick_ch);
3684 callout_stop(&sc_if->sk_watchdog_ch);
3686 /* stop Tx descriptor polling timer */
3687 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
3688 /* stop transfer of Tx descriptors */
3689 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
3690 for (i = 0; i < SK_TIMEOUT; i++) {
3691 val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
3692 if ((val & SK_TXBMU_TX_STOP) == 0)
3693 break;
3694 DELAY(1);
3695 }
3696 if (i == SK_TIMEOUT)
3697 device_printf(sc_if->sk_if_dev,
3698 "can not stop transfer of Tx descriptor\n");
3699 /* stop transfer of Rx descriptors */
3700 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
3701 for (i = 0; i < SK_TIMEOUT; i++) {
3702 val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
3703 if ((val & SK_RXBMU_RX_STOP) == 0)
3704 break;
3705 DELAY(1);
3706 }
3707 if (i == SK_TIMEOUT)
3708 device_printf(sc_if->sk_if_dev,
3709 "can not stop transfer of Rx descriptor\n");
3711 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3712 /* Put PHY back into reset. */
3713 val = sk_win_read_4(sc, SK_GPIO);
3714 if (sc_if->sk_port == SK_PORT_A) {
3715 val |= SK_GPIO_DIR0;
3716 val &= ~SK_GPIO_DAT0;
3717 } else {
3718 val |= SK_GPIO_DIR2;
3719 val &= ~SK_GPIO_DAT2;
3721 sk_win_write_4(sc, SK_GPIO, val);
3722 }
3724 /* Turn off various components of this interface. */
3725 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3726 switch (sc->sk_type) {
3727 case SK_GENESIS:
3728 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
3729 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
3730 break;
3731 case SK_YUKON:
3732 case SK_YUKON_LITE:
3733 case SK_YUKON_LP:
3734 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
3735 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
3736 break;
3737 }
3738 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
3739 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3740 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
3741 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3742 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
3743 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3744 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3745 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
3746 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
3748 /* Disable interrupts */
3749 if (sc_if->sk_port == SK_PORT_A)
3750 sc->sk_intrmask &= ~SK_INTRS1;
3752 sc->sk_intrmask &= ~SK_INTRS2;
3753 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3755 SK_XM_READ_2(sc_if, XM_ISR);
3756 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3758 /* Free RX and TX mbufs still in the queues. */
3759 for (i = 0; i < SK_RX_RING_CNT; i++) {
3760 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
3761 if (rxd->rx_m != NULL) {
3762 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
3763 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3764 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
3765 rxd->rx_dmamap);
3766 m_freem(rxd->rx_m);
3767 rxd->rx_m = NULL;
3768 }
3769 }
3770 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
3771 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
3772 if (jrxd->rx_m != NULL) {
3773 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
3774 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3775 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
3776 jrxd->rx_dmamap);
3777 m_freem(jrxd->rx_m);
3778 jrxd->rx_m = NULL;
3779 }
3780 }
3781 for (i = 0; i < SK_TX_RING_CNT; i++) {
3782 txd = &sc_if->sk_cdata.sk_txdesc[i];
3783 if (txd->tx_m != NULL) {
3784 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
3785 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3786 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
3787 txd->tx_dmamap);
3788 m_freem(txd->tx_m);
3789 txd->tx_m = NULL;
3790 }
3791 }
3793 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
3794 }
3798 static int
3799 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3800 {
3801 int error, value;
3803 if (!arg1)
3804 return (EINVAL);
3805 value = *(int *)arg1;
3806 error = sysctl_handle_int(oidp, &value, 0, req);
3807 if (error || !req->newptr)
3808 return (error);
3809 if (value < low || value > high)
3810 return (EINVAL);
3811 *(int *)arg1 = value;
3812 return (0);
3813 }
3815 static int
3816 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
3817 {
3818 return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
3819 }
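/*
 * Usage sketch (a hypothetical call site; the actual attach code is not
 * part of this excerpt): a handler like this is typically hooked up via
 * SYSCTL_ADD_PROC(), e.g.
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 *	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->sk_int_mod, 0,
 *	    sysctl_hw_sk_int_mod, "I", "SK interrupt moderation");
 *
 * so writes to the sysctl are range-checked against SK_IM_MIN/SK_IM_MAX
 * before sc->sk_int_mod is updated.
 */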