1 /* $OpenBSD: if_sk.c,v 1.33 2003/08/12 05:23:06 nate Exp $ */
4 * Copyright (c) 1997, 1998, 1999, 2000
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
57 * The XaQti XMAC II datasheet,
58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 * The SysKonnect GEnesis manual, http://www.syskonnect.com
61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
72 * The SysKonnect gigabit ethernet adapters consist of two main
73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75 * components and a PHY while the GEnesis controller provides a PCI
76 * interface with DMA support. Each card may have between 512K and
77 * 2MB of SRAM on board depending on the configuration.
79 * The SysKonnect GEnesis controller can have either one or two XMAC
80 * chips connected to it, allowing single or dual port NIC configurations.
81 * SysKonnect has the distinction of being the only vendor on the market
82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84 * XMAC registers. This driver takes advantage of these features to allow
85 * both XMACs to operate as independent interfaces.
88 #include <sys/param.h>
89 #include <sys/systm.h>
91 #include <sys/endian.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/module.h>
96 #include <sys/socket.h>
97 #include <sys/sockio.h>
98 #include <sys/queue.h>
99 #include <sys/sysctl.h>
102 #include <net/ethernet.h>
104 #include <net/if_arp.h>
105 #include <net/if_dl.h>
106 #include <net/if_media.h>
107 #include <net/if_types.h>
108 #include <net/if_vlan_var.h>
110 #include <netinet/in.h>
111 #include <netinet/in_systm.h>
112 #include <netinet/ip.h>
114 #include <machine/bus.h>
115 #include <machine/in_cksum.h>
116 #include <machine/resource.h>
117 #include <sys/rman.h>
119 #include <dev/mii/mii.h>
120 #include <dev/mii/miivar.h>
121 #include <dev/mii/brgphyreg.h>
123 #include <dev/pci/pcireg.h>
124 #include <dev/pci/pcivar.h>
127 #define SK_USEIOSPACE
130 #include <dev/sk/if_skreg.h>
131 #include <dev/sk/xmaciireg.h>
132 #include <dev/sk/yukonreg.h>
134 MODULE_DEPEND(sk, pci, 1, 1, 1);
135 MODULE_DEPEND(sk, ether, 1, 1, 1);
136 MODULE_DEPEND(sk, miibus, 1, 1, 1);
138 /* "device miibus" required. See GENERIC if you get errors here. */
139 #include "miibus_if.h"
142 static const char rcsid[] =
146 static struct sk_type sk_devs[] = {
150 "SysKonnect Gigabit Ethernet (V1.0)"
155 "SysKonnect Gigabit Ethernet (V2.0)"
160 "Marvell Gigabit Ethernet"
164 DEVICEID_BELKIN_5005,
165 "Belkin F5D5005 Gigabit Ethernet"
170 "3Com 3C940 Gigabit Ethernet"
174 DEVICEID_LINKSYS_EG1032,
175 "Linksys EG1032 Gigabit Ethernet"
179 DEVICEID_DLINK_DGE530T_A1,
180 "D-Link DGE-530T Gigabit Ethernet"
184 DEVICEID_DLINK_DGE530T_B1,
185 "D-Link DGE-530T Gigabit Ethernet"
190 static int skc_probe(device_t);
191 static int skc_attach(device_t);
192 static int skc_detach(device_t);
193 static void skc_shutdown(device_t);
194 static int skc_suspend(device_t);
195 static int skc_resume(device_t);
196 static int sk_detach(device_t);
197 static int sk_probe(device_t);
198 static int sk_attach(device_t);
199 static void sk_tick(void *);
200 static void sk_yukon_tick(void *);
201 static void sk_intr(void *);
202 static void sk_intr_xmac(struct sk_if_softc *);
203 static void sk_intr_bcom(struct sk_if_softc *);
204 static void sk_intr_yukon(struct sk_if_softc *);
205 static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t);
206 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
207 static void sk_rxeof(struct sk_if_softc *);
208 static void sk_jumbo_rxeof(struct sk_if_softc *);
209 static void sk_txeof(struct sk_if_softc *);
210 static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *);
211 static int sk_encap(struct sk_if_softc *, struct mbuf **);
212 static void sk_start(struct ifnet *);
213 static void sk_start_locked(struct ifnet *);
214 static int sk_ioctl(struct ifnet *, u_long, caddr_t);
215 static void sk_init(void *);
216 static void sk_init_locked(struct sk_if_softc *);
217 static void sk_init_xmac(struct sk_if_softc *);
218 static void sk_init_yukon(struct sk_if_softc *);
219 static void sk_stop(struct sk_if_softc *);
220 static void sk_watchdog(void *);
221 static int sk_ifmedia_upd(struct ifnet *);
222 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
223 static void sk_reset(struct sk_softc *);
224 static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
225 static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
226 static int sk_newbuf(struct sk_if_softc *, int);
227 static int sk_jumbo_newbuf(struct sk_if_softc *, int);
228 static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
229 static int sk_dma_alloc(struct sk_if_softc *);
230 static void sk_dma_free(struct sk_if_softc *);
231 static void *sk_jalloc(struct sk_if_softc *);
232 static void sk_jfree(void *, void *);
233 static int sk_init_rx_ring(struct sk_if_softc *);
234 static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
235 static void sk_init_tx_ring(struct sk_if_softc *);
236 static u_int32_t sk_win_read_4(struct sk_softc *, int);
237 static u_int16_t sk_win_read_2(struct sk_softc *, int);
238 static u_int8_t sk_win_read_1(struct sk_softc *, int);
239 static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
240 static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
241 static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
243 static int sk_miibus_readreg(device_t, int, int);
244 static int sk_miibus_writereg(device_t, int, int, int);
245 static void sk_miibus_statchg(device_t);
247 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
248 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
250 static void sk_xmac_miibus_statchg(struct sk_if_softc *);
252 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
253 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
255 static void sk_marv_miibus_statchg(struct sk_if_softc *);
257 static uint32_t sk_xmchash(const uint8_t *);
258 static uint32_t sk_gmchash(const uint8_t *);
259 static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
260 static void sk_setmulti(struct sk_if_softc *);
261 static void sk_setpromisc(struct sk_if_softc *);
263 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
264 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
267 * It seems that SK-NET GENESIS supports very simple checksum offload
268 * capability for Tx and I believe it can generate 0 checksum value for
269 * UDP packets in Tx as the hardware can't differentiate UDP packets from
270 * TCP packets. A 0 checksum value for a UDP packet is invalid, as it
271 * means the sender didn't perform the checksum computation. For safety I
272 * disabled UDP checksum offload capability at the moment. Alternatively
273 * we can introduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum
276 #define SK_CSUM_FEATURES (CSUM_TCP)
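/*
 * A hypothetical sketch (not implemented here) of what the LINK0-style
 * override mentioned above could look like, using the standard ifnet
 * link flags:
 *
 *	if (ifp->if_flags & IFF_LINK0)
 *		ifp->if_hwassist = CSUM_TCP | CSUM_UDP;
 *	else
 *		ifp->if_hwassist = SK_CSUM_FEATURES;
 */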
279 * Note that we have newbus methods for both the GEnesis controller
280 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
281 * the miibus code is a child of the XMACs. We need to do it this way
282 * so that the miibus drivers can access the PHY registers on the
283 * right PHY. It's not quite what I had in mind, but it's the only
284 * design that achieves the desired effect.
286 static device_method_t skc_methods[] = {
287 /* Device interface */
288 DEVMETHOD(device_probe, skc_probe),
289 DEVMETHOD(device_attach, skc_attach),
290 DEVMETHOD(device_detach, skc_detach),
291 DEVMETHOD(device_suspend, skc_suspend),
292 DEVMETHOD(device_resume, skc_resume),
293 DEVMETHOD(device_shutdown, skc_shutdown),
296 DEVMETHOD(bus_print_child, bus_generic_print_child),
297 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
302 static driver_t skc_driver = {
305 sizeof(struct sk_softc)
308 static devclass_t skc_devclass;
310 static device_method_t sk_methods[] = {
311 /* Device interface */
312 DEVMETHOD(device_probe, sk_probe),
313 DEVMETHOD(device_attach, sk_attach),
314 DEVMETHOD(device_detach, sk_detach),
315 DEVMETHOD(device_shutdown, bus_generic_shutdown),
318 DEVMETHOD(bus_print_child, bus_generic_print_child),
319 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
322 DEVMETHOD(miibus_readreg, sk_miibus_readreg),
323 DEVMETHOD(miibus_writereg, sk_miibus_writereg),
324 DEVMETHOD(miibus_statchg, sk_miibus_statchg),
329 static driver_t sk_driver = {
332 sizeof(struct sk_if_softc)
335 static devclass_t sk_devclass;
337 DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, 0, 0);
338 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
339 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
341 static struct resource_spec sk_res_spec_io[] = {
342 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
343 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
347 static struct resource_spec sk_res_spec_mem[] = {
348 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
349 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
353 #define SK_SETBIT(sc, reg, x) \
354 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
356 #define SK_CLRBIT(sc, reg, x) \
357 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
359 #define SK_WIN_SETBIT_4(sc, reg, x) \
360 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
362 #define SK_WIN_CLRBIT_4(sc, reg, x) \
363 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
365 #define SK_WIN_SETBIT_2(sc, reg, x) \
366 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
368 #define SK_WIN_CLRBIT_2(sc, reg, x) \
369 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
372 sk_win_read_4(sc, reg)
377 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
378 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
380 return(CSR_READ_4(sc, reg));
385 sk_win_read_2(sc, reg)
390 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
391 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
393 return(CSR_READ_2(sc, reg));
398 sk_win_read_1(sc, reg)
403 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
404 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
406 return(CSR_READ_1(sc, reg));
411 sk_win_write_4(sc, reg, val)
417 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
418 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
420 CSR_WRITE_4(sc, reg, val);
426 sk_win_write_2(sc, reg, val)
432 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
433 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
435 CSR_WRITE_2(sc, reg, val);
441 sk_win_write_1(sc, reg, val)
447 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
448 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
450 CSR_WRITE_1(sc, reg, val);
456 sk_miibus_readreg(dev, phy, reg)
460 struct sk_if_softc *sc_if;
463 sc_if = device_get_softc(dev);
465 SK_IF_MII_LOCK(sc_if);
466 switch(sc_if->sk_softc->sk_type) {
468 v = sk_xmac_miibus_readreg(sc_if, phy, reg);
473 v = sk_marv_miibus_readreg(sc_if, phy, reg);
479 SK_IF_MII_UNLOCK(sc_if);
485 sk_miibus_writereg(dev, phy, reg, val)
489 struct sk_if_softc *sc_if;
492 sc_if = device_get_softc(dev);
494 SK_IF_MII_LOCK(sc_if);
495 switch(sc_if->sk_softc->sk_type) {
497 v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
502 v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
508 SK_IF_MII_UNLOCK(sc_if);
514 sk_miibus_statchg(dev)
517 struct sk_if_softc *sc_if;
519 sc_if = device_get_softc(dev);
521 SK_IF_MII_LOCK(sc_if);
522 switch(sc_if->sk_softc->sk_type) {
524 sk_xmac_miibus_statchg(sc_if);
529 sk_marv_miibus_statchg(sc_if);
532 SK_IF_MII_UNLOCK(sc_if);
538 sk_xmac_miibus_readreg(sc_if, phy, reg)
539 struct sk_if_softc *sc_if;
544 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
547 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
548 SK_XM_READ_2(sc_if, XM_PHY_DATA);
549 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
550 for (i = 0; i < SK_TIMEOUT; i++) {
552 if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
553 XM_MMUCMD_PHYDATARDY)
557 if (i == SK_TIMEOUT) {
558 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
563 i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
569 sk_xmac_miibus_writereg(sc_if, phy, reg, val)
570 struct sk_if_softc *sc_if;
575 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
576 for (i = 0; i < SK_TIMEOUT; i++) {
577 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
581 if (i == SK_TIMEOUT) {
582 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
586 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
587 for (i = 0; i < SK_TIMEOUT; i++) {
589 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
593 if_printf(sc_if->sk_ifp, "phy write timed out\n");
599 sk_xmac_miibus_statchg(sc_if)
600 struct sk_if_softc *sc_if;
602 struct mii_data *mii;
604 mii = device_get_softc(sc_if->sk_miibus);
607 * If this is a GMII PHY, manually set the XMAC's
608 * duplex mode accordingly.
610 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
611 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
612 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
614 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
620 sk_marv_miibus_readreg(sc_if, phy, reg)
621 struct sk_if_softc *sc_if;
628 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
629 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
633 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
634 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
636 for (i = 0; i < SK_TIMEOUT; i++) {
638 val = SK_YU_READ_2(sc_if, YUKON_SMICR);
639 if (val & YU_SMICR_READ_VALID)
643 if (i == SK_TIMEOUT) {
644 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
648 val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
654 sk_marv_miibus_writereg(sc_if, phy, reg, val)
655 struct sk_if_softc *sc_if;
660 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
661 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
662 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
664 for (i = 0; i < SK_TIMEOUT; i++) {
666 if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
670 if_printf(sc_if->sk_ifp, "phy write timeout\n");
676 sk_marv_miibus_statchg(sc_if)
677 struct sk_if_softc *sc_if;
690 /* Compute CRC for the address value. */
691 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
693 return (~crc & ((1 << HASH_BITS) - 1));
696 /* gmchash is just a big endian crc */
703 /* Compute CRC for the address value. */
704 crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
706 return (crc & ((1 << HASH_BITS) - 1));
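/*
 * Both hash routines return a bin number in the 64-bit multicast hash
 * table (2^HASH_BITS bins).  A sketch of how a bin is turned into a
 * filter bit, mirroring what sk_setmulti() does below:
 *
 *	h = sk_gmchash(maddr);			(0 <= h < 64)
 *	if (h < 32)
 *		hashes[0] |= (1 << h);		(YUKON_MCAH1/MCAH2)
 *	else
 *		hashes[1] |= (1 << (h - 32));	(YUKON_MCAH3/MCAH4)
 */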
710 sk_setfilt(sc_if, addr, slot)
711 struct sk_if_softc *sc_if;
717 base = XM_RXFILT_ENTRY(slot);
719 SK_XM_WRITE_2(sc_if, base, addr[0]);
720 SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
721 SK_XM_WRITE_2(sc_if, base + 4, addr[2]);
728 struct sk_if_softc *sc_if;
730 struct sk_softc *sc = sc_if->sk_softc;
731 struct ifnet *ifp = sc_if->sk_ifp;
732 u_int32_t hashes[2] = { 0, 0 };
734 struct ifmultiaddr *ifma;
735 u_int16_t dummy[] = { 0, 0, 0 };
736 u_int16_t maddr[(ETHER_ADDR_LEN+1)/2];
738 SK_IF_LOCK_ASSERT(sc_if);
740 /* First, zot all the existing filters. */
741 switch(sc->sk_type) {
743 for (i = 1; i < XM_RXFILT_MAX; i++)
744 sk_setfilt(sc_if, dummy, i);
746 SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
747 SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
752 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
753 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
754 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
755 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
759 /* Now program new ones. */
760 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
761 hashes[0] = 0xFFFFFFFF;
762 hashes[1] = 0xFFFFFFFF;
766 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
767 if (ifma->ifma_addr->sa_family != AF_LINK)
770 * Program the first XM_RXFILT_MAX multicast groups
771 * into the perfect filter. For all others,
772 * use the hash table.
774 if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
776 (struct sockaddr_dl *)ifma->ifma_addr),
777 maddr, ETHER_ADDR_LEN);
778 sk_setfilt(sc_if, maddr, i);
783 switch(sc->sk_type) {
786 (struct sockaddr_dl *)ifma->ifma_addr),
787 maddr, ETHER_ADDR_LEN);
788 h = sk_xmchash((const uint8_t *)maddr);
794 (struct sockaddr_dl *)ifma->ifma_addr),
795 maddr, ETHER_ADDR_LEN);
796 h = sk_gmchash((const uint8_t *)maddr);
800 hashes[0] |= (1 << h);
802 hashes[1] |= (1 << (h - 32));
807 switch(sc->sk_type) {
809 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
810 XM_MODE_RX_USE_PERFECT);
811 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
812 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
817 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
818 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
819 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
820 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
829 struct sk_if_softc *sc_if;
831 struct sk_softc *sc = sc_if->sk_softc;
832 struct ifnet *ifp = sc_if->sk_ifp;
834 SK_IF_LOCK_ASSERT(sc_if);
836 switch(sc->sk_type) {
838 if (ifp->if_flags & IFF_PROMISC) {
839 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
841 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
847 if (ifp->if_flags & IFF_PROMISC) {
848 SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
849 YU_RCR_UFLEN | YU_RCR_MUFLEN);
851 SK_YU_SETBIT_2(sc_if, YUKON_RCR,
852 YU_RCR_UFLEN | YU_RCR_MUFLEN);
861 sk_init_rx_ring(sc_if)
862 struct sk_if_softc *sc_if;
864 struct sk_ring_data *rd;
866 u_int32_t csum_start;
869 sc_if->sk_cdata.sk_rx_cons = 0;
871 csum_start = (ETHER_HDR_LEN + sizeof(struct ip)) << 16 |
873 rd = &sc_if->sk_rdata;
874 bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
875 for (i = 0; i < SK_RX_RING_CNT; i++) {
876 if (sk_newbuf(sc_if, i) != 0)
878 if (i == (SK_RX_RING_CNT - 1))
879 addr = SK_RX_RING_ADDR(sc_if, 0);
881 addr = SK_RX_RING_ADDR(sc_if, i + 1);
882 rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
883 rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
886 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
887 sc_if->sk_cdata.sk_rx_ring_map,
888 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
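/*
 * Note on the descriptor linkage built above: each descriptor's
 * sk_next field holds the low 32 bits of the bus address of the
 * following descriptor, and the last entry points back at entry 0,
 * so the receive BMU walks the ring as a circular list.
 */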
894 sk_init_jumbo_rx_ring(sc_if)
895 struct sk_if_softc *sc_if;
897 struct sk_ring_data *rd;
899 u_int32_t csum_start;
902 sc_if->sk_cdata.sk_jumbo_rx_cons = 0;
904 csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
906 rd = &sc_if->sk_rdata;
907 bzero(rd->sk_jumbo_rx_ring,
908 sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
909 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
910 if (sk_jumbo_newbuf(sc_if, i) != 0)
912 if (i == (SK_JUMBO_RX_RING_CNT - 1))
913 addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
915 addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
916 rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
917 rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
920 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
921 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
922 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
928 sk_init_tx_ring(sc_if)
929 struct sk_if_softc *sc_if;
931 struct sk_ring_data *rd;
932 struct sk_txdesc *txd;
936 STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
937 STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);
939 sc_if->sk_cdata.sk_tx_prod = 0;
940 sc_if->sk_cdata.sk_tx_cons = 0;
941 sc_if->sk_cdata.sk_tx_cnt = 0;
943 rd = &sc_if->sk_rdata;
944 bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
945 for (i = 0; i < SK_TX_RING_CNT; i++) {
946 if (i == (SK_TX_RING_CNT - 1))
947 addr = SK_TX_RING_ADDR(sc_if, 0);
949 addr = SK_TX_RING_ADDR(sc_if, i + 1);
950 rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
951 txd = &sc_if->sk_cdata.sk_txdesc[i];
952 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
955 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
956 sc_if->sk_cdata.sk_tx_ring_map,
957 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
961 sk_discard_rxbuf(sc_if, idx)
962 struct sk_if_softc *sc_if;
965 struct sk_rx_desc *r;
966 struct sk_rxdesc *rxd;
970 r = &sc_if->sk_rdata.sk_rx_ring[idx];
971 rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
973 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
977 sk_discard_jumbo_rxbuf(sc_if, idx)
978 struct sk_if_softc *sc_if;
981 struct sk_rx_desc *r;
982 struct sk_rxdesc *rxd;
985 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
986 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
988 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
992 sk_newbuf(sc_if, idx)
993 struct sk_if_softc *sc_if;
996 struct sk_rx_desc *r;
997 struct sk_rxdesc *rxd;
999 bus_dma_segment_t segs[1];
1003 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1006 m->m_len = m->m_pkthdr.len = MCLBYTES;
1007 m_adj(m, ETHER_ALIGN);
1009 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
1010 sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1014 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1016 rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
1017 if (rxd->rx_m != NULL) {
1018 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
1019 BUS_DMASYNC_POSTREAD);
1020 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
1022 map = rxd->rx_dmamap;
1023 rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
1024 sc_if->sk_cdata.sk_rx_sparemap = map;
1025 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
1026 BUS_DMASYNC_PREREAD);
1028 r = &sc_if->sk_rdata.sk_rx_ring[idx];
1029 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
1030 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
1031 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
1037 sk_jumbo_newbuf(sc_if, idx)
1038 struct sk_if_softc *sc_if;
1041 struct sk_rx_desc *r;
1042 struct sk_rxdesc *rxd;
1044 bus_dma_segment_t segs[1];
1049 MGETHDR(m, M_DONTWAIT, MT_DATA);
1052 buf = sk_jalloc(sc_if);
1057 /* Attach the buffer to the mbuf */
1058 MEXTADD(m, buf, SK_JLEN, sk_jfree, (struct sk_if_softc *)sc_if, 0,
1060 if ((m->m_flags & M_EXT) == 0) {
1064 m->m_pkthdr.len = m->m_len = SK_JLEN;
1066 * Adjust alignment so packet payload begins on a
1067 * longword boundary. Mandatory for Alpha, useful on
1070 m_adj(m, ETHER_ALIGN);
1072 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
1073 sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1077 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1079 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
1080 if (rxd->rx_m != NULL) {
1081 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
1082 BUS_DMASYNC_POSTREAD);
1083 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
1086 map = rxd->rx_dmamap;
1087 rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
1088 sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
1089 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
1090 BUS_DMASYNC_PREREAD);
1092 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
1093 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
1094 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
1095 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
1101 * Set media options.
1107 struct sk_if_softc *sc_if = ifp->if_softc;
1108 struct mii_data *mii;
1110 mii = device_get_softc(sc_if->sk_miibus);
1118 * Report current media status.
1121 sk_ifmedia_sts(ifp, ifmr)
1123 struct ifmediareq *ifmr;
1125 struct sk_if_softc *sc_if;
1126 struct mii_data *mii;
1128 sc_if = ifp->if_softc;
1129 mii = device_get_softc(sc_if->sk_miibus);
1132 ifmr->ifm_active = mii->mii_media_active;
1133 ifmr->ifm_status = mii->mii_media_status;
1139 sk_ioctl(ifp, command, data)
1144 struct sk_if_softc *sc_if = ifp->if_softc;
1145 struct ifreq *ifr = (struct ifreq *) data;
1147 struct mii_data *mii;
1153 if (ifr->ifr_mtu > SK_JUMBO_MTU)
1156 ifp->if_mtu = ifr->ifr_mtu;
1157 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1158 sk_init_locked(sc_if);
1160 SK_IF_UNLOCK(sc_if);
1164 if (ifp->if_flags & IFF_UP) {
1165 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1166 if ((ifp->if_flags ^ sc_if->sk_if_flags)
1168 sk_setpromisc(sc_if);
1172 sk_init_locked(sc_if);
1174 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1177 sc_if->sk_if_flags = ifp->if_flags;
1178 SK_IF_UNLOCK(sc_if);
1183 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1185 SK_IF_UNLOCK(sc_if);
1189 mii = device_get_softc(sc_if->sk_miibus);
1190 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1194 if (sc_if->sk_softc->sk_type == SK_GENESIS) {
1195 SK_IF_UNLOCK(sc_if);
1198 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1199 if (mask & IFCAP_HWCSUM) {
1200 ifp->if_capenable ^= IFCAP_HWCSUM;
1201 if (IFCAP_HWCSUM & ifp->if_capenable &&
1202 IFCAP_HWCSUM & ifp->if_capabilities)
1203 ifp->if_hwassist = SK_CSUM_FEATURES;
1205 ifp->if_hwassist = 0;
1207 SK_IF_UNLOCK(sc_if);
1210 error = ether_ioctl(ifp, command, data);
1218 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1219 * IDs against our list and return a device name if we find a match.
1225 struct sk_type *t = sk_devs;
1227 while(t->sk_name != NULL) {
1228 if ((pci_get_vendor(dev) == t->sk_vid) &&
1229 (pci_get_device(dev) == t->sk_did)) {
1231 * Only attach to rev. 2 of the Linksys EG1032 adapter.
1232 * Rev. 3 is supported by re(4).
1234 if ((t->sk_vid == VENDORID_LINKSYS) &&
1235 (t->sk_did == DEVICEID_LINKSYS_EG1032) &&
1236 (pci_get_subdevice(dev) !=
1237 SUBDEVICEID_LINKSYS_EG1032_REV2)) {
1241 device_set_desc(dev, t->sk_name);
1242 return (BUS_PROBE_DEFAULT);
1251 * Force the GEnesis into reset, then bring it out of reset.
1255 struct sk_softc *sc;
1258 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1259 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1260 if (SK_YUKON_FAMILY(sc->sk_type))
1261 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1264 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1266 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1267 if (SK_YUKON_FAMILY(sc->sk_type))
1268 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1270 if (sc->sk_type == SK_GENESIS) {
1271 /* Configure packet arbiter */
1272 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1273 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1274 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1275 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1276 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1279 /* Enable RAM interface */
1280 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1283 * Configure interrupt moderation. The moderation timer
1284 * defers interrupts specified in the interrupt moderation
1285 * timer mask based on the timeout specified in the interrupt
1286 * moderation timer init register. Each bit in the timer
1287 * register represents one tick, so to specify a timeout in
1288 * microseconds, we have to multiply by the correct number of
1289 * ticks-per-microsecond.
1291 switch (sc->sk_type) {
1293 sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
1296 sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
1300 device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
1302 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
1304 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1305 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1306 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
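/*
 * Worked example of the tick arithmetic described above: SK_IM_USECS()
 * essentially scales the microsecond count by the chip's
 * ticks-per-microsecond value, so a moderation setting of, say, 100us
 * programs 100 * SK_IMTIMER_TICKS_GENESIS ticks into SK_IMTIMERINIT on
 * a GEnesis, and 100 * SK_IMTIMER_TICKS_YUKON on a Yukon.
 */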
1315 struct sk_softc *sc;
1317 sc = device_get_softc(device_get_parent(dev));
1320 * Not much to do here. We always know there will be
1321 * at least one XMAC present, and if there are two,
1322 * skc_attach() will create a second device instance
1325 switch (sc->sk_type) {
1327 device_set_desc(dev, "XaQti Corp. XMAC II");
1332 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1336 return (BUS_PROBE_DEFAULT);
1340 * Each XMAC chip is attached as a separate logical IP interface.
1341 * Single port cards will have only one logical interface of course.
1347 struct sk_softc *sc;
1348 struct sk_if_softc *sc_if;
1357 sc_if = device_get_softc(dev);
1358 sc = device_get_softc(device_get_parent(dev));
1359 port = *(int *)device_get_ivars(dev);
1361 sc_if->sk_if_dev = dev;
1362 sc_if->sk_port = port;
1363 sc_if->sk_softc = sc;
1364 sc->sk_if[port] = sc_if;
1365 if (port == SK_PORT_A)
1366 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1367 if (port == SK_PORT_B)
1368 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1370 callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
1371 callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);
1373 if (sk_dma_alloc(sc_if) != 0) {
1378 ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
1380 device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
1384 ifp->if_softc = sc_if;
1385 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1386 ifp->if_mtu = ETHERMTU;
1387 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1389 * SK_GENESIS has a bug in checksum offload - From linux.
1391 if (sc_if->sk_softc->sk_type != SK_GENESIS) {
1392 ifp->if_capabilities = IFCAP_HWCSUM;
1393 ifp->if_hwassist = SK_CSUM_FEATURES;
1395 ifp->if_capabilities = 0;
1396 ifp->if_hwassist = 0;
1398 ifp->if_capenable = ifp->if_capabilities;
1399 ifp->if_ioctl = sk_ioctl;
1400 ifp->if_start = sk_start;
1402 ifp->if_watchdog = NULL;
1403 ifp->if_init = sk_init;
1404 IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1405 ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
1406 IFQ_SET_READY(&ifp->if_snd);
1409 * Get station address for this interface. Note that
1410 * dual port cards actually come with three station
1411 * addresses: one for each port, plus an extra. The
1412 * extra one is used by the SysKonnect driver software
1413 * as a 'virtual' station address for when both ports
1414 * are operating in failover mode. Currently we don't
1415 * use this extra address.
1418 for (i = 0; i < ETHER_ADDR_LEN; i++)
1420 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1423 * Set up RAM buffer addresses. The NIC will have a certain
1424 * amount of SRAM on it, somewhere between 512K and 2MB. We
1425 * need to divide this up a) between the transmitter and
1426 * receiver and b) between the two XMACs, if this is a
1427 * dual port NIC. Our algorithm is to divide up the memory
1428 * evenly so that everyone gets a fair share.
1430 * Just to be contrary, Yukon2 appears to have separate memory
1433 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1434 u_int32_t chunk, val;
1436 chunk = sc->sk_ramsize / 2;
1437 val = sc->sk_rboff / sizeof(u_int64_t);
1438 sc_if->sk_rx_ramstart = val;
1439 val += (chunk / sizeof(u_int64_t));
1440 sc_if->sk_rx_ramend = val - 1;
1441 sc_if->sk_tx_ramstart = val;
1442 val += (chunk / sizeof(u_int64_t));
1443 sc_if->sk_tx_ramend = val - 1;
1445 u_int32_t chunk, val;
1447 chunk = sc->sk_ramsize / 4;
1448 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1450 sc_if->sk_rx_ramstart = val;
1451 val += (chunk / sizeof(u_int64_t));
1452 sc_if->sk_rx_ramend = val - 1;
1453 sc_if->sk_tx_ramstart = val;
1454 val += (chunk / sizeof(u_int64_t));
1455 sc_if->sk_tx_ramend = val - 1;
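/*
 * Worked example of the split above (assuming sk_rboff is 0): on a
 * dual-MAC board with 1MB of SRAM, chunk is 256K per queue.  Since
 * RAM buffer addresses are expressed in 8-byte units, port B
 * (sk_port == 1) ends up with rx_ramstart = 0x10000, rx_ramend =
 * 0x17fff, tx_ramstart = 0x18000 and tx_ramend = 0x1ffff.
 */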
1458 /* Read and save PHY type and set PHY address */
1459 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1460 if (!SK_YUKON_FAMILY(sc->sk_type)) {
1461 switch(sc_if->sk_phytype) {
1462 case SK_PHYTYPE_XMAC:
1463 sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1465 case SK_PHYTYPE_BCOM:
1466 sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1469 device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
1472 SK_IF_UNLOCK(sc_if);
1476 if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
1477 sc->sk_pmd != 'S') {
1478 /* not initialized, punt */
1479 sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
1480 sc->sk_coppertype = 1;
1483 sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1485 if (!(sc->sk_coppertype))
1486 sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
1490 * Call MI attach routine. Can't hold locks when calling into ether_*.
1492 SK_IF_UNLOCK(sc_if);
1493 ether_ifattach(ifp, eaddr);
1497 * The hardware should be ready for VLAN_MTU by default:
1498 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
1499 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
1502 ifp->if_capabilities |= IFCAP_VLAN_MTU;
1503 ifp->if_capenable |= IFCAP_VLAN_MTU;
1505 * Tell the upper layer(s) we support long frames.
1506 * Must appear after the call to ether_ifattach() because
1507 * ether_ifattach() sets ifi_hdrlen to the default value.
1509 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1514 switch (sc->sk_type) {
1516 sk_init_xmac(sc_if);
1521 sk_init_yukon(sc_if);
1525 SK_IF_UNLOCK(sc_if);
1526 if (mii_phy_probe(dev, &sc_if->sk_miibus,
1527 sk_ifmedia_upd, sk_ifmedia_sts)) {
1528 device_printf(sc_if->sk_if_dev, "no PHY found!\n");
1529 ether_ifdetach(ifp);
1536 /* Access should be ok even though lock has been dropped */
1537 sc->sk_if[port] = NULL;
1545 * Attach the interface. Allocate softc structures, do ifmedia
1546 * setup and ethernet/BPF attach.
1552 struct sk_softc *sc;
1553 int error = 0, *port;
1555 const char *pname = NULL;
1558 sc = device_get_softc(dev);
1561 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1563 mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
1565 * Map control/status registers.
1567 pci_enable_busmaster(dev);
1569 /* Allocate resources */
1570 #ifdef SK_USEIOSPACE
1571 sc->sk_res_spec = sk_res_spec_io;
1573 sc->sk_res_spec = sk_res_spec_mem;
1575 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
1577 if (sc->sk_res_spec == sk_res_spec_mem)
1578 sc->sk_res_spec = sk_res_spec_io;
1580 sc->sk_res_spec = sk_res_spec_mem;
1581 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
1583 device_printf(dev, "couldn't allocate %s resources\n",
1584 sc->sk_res_spec == sk_res_spec_mem ? "memory" :
1590 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1591 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;
1593 /* Bail out if chip is not recognized. */
1594 if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
1595 device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
1596 sc->sk_type, sc->sk_rev);
1601 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1602 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1603 OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
1604 &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
1605 "SK interrupt moderation");
1607 /* Pull in device tunables. */
1608 sc->sk_int_mod = SK_IM_DEFAULT;
1609 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1610 "int_mod", &sc->sk_int_mod);
1612 if (sc->sk_int_mod < SK_IM_MIN ||
1613 sc->sk_int_mod > SK_IM_MAX) {
1614 device_printf(dev, "int_mod value out of range; "
1615 "using default: %d\n", SK_IM_DEFAULT);
1616 sc->sk_int_mod = SK_IM_DEFAULT;
1620 /* Reset the adapter. */
1623 skrs = sk_win_read_1(sc, SK_EPROM0);
1624 if (sc->sk_type == SK_GENESIS) {
1625 /* Read and save RAM size and RAMbuffer offset */
1627 case SK_RAMSIZE_512K_64:
1628 sc->sk_ramsize = 0x80000;
1629 sc->sk_rboff = SK_RBOFF_0;
1631 case SK_RAMSIZE_1024K_64:
1632 sc->sk_ramsize = 0x100000;
1633 sc->sk_rboff = SK_RBOFF_80000;
1635 case SK_RAMSIZE_1024K_128:
1636 sc->sk_ramsize = 0x100000;
1637 sc->sk_rboff = SK_RBOFF_0;
1639 case SK_RAMSIZE_2048K_128:
1640 sc->sk_ramsize = 0x200000;
1641 sc->sk_rboff = SK_RBOFF_0;
1644 device_printf(dev, "unknown ram size: %d\n", skrs);
1648 } else { /* SK_YUKON_FAMILY */
1650 sc->sk_ramsize = 0x20000;
1652 sc->sk_ramsize = skrs * (1<<12);
1653 sc->sk_rboff = SK_RBOFF_0;
1656 /* Read and save physical media type */
1657 sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);
1659 if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
1660 sc->sk_coppertype = 1;
1662 sc->sk_coppertype = 0;
1664 /* Determine whether to name it with VPD PN or just make it up.
1665 * Marvell Yukon VPD PN frequently seems to be bogus. */
1666 switch (pci_get_device(dev)) {
1667 case DEVICEID_SK_V1:
1668 case DEVICEID_BELKIN_5005:
1669 case DEVICEID_3COM_3C940:
1670 case DEVICEID_LINKSYS_EG1032:
1671 case DEVICEID_DLINK_DGE530T_A1:
1672 case DEVICEID_DLINK_DGE530T_B1:
1673 /* Stay with VPD PN. */
1674 (void) pci_get_vpd_ident(dev, &pname);
1676 case DEVICEID_SK_V2:
1677 /* YUKON VPD PN might bear no resemblance to reality. */
1678 switch (sc->sk_type) {
1680 /* Stay with VPD PN. */
1681 (void) pci_get_vpd_ident(dev, &pname);
1684 pname = "Marvell Yukon Gigabit Ethernet";
1687 pname = "Marvell Yukon Lite Gigabit Ethernet";
1690 pname = "Marvell Yukon LP Gigabit Ethernet";
1693 pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
1697 /* Yukon Lite Rev. A0 needs special test. */
1698 if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1702 /* Save flash address register before testing. */
1703 far = sk_win_read_4(sc, SK_EP_ADDR);
1705 sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
1706 testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);
1708 if (testbyte != 0x00) {
1709 /* Yukon Lite Rev. A0 detected. */
1710 sc->sk_type = SK_YUKON_LITE;
1711 sc->sk_rev = SK_YUKON_LITE_REV_A0;
1712 /* Restore flash address register. */
1713 sk_win_write_4(sc, SK_EP_ADDR, far);
1718 device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
1719 "chipver=%02x, rev=%x\n",
1720 pci_get_vendor(dev), pci_get_device(dev),
1721 sc->sk_type, sc->sk_rev);
1726 if (sc->sk_type == SK_YUKON_LITE) {
1727 switch (sc->sk_rev) {
1728 case SK_YUKON_LITE_REV_A0:
1731 case SK_YUKON_LITE_REV_A1:
1734 case SK_YUKON_LITE_REV_A3:
1745 /* Announce the product name and more VPD data if available. */
1747 device_printf(dev, "%s rev. %s(0x%x)\n",
1748 pname, revstr, sc->sk_rev);
1751 device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type);
1752 device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev);
1753 device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
1754 device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
1757 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1758 if (sc->sk_devs[SK_PORT_A] == NULL) {
1759 device_printf(dev, "failed to add child for PORT_A\n");
1763 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1765 device_printf(dev, "failed to allocate memory for "
1766 "ivars of PORT_A\n");
1771 device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1773 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1774 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1775 if (sc->sk_devs[SK_PORT_B] == NULL) {
1776 device_printf(dev, "failed to add child for PORT_B\n");
1780 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1782 device_printf(dev, "failed to allocate memory for "
1783 "ivars of PORT_B\n");
1788 device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1791 /* Turn on the 'driver is loaded' LED. */
1792 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1794 error = bus_generic_attach(dev);
1796 device_printf(dev, "failed to attach port(s)\n");
1800 /* Hook interrupt last to avoid having to lock softc */
1801 error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
1802 NULL, sk_intr, sc, &sc->sk_intrhand);
1805 device_printf(dev, "couldn't set up irq\n");
1817 * Shutdown hardware and free up resources. This can be called any
1818 * time after the mutex has been initialized. It is called in both
1819 * the error case in attach and the normal detach case so it needs
1820 * to be careful about only freeing resources that have actually been
1827 struct sk_if_softc *sc_if;
1830 sc_if = device_get_softc(dev);
1831 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1832 ("sk mutex not initialized in sk_detach"));
1835 ifp = sc_if->sk_ifp;
1836 /* These should only be active if attach_xmac succeeded */
1837 if (device_is_attached(dev)) {
1839 /* Can't hold locks while calling detach */
1840 SK_IF_UNLOCK(sc_if);
1841 callout_drain(&sc_if->sk_tick_ch);
1842 callout_drain(&sc_if->sk_watchdog_ch);
1843 ether_ifdetach(ifp);
1849 * We're generally called from skc_detach() which is using
1850 * device_delete_child() to get to here. It's already trashed
1851 * miibus for us, so don't do it here or we'll panic.
1854 if (sc_if->sk_miibus != NULL)
1855 device_delete_child(dev, sc_if->sk_miibus);
1857 bus_generic_detach(dev);
1859 SK_IF_UNLOCK(sc_if);
1868 struct sk_softc *sc;
1870 sc = device_get_softc(dev);
1871 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
1873 if (device_is_alive(dev)) {
1874 if (sc->sk_devs[SK_PORT_A] != NULL) {
1875 free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
1876 device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1878 if (sc->sk_devs[SK_PORT_B] != NULL) {
1879 free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
1880 device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1882 bus_generic_detach(dev);
1885 if (sc->sk_intrhand)
1886 bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
1887 bus_release_resources(dev, sc->sk_res_spec, sc->sk_res);
1889 mtx_destroy(&sc->sk_mii_mtx);
1890 mtx_destroy(&sc->sk_mtx);
1895 struct sk_dmamap_arg {
1896 bus_addr_t sk_busaddr;
1900 sk_dmamap_cb(arg, segs, nseg, error)
1902 bus_dma_segment_t *segs;
1906 struct sk_dmamap_arg *ctx;
1912 ctx->sk_busaddr = segs[0].ds_addr;
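/*
 * Typical use of this callback, as in sk_dma_alloc() below: ring
 * memory is loaded with bus_dmamap_load(..., sk_dmamap_cb, &ctx,
 * BUS_DMA_NOWAIT) and the bus address of the ring is then read back
 * from ctx.sk_busaddr.
 */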
1916 * Allocate jumbo buffer storage. The SysKonnect adapters support
1917 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1918 * use them in their drivers. In order for us to use them, we need
1919 * large 9K receive buffers, however standard mbuf clusters are only
1920 * 2048 bytes in size. Consequently, we need to allocate and manage
1921 * our own jumbo buffer pool. Fortunately, this does not require an
1922 * excessive amount of additional code.
1926 struct sk_if_softc *sc_if;
1928 struct sk_dmamap_arg ctx;
1929 struct sk_txdesc *txd;
1930 struct sk_rxdesc *rxd;
1931 struct sk_rxdesc *jrxd;
1933 struct sk_jpool_entry *entry;
1936 mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF);
1937 SLIST_INIT(&sc_if->sk_jfree_listhead);
1938 SLIST_INIT(&sc_if->sk_jinuse_listhead);
1940 /* create parent tag */
1943 * This driver should use BUS_SPACE_MAXADDR for lowaddr argument
1944 * in bus_dma_tag_create(9) as the NIC would support DAC mode.
1945 * However bz@ reported that it does not work on amd64 with > 4GB
1946 * RAM. Until we have more clues of the breakage, disable DAC mode
1947 * by limiting DMA address to be in 32bit address space.
1949 error = bus_dma_tag_create(
1950 bus_get_dma_tag(sc_if->sk_if_dev),/* parent */
1951 1, 0, /* algnmnt, boundary */
1952 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1953 BUS_SPACE_MAXADDR, /* highaddr */
1954 NULL, NULL, /* filter, filterarg */
1955 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1957 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1959 NULL, NULL, /* lockfunc, lockarg */
1960 &sc_if->sk_cdata.sk_parent_tag);
1962 device_printf(sc_if->sk_if_dev,
1963 "failed to create parent DMA tag\n");
1966 /* create tag for Tx ring */
1967 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1968 SK_RING_ALIGN, 0, /* algnmnt, boundary */
1969 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1970 BUS_SPACE_MAXADDR, /* highaddr */
1971 NULL, NULL, /* filter, filterarg */
1972 SK_TX_RING_SZ, /* maxsize */
1974 SK_TX_RING_SZ, /* maxsegsize */
1976 NULL, NULL, /* lockfunc, lockarg */
1977 &sc_if->sk_cdata.sk_tx_ring_tag);
1979 device_printf(sc_if->sk_if_dev,
1980 "failed to allocate Tx ring DMA tag\n");
1984 /* create tag for Rx ring */
1985 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1986 SK_RING_ALIGN, 0, /* algnmnt, boundary */
1987 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1988 BUS_SPACE_MAXADDR, /* highaddr */
1989 NULL, NULL, /* filter, filterarg */
1990 SK_RX_RING_SZ, /* maxsize */
1992 SK_RX_RING_SZ, /* maxsegsize */
1994 NULL, NULL, /* lockfunc, lockarg */
1995 &sc_if->sk_cdata.sk_rx_ring_tag);
1997 device_printf(sc_if->sk_if_dev,
1998 "failed to allocate Rx ring DMA tag\n");
2002 /* create tag for jumbo Rx ring */
2003 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2004 SK_RING_ALIGN, 0, /* algnmnt, boundary */
2005 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2006 BUS_SPACE_MAXADDR, /* highaddr */
2007 NULL, NULL, /* filter, filterarg */
2008 SK_JUMBO_RX_RING_SZ, /* maxsize */
2010 SK_JUMBO_RX_RING_SZ, /* maxsegsize */
2012 NULL, NULL, /* lockfunc, lockarg */
2013 &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2015 device_printf(sc_if->sk_if_dev,
2016 "failed to allocate jumbo Rx ring DMA tag\n");
2020 /* create tag for jumbo buffer blocks */
2021 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2022 PAGE_SIZE, 0, /* algnmnt, boundary */
2023 BUS_SPACE_MAXADDR, /* lowaddr */
2024 BUS_SPACE_MAXADDR, /* highaddr */
2025 NULL, NULL, /* filter, filterarg */
2026 SK_JMEM, /* maxsize */
2028 SK_JMEM, /* maxsegsize */
2030 NULL, NULL, /* lockfunc, lockarg */
2031 &sc_if->sk_cdata.sk_jumbo_tag);
2033 device_printf(sc_if->sk_if_dev,
2034 "failed to allocate jumbo Rx buffer block DMA tag\n");
2038 /* create tag for Tx buffers */
2039 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2040 1, 0, /* algnmnt, boundary */
2041 BUS_SPACE_MAXADDR, /* lowaddr */
2042 BUS_SPACE_MAXADDR, /* highaddr */
2043 NULL, NULL, /* filter, filterarg */
2044 MCLBYTES * SK_MAXTXSEGS, /* maxsize */
2045 SK_MAXTXSEGS, /* nsegments */
2046 MCLBYTES, /* maxsegsize */
2048 NULL, NULL, /* lockfunc, lockarg */
2049 &sc_if->sk_cdata.sk_tx_tag);
2051 device_printf(sc_if->sk_if_dev,
2052 "failed to allocate Tx DMA tag\n");
2056 /* create tag for Rx buffers */
2057 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2058 1, 0, /* algnmnt, boundary */
2059 BUS_SPACE_MAXADDR, /* lowaddr */
2060 BUS_SPACE_MAXADDR, /* highaddr */
2061 NULL, NULL, /* filter, filterarg */
2062 MCLBYTES, /* maxsize */
2064 MCLBYTES, /* maxsegsize */
2066 NULL, NULL, /* lockfunc, lockarg */
2067 &sc_if->sk_cdata.sk_rx_tag);
2069 device_printf(sc_if->sk_if_dev,
2070 "failed to allocate Rx DMA tag\n");
2074 /* create tag for jumbo Rx buffers */
2075 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2076 PAGE_SIZE, 0, /* algnmnt, boundary */
2077 BUS_SPACE_MAXADDR, /* lowaddr */
2078 BUS_SPACE_MAXADDR, /* highaddr */
2079 NULL, NULL, /* filter, filterarg */
2080 MCLBYTES * SK_MAXRXSEGS, /* maxsize */
2081 SK_MAXRXSEGS, /* nsegments */
2082 SK_JLEN, /* maxsegsize */
2084 NULL, NULL, /* lockfunc, lockarg */
2085 &sc_if->sk_cdata.sk_jumbo_rx_tag);
2087 device_printf(sc_if->sk_if_dev,
2088 "failed to allocate jumbo Rx DMA tag\n");
2092 /* allocate DMA'able memory and load the DMA map for Tx ring */
2093 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
2094 (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2095 &sc_if->sk_cdata.sk_tx_ring_map);
2097 device_printf(sc_if->sk_if_dev,
2098 "failed to allocate DMA'able memory for Tx ring\n");
2103 error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
2104 sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
2105 SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2107 device_printf(sc_if->sk_if_dev,
2108 "failed to load DMA'able memory for Tx ring\n");
2111 sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;
2113 /* allocate DMA'able memory and load the DMA map for Rx ring */
2114 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
2115 (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2116 &sc_if->sk_cdata.sk_rx_ring_map);
2118 device_printf(sc_if->sk_if_dev,
2119 "failed to allocate DMA'able memory for Rx ring\n");
2124 error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
2125 sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
2126 SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2128 device_printf(sc_if->sk_if_dev,
2129 "failed to load DMA'able memory for Rx ring\n");
2132 sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;
2134 /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
2135 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2136 (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring,
2137 BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2139 device_printf(sc_if->sk_if_dev,
2140 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2145 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2146 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2147 sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
2148 &ctx, BUS_DMA_NOWAIT);
2150 device_printf(sc_if->sk_if_dev,
2151 "failed to load DMA'able memory for jumbo Rx ring\n");
2154 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
2156 /* create DMA maps for Tx buffers */
2157 for (i = 0; i < SK_TX_RING_CNT; i++) {
2158 txd = &sc_if->sk_cdata.sk_txdesc[i];
2161 error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
2164 device_printf(sc_if->sk_if_dev,
2165 "failed to create Tx dmamap\n");
2169 /* create DMA maps for Rx buffers */
2170 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2171 &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
2172 device_printf(sc_if->sk_if_dev,
2173 "failed to create spare Rx dmamap\n");
2176 for (i = 0; i < SK_RX_RING_CNT; i++) {
2177 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2180 error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2183 device_printf(sc_if->sk_if_dev,
2184 "failed to create Rx dmamap\n");
2188 /* create DMA maps for jumbo Rx buffers */
2189 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2190 &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
2191 device_printf(sc_if->sk_if_dev,
2192 "failed to create spare jumbo Rx dmamap\n");
2195 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2196 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2198 jrxd->rx_dmamap = 0;
2199 error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2202 device_printf(sc_if->sk_if_dev,
2203 "failed to create jumbo Rx dmamap\n");
2208 /* allocate DMA'able memory and load the DMA map for jumbo buf */
2209 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_tag,
2210 (void **)&sc_if->sk_rdata.sk_jumbo_buf,
2211 BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_map);
2213 device_printf(sc_if->sk_if_dev,
2214 "failed to allocate DMA'able memory for jumbo buf\n");
2219 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_tag,
2220 sc_if->sk_cdata.sk_jumbo_map,
2221 sc_if->sk_rdata.sk_jumbo_buf, SK_JMEM, sk_dmamap_cb,
2222 &ctx, BUS_DMA_NOWAIT);
2224 device_printf(sc_if->sk_if_dev,
2225 "failed to load DMA'able memory for jumbobuf\n");
2228 sc_if->sk_rdata.sk_jumbo_buf_paddr = ctx.sk_busaddr;
2231 * Now divide it up into 9K pieces and save the addresses
2234 ptr = sc_if->sk_rdata.sk_jumbo_buf;
2235 for (i = 0; i < SK_JSLOTS; i++) {
2236 sc_if->sk_cdata.sk_jslots[i] = ptr;
2238 entry = malloc(sizeof(struct sk_jpool_entry),
2239 M_DEVBUF, M_NOWAIT);
2240 if (entry == NULL) {
2241 device_printf(sc_if->sk_if_dev,
2242 "no memory for jumbo buffers!\n");
2247 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry,
2257 struct sk_if_softc *sc_if;
2259 struct sk_txdesc *txd;
2260 struct sk_rxdesc *rxd;
2261 struct sk_rxdesc *jrxd;
2262 struct sk_jpool_entry *entry;
2265 SK_JLIST_LOCK(sc_if);
2266 while ((entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead))) {
2267 device_printf(sc_if->sk_if_dev,
2268 "asked to free buffer that is in use!\n");
2269 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
2270 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry,
2274 while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) {
2275 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
2276 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
2277 free(entry, M_DEVBUF);
2279 SK_JLIST_UNLOCK(sc_if);
2281 /* destroy jumbo buffer block */
2282 if (sc_if->sk_cdata.sk_jumbo_map)
2283 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_tag,
2284 sc_if->sk_cdata.sk_jumbo_map);
2286 if (sc_if->sk_rdata.sk_jumbo_buf) {
2287 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_tag,
2288 sc_if->sk_rdata.sk_jumbo_buf,
2289 sc_if->sk_cdata.sk_jumbo_map);
2290 sc_if->sk_rdata.sk_jumbo_buf = NULL;
2291 sc_if->sk_cdata.sk_jumbo_map = 0;
2295 if (sc_if->sk_cdata.sk_tx_ring_tag) {
2296 if (sc_if->sk_cdata.sk_tx_ring_map)
2297 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
2298 sc_if->sk_cdata.sk_tx_ring_map);
2299 if (sc_if->sk_cdata.sk_tx_ring_map &&
2300 sc_if->sk_rdata.sk_tx_ring)
2301 bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
2302 sc_if->sk_rdata.sk_tx_ring,
2303 sc_if->sk_cdata.sk_tx_ring_map);
2304 sc_if->sk_rdata.sk_tx_ring = NULL;
2305 sc_if->sk_cdata.sk_tx_ring_map = 0;
2306 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
2307 sc_if->sk_cdata.sk_tx_ring_tag = NULL;
2310 if (sc_if->sk_cdata.sk_rx_ring_tag) {
2311 if (sc_if->sk_cdata.sk_rx_ring_map)
2312 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
2313 sc_if->sk_cdata.sk_rx_ring_map);
2314 if (sc_if->sk_cdata.sk_rx_ring_map &&
2315 sc_if->sk_rdata.sk_rx_ring)
2316 bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
2317 sc_if->sk_rdata.sk_rx_ring,
2318 sc_if->sk_cdata.sk_rx_ring_map);
2319 sc_if->sk_rdata.sk_rx_ring = NULL;
2320 sc_if->sk_cdata.sk_rx_ring_map = 0;
2321 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
2322 sc_if->sk_cdata.sk_rx_ring_tag = NULL;
2325 if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
2326 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map)
2327 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2328 sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2329 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map &&
2330 sc_if->sk_rdata.sk_jumbo_rx_ring)
2331 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2332 sc_if->sk_rdata.sk_jumbo_rx_ring,
2333 sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2334 sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
2335 sc_if->sk_cdata.sk_jumbo_rx_ring_map = 0;
2336 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2337 sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
2340 if (sc_if->sk_cdata.sk_tx_tag) {
2341 for (i = 0; i < SK_TX_RING_CNT; i++) {
2342 txd = &sc_if->sk_cdata.sk_txdesc[i];
2343 if (txd->tx_dmamap) {
2344 bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
2349 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
2350 sc_if->sk_cdata.sk_tx_tag = NULL;
2353 if (sc_if->sk_cdata.sk_rx_tag) {
2354 for (i = 0; i < SK_RX_RING_CNT; i++) {
2355 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2356 if (rxd->rx_dmamap) {
2357 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2362 if (sc_if->sk_cdata.sk_rx_sparemap) {
2363 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2364 sc_if->sk_cdata.sk_rx_sparemap);
2365 sc_if->sk_cdata.sk_rx_sparemap = 0;
2367 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
2368 sc_if->sk_cdata.sk_rx_tag = NULL;
2370 /* jumbo Rx buffers */
2371 if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
2372 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2373 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2374 if (jrxd->rx_dmamap) {
2376 sc_if->sk_cdata.sk_jumbo_rx_tag,
2378 jrxd->rx_dmamap = 0;
2381 if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
2382 bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
2383 sc_if->sk_cdata.sk_jumbo_rx_sparemap);
2384 sc_if->sk_cdata.sk_jumbo_rx_sparemap = 0;
2386 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
2387 sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
2390 if (sc_if->sk_cdata.sk_parent_tag) {
2391 bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
2392 sc_if->sk_cdata.sk_parent_tag = NULL;
2394 mtx_destroy(&sc_if->sk_jlist_mtx);
2398 * Allocate a jumbo buffer.
2402 struct sk_if_softc *sc_if;
2404 struct sk_jpool_entry *entry;
2406 SK_JLIST_LOCK(sc_if);
2408 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
2410 if (entry == NULL) {
2411 SK_JLIST_UNLOCK(sc_if);
2415 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
2416 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
2418 SK_JLIST_UNLOCK(sc_if);
2420 return (sc_if->sk_cdata.sk_jslots[entry->slot]);
2424 * Release a jumbo buffer.
2431 struct sk_if_softc *sc_if;
2432 struct sk_jpool_entry *entry;
2435 /* Extract the softc struct pointer. */
2436 sc_if = (struct sk_if_softc *)args;
2437 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
2439 SK_JLIST_LOCK(sc_if);
2440 /* calculate the slot this buffer belongs to */
2441 i = ((vm_offset_t)buf
2442 - (vm_offset_t)sc_if->sk_rdata.sk_jumbo_buf) / SK_JLEN;
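/*
 * The jumbo buffers are carved out of one contiguous DMA block in
 * SK_JLEN-sized slots, so the byte offset of 'buf' from the start of
 * sk_jumbo_buf divided by SK_JLEN recovers its slot index (e.g. a
 * buffer that begins 2 * SK_JLEN bytes into the block is slot 2).
 */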
2443 KASSERT(i >= 0 && i < SK_JSLOTS,
2444 ("%s: asked to free buffer that we don't manage!", __func__));
2446 entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
2447 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
2449 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
2450 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
2451 if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
2454 SK_JLIST_UNLOCK(sc_if);
2458 sk_txcksum(ifp, m, f)
2461 struct sk_tx_desc *f;
2467 offset = sizeof(struct ip) + ETHER_HDR_LEN;
2468 for(; m && m->m_len == 0; m = m->m_next)
2470 if (m == NULL || m->m_len < ETHER_HDR_LEN) {
2471 if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
2472 /* checksum may be corrupted */
2475 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
2476 if (m->m_len != ETHER_HDR_LEN) {
2477 if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
2479 /* checksum may be corrupted */
2482 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
2485 offset = sizeof(struct ip) + ETHER_HDR_LEN;
2486 /* checksum may be corrupted */
2489 ip = mtod(m, struct ip *);
2491 p = mtod(m, u_int8_t *);
2493 ip = (struct ip *)p;
2495 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
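/*
 * 'offset' now counts from the start of the frame to the end of the
 * IP header (Ethernet header plus IP header including any options),
 * i.e. where the TCP/UDP region the hardware should checksum begins.
 */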
2498 f->sk_csum_startval = 0;
2499 f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) |
2504 sk_encap(sc_if, m_head)
2505 struct sk_if_softc *sc_if;
2506 struct mbuf **m_head;
2508 struct sk_txdesc *txd;
2509 struct sk_tx_desc *f = NULL;
2511 bus_dma_segment_t txsegs[SK_MAXTXSEGS];
2512 u_int32_t cflags, frag, si, sk_ctl;
2515 SK_IF_LOCK_ASSERT(sc_if);
2517 if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
2520 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2521 txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2522 if (error == EFBIG) {
2523 m = m_defrag(*m_head, M_DONTWAIT);
2530 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2531 txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2537 } else if (error != 0)
2544 if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
2545 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2550 if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0)
2551 cflags = SK_OPCODE_CSUM;
2553 cflags = SK_OPCODE_DEFAULT;
2554 si = frag = sc_if->sk_cdata.sk_tx_prod;
2555 for (i = 0; i < nseg; i++) {
2556 f = &sc_if->sk_rdata.sk_tx_ring[frag];
2557 f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
2558 f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
2559 sk_ctl = txsegs[i].ds_len | cflags;
2561 if (cflags == SK_OPCODE_CSUM)
2562 sk_txcksum(sc_if->sk_ifp, m, f);
2563 sk_ctl |= SK_TXCTL_FIRSTFRAG;
2565 sk_ctl |= SK_TXCTL_OWN;
2566 f->sk_ctl = htole32(sk_ctl);
2567 sc_if->sk_cdata.sk_tx_cnt++;
2568 SK_INC(frag, SK_TX_RING_CNT);
2570 sc_if->sk_cdata.sk_tx_prod = frag;
2572 /* set EOF on the last descriptor */
2573 frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
2574 f = &sc_if->sk_rdata.sk_tx_ring[frag];
2575 f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);
2577 /* hand ownership of the first descriptor over to the NIC */
2578 f = &sc_if->sk_rdata.sk_tx_ring[si];
2579 f->sk_ctl |= htole32(SK_TXCTL_OWN);
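/*
 * Ownership of the first fragment is handed to the NIC only after the
 * rest of the chain has been filled in, so the chip never sees a
 * partially built chain; the bus_dmamap_sync() calls below push the
 * descriptor updates out before transmission is actually started.
 */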
2581 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
2582 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
2585 /* sync descriptors */
2586 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2587 BUS_DMASYNC_PREWRITE);
2588 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2589 sc_if->sk_cdata.sk_tx_ring_map,
2590 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2599 struct sk_if_softc *sc_if;
2601 sc_if = ifp->if_softc;
2604 sk_start_locked(ifp);
2605 SK_IF_UNLOCK(sc_if);
2611 sk_start_locked(ifp)
2614 struct sk_softc *sc;
2615 struct sk_if_softc *sc_if;
2616 struct mbuf *m_head;
2619 sc_if = ifp->if_softc;
2620 sc = sc_if->sk_softc;
2622 SK_IF_LOCK_ASSERT(sc_if);
2624 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2625 sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
2626 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2631 * Pack the data into the transmit ring. If we
2632 * don't have room, set the OACTIVE flag and wait
2633 * for the NIC to drain the ring.
2635 if (sk_encap(sc_if, &m_head)) {
2638 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2639 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
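/*
 * The mbuf was put back at the head of the send queue, so it is
 * retried on the next start; OACTIVE keeps us from dequeueing
 * anything else until sk_txeof() has freed some descriptors.
 */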
2645 * If there's a BPF listener, bounce a copy of this frame
2648 BPF_MTAP(ifp, m_head);
2653 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2655 /* Set a timeout in case the chip goes out to lunch. */
2656 sc_if->sk_watchdog_timer = 5;
2665 struct sk_if_softc *sc_if;
2669 sc_if = ifp->if_softc;
2671 SK_IF_LOCK_ASSERT(sc_if);
2673 if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer)
2677 * Reclaim first as there is a possibility of losing Tx completion
2681 if (sc_if->sk_cdata.sk_tx_cnt != 0) {
2682 if_printf(sc_if->sk_ifp, "watchdog timeout\n");
2684 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2685 sk_init_locked(sc_if);
2689 callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
2698 struct sk_softc *sc;
2700 sc = device_get_softc(dev);
2703 /* Turn off the 'driver is loaded' LED. */
2704 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2707 * Reset the GEnesis controller. Doing this should also
2708 * assert the resets on the attached XMAC(s).
2720 struct sk_softc *sc;
2721 struct sk_if_softc *sc_if0, *sc_if1;
2722 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2724 sc = device_get_softc(dev);
2728 sc_if0 = sc->sk_if[SK_PORT_A];
2729 sc_if1 = sc->sk_if[SK_PORT_B];
2731 ifp0 = sc_if0->sk_ifp;
2733 ifp1 = sc_if1->sk_ifp;
2738 sc->sk_suspended = 1;
2749 struct sk_softc *sc;
2750 struct sk_if_softc *sc_if0, *sc_if1;
2751 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2753 sc = device_get_softc(dev);
2757 sc_if0 = sc->sk_if[SK_PORT_A];
2758 sc_if1 = sc->sk_if[SK_PORT_B];
2760 ifp0 = sc_if0->sk_ifp;
2762 ifp1 = sc_if1->sk_ifp;
2763 if (ifp0 != NULL && ifp0->if_flags & IFF_UP)
2764 sk_init_locked(sc_if0);
2765 if (ifp1 != NULL && ifp1->if_flags & IFF_UP)
2766 sk_init_locked(sc_if1);
2767 sc->sk_suspended = 0;
2775 * According to the SK-NET GENESIS data sheet, the hardware can compute
2776 * two Rx checksums at the same time (each checksum start position is
2777 * programmed in the Rx descriptors). However, it seems that the TCP/UDP
2778 * checksum does not work, at least on my Yukon hardware. I tried every
2779 * possible way to get a correct checksum value but never got one. So TCP/UDP
2780 * checksum offload is disabled at the moment and only IP checksum offload
2782 * As the normal IP header size is 20 bytes, I don't expect this to give an
2783 * increase in throughput. However, it doesn't seem to hurt performance in
2784 * my testing. If there is more detailed information on the checksum secrets
2785 * of the hardware in question, please contact yongari@FreeBSD.org to add
2786 * TCP/UDP checksum offload support.
2788 static __inline void
2789 sk_rxcksum(ifp, m, csum)
2794 struct ether_header *eh;
2796 int32_t hlen, len, pktlen;
2797 u_int16_t csum1, csum2, ipcsum;
2799 pktlen = m->m_pkthdr.len;
2800 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
2802 eh = mtod(m, struct ether_header *);
2803 if (eh->ether_type != htons(ETHERTYPE_IP))
2805 ip = (struct ip *)(eh + 1);
2806 if (ip->ip_v != IPVERSION)
2808 hlen = ip->ip_hl << 2;
2809 pktlen -= sizeof(struct ether_header);
2810 if (hlen < sizeof(struct ip))
2812 if (ntohs(ip->ip_len) < hlen)
2814 if (ntohs(ip->ip_len) != pktlen)
2817 csum1 = htons(csum & 0xffff);
2818 csum2 = htons((csum >> 16) & 0xffff);
2819 ipcsum = in_addword(csum1, ~csum2 & 0xffff);
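/*
 * The chip returns two 16-bit one's complement sums whose start
 * positions were programmed into the Rx descriptor: csum1 presumably
 * covers the frame from the IP header onward and csum2 from
 * sizeof(struct ip) bytes further in, so adding ~csum2 (one's
 * complement subtraction) leaves the sum over the first 20 bytes of
 * the IP header.  Any IP options are folded in by the fixup below;
 * a correct header then folds to 0xffff, which is what the
 * CSUM_IP_VALID test checks.
 */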
2820 /* checksum fixup for IP options */
2821 len = hlen - sizeof(struct ip);
2824 * If the second checksum value were correct we could compute the IP
2825 * checksum with simple math. Unfortunately the second checksum
2826 * value is wrong, so we can't verify the checksum from that
2827 * value (it seems there is some magic needed to get the correct
2828 * value). If the second checksum value were correct it would also
2829 * mean we could get the TCP/UDP checksum here. However, that still
2830 * needs a pseudo-header checksum calculation due to hardware
2835 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2836 if (ipcsum == 0xffff)
2837 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2841 sk_rxvalid(sc, stat, len)
2842 struct sk_softc *sc;
2843 u_int32_t stat, len;
2846 if (sc->sk_type == SK_GENESIS) {
2847 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
2848 XM_RXSTAT_BYTES(stat) != len)
2851 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
2852 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
2853 YU_RXSTAT_JABBER)) != 0 ||
2854 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
2855 YU_RXSTAT_BYTES(stat) != len)
2864 struct sk_if_softc *sc_if;
2866 struct sk_softc *sc;
2869 struct sk_rx_desc *cur_rx;
2870 struct sk_rxdesc *rxd;
2872 u_int32_t csum, rxstat, sk_ctl;
2874 sc = sc_if->sk_softc;
2875 ifp = sc_if->sk_ifp;
2877 SK_IF_LOCK_ASSERT(sc_if);
2879 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2880 sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);
2883 for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
2884 prog++, SK_INC(cons, SK_RX_RING_CNT)) {
2885 cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
2886 sk_ctl = le32toh(cur_rx->sk_ctl);
2887 if ((sk_ctl & SK_RXCTL_OWN) != 0)
2889 rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
2890 rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2892 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2893 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2894 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2895 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2896 SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
2897 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2899 sk_discard_rxbuf(sc_if, cons);
2904 csum = le32toh(cur_rx->sk_csum);
2905 if (sk_newbuf(sc_if, cons) != 0) {
2907 /* reuse old buffer */
2908 sk_discard_rxbuf(sc_if, cons);
2911 m->m_pkthdr.rcvif = ifp;
2912 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2914 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2915 sk_rxcksum(ifp, m, csum);
2916 SK_IF_UNLOCK(sc_if);
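/*
 * The driver lock is dropped around if_input() since the upper
 * layers may take their own locks or call back into the driver;
 * it must be reacquired before the Rx ring is touched again.
 */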
2917 (*ifp->if_input)(ifp, m);
2922 sc_if->sk_cdata.sk_rx_cons = cons;
2923 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2924 sc_if->sk_cdata.sk_rx_ring_map,
2925 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2930 sk_jumbo_rxeof(sc_if)
2931 struct sk_if_softc *sc_if;
2933 struct sk_softc *sc;
2936 struct sk_rx_desc *cur_rx;
2937 struct sk_rxdesc *jrxd;
2939 u_int32_t csum, rxstat, sk_ctl;
2941 sc = sc_if->sk_softc;
2942 ifp = sc_if->sk_ifp;
2944 SK_IF_LOCK_ASSERT(sc_if);
2946 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2947 sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);
2950 for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
2951 prog < SK_JUMBO_RX_RING_CNT;
2952 prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
2953 cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
2954 sk_ctl = le32toh(cur_rx->sk_ctl);
2955 if ((sk_ctl & SK_RXCTL_OWN) != 0)
2957 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
2958 rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2960 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2961 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2962 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2963 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2964 SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
2965 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2967 sk_discard_jumbo_rxbuf(sc_if, cons);
2972 csum = le32toh(cur_rx->sk_csum);
2973 if (sk_jumbo_newbuf(sc_if, cons) != 0) {
2975 /* reuse old buffer */
2976 sk_discard_jumbo_rxbuf(sc_if, cons);
2979 m->m_pkthdr.rcvif = ifp;
2980 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2982 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2983 sk_rxcksum(ifp, m, csum);
2984 SK_IF_UNLOCK(sc_if);
2985 (*ifp->if_input)(ifp, m);
2990 sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
2991 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2992 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2993 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2999 struct sk_if_softc *sc_if;
3001 struct sk_softc *sc;
3002 struct sk_txdesc *txd;
3003 struct sk_tx_desc *cur_tx;
3005 u_int32_t idx, sk_ctl;
3007 sc = sc_if->sk_softc;
3008 ifp = sc_if->sk_ifp;
3010 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
3013 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
3014 sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
3016 * Go through our tx ring and free mbufs for those
3017 * frames that have been sent.
3019 for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
3020 if (sc_if->sk_cdata.sk_tx_cnt <= 0)
3022 cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
3023 sk_ctl = le32toh(cur_tx->sk_ctl);
3024 if (sk_ctl & SK_TXCTL_OWN)
3026 sc_if->sk_cdata.sk_tx_cnt--;
3027 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3028 if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
3030 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
3031 BUS_DMASYNC_POSTWRITE);
3032 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
3037 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
3038 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
3039 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
3041 sc_if->sk_cdata.sk_tx_cons = idx;
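/*
 * Disarm the watchdog when the ring has drained completely so an
 * idle interface cannot trigger a spurious timeout; otherwise keep
 * it armed for another 5 seconds.
 */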
3042 sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
3044 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
3045 sc_if->sk_cdata.sk_tx_ring_map,
3046 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3053 struct sk_if_softc *sc_if;
3054 struct mii_data *mii;
3059 ifp = sc_if->sk_ifp;
3060 mii = device_get_softc(sc_if->sk_miibus);
3062 if (!(ifp->if_flags & IFF_UP))
3065 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3066 sk_intr_bcom(sc_if);
3071 * According to SysKonnect, the correct way to verify that
3072 * the link has come back up is to poll bit 0 of the GPIO
3073 * register three times. This pin has the signal from the
3074 * link_sync pin connected to it; if we read the same link
3075 * state 3 times in a row, we know the link is up.
3077 for (i = 0; i < 3; i++) {
3078 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
3083 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3087 /* Turn the GP0 interrupt back on. */
3088 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
3089 SK_XM_READ_2(sc_if, XM_ISR);
3091 callout_stop(&sc_if->sk_tick_ch);
3095 sk_yukon_tick(xsc_if)
3098 struct sk_if_softc *sc_if;
3099 struct mii_data *mii;
3102 mii = device_get_softc(sc_if->sk_miibus);
3105 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
3110 struct sk_if_softc *sc_if;
3112 struct mii_data *mii;
3115 mii = device_get_softc(sc_if->sk_miibus);
3116 ifp = sc_if->sk_ifp;
3118 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3121 * Read the PHY interrupt register to make sure
3122 * we clear any pending interrupts.
3124 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
3126 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3127 sk_init_xmac(sc_if);
3131 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
3133 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
3136 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
3138 /* Turn off the link LED. */
3139 SK_IF_WRITE_1(sc_if, 0,
3140 SK_LINKLED1_CTL, SK_LINKLED_OFF);
3142 } else if (status & BRGPHY_ISR_LNK_CHG) {
3143 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3144 BRGPHY_MII_IMR, 0xFF00);
3147 /* Turn on the link LED. */
3148 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3149 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
3150 SK_LINKLED_BLINK_OFF);
3153 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3157 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3164 struct sk_if_softc *sc_if;
3166 struct sk_softc *sc;
3169 sc = sc_if->sk_softc;
3170 status = SK_XM_READ_2(sc_if, XM_ISR);
3173 * Link has gone down. Start MII tick timeout to
3174 * watch for link resync.
3176 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
3177 if (status & XM_ISR_GP0_SET) {
3178 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
3179 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3182 if (status & XM_ISR_AUTONEG_DONE) {
3183 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3187 if (status & XM_IMR_TX_UNDERRUN)
3188 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
3190 if (status & XM_IMR_RX_OVERRUN)
3191 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
3193 status = SK_XM_READ_2(sc_if, XM_ISR);
3199 sk_intr_yukon(sc_if)
3200 struct sk_if_softc *sc_if;
3204 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
3206 if ((status & SK_GMAC_INT_RX_OVER) != 0) {
3207 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3208 SK_RFCTL_RX_FIFO_OVER);
3211 if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
3212 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3213 SK_TFCTL_TX_FIFO_UNDER);
3221 struct sk_softc *sc = xsc;
3222 struct sk_if_softc *sc_if0, *sc_if1;
3223 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
3228 status = CSR_READ_4(sc, SK_ISSR);
3229 if (status == 0 || status == 0xffffffff || sc->sk_suspended)
3232 sc_if0 = sc->sk_if[SK_PORT_A];
3233 sc_if1 = sc->sk_if[SK_PORT_B];
3236 ifp0 = sc_if0->sk_ifp;
3238 ifp1 = sc_if1->sk_ifp;
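/*
 * Service events until no enabled interrupt sources remain: Rx EOF
 * first, then Tx EOF, then MAC/PHY events, re-reading SK_ISSR at the
 * bottom of each pass.  The interrupt mask register is rewritten once
 * the loop has drained.
 */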
3240 for (; (status &= sc->sk_intrmask) != 0;) {
3241 /* Handle receive interrupts first. */
3242 if (status & SK_ISR_RX1_EOF) {
3243 if (ifp0->if_mtu > SK_MAX_FRAMELEN)
3244 sk_jumbo_rxeof(sc_if0);
3247 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
3248 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3250 if (status & SK_ISR_RX2_EOF) {
3251 if (ifp1->if_mtu > SK_MAX_FRAMELEN)
3252 sk_jumbo_rxeof(sc_if1);
3255 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
3256 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3259 /* Then transmit interrupts. */
3260 if (status & SK_ISR_TX1_S_EOF) {
3262 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
3264 if (status & SK_ISR_TX2_S_EOF) {
3266 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
3269 /* Then MAC interrupts. */
3270 if (status & SK_ISR_MAC1 &&
3271 ifp0->if_drv_flags & IFF_DRV_RUNNING) {
3272 if (sc->sk_type == SK_GENESIS)
3273 sk_intr_xmac(sc_if0);
3275 sk_intr_yukon(sc_if0);
3278 if (status & SK_ISR_MAC2 &&
3279 ifp1->if_drv_flags & IFF_DRV_RUNNING) {
3280 if (sc->sk_type == SK_GENESIS)
3281 sk_intr_xmac(sc_if1);
3283 sk_intr_yukon(sc_if1);
3286 if (status & SK_ISR_EXTERNAL_REG) {
3288 sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
3289 sk_intr_bcom(sc_if0);
3291 sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
3292 sk_intr_bcom(sc_if1);
3294 status = CSR_READ_4(sc, SK_ISSR);
3297 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3299 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3300 sk_start_locked(ifp0);
3301 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3302 sk_start_locked(ifp1);
3310 struct sk_if_softc *sc_if;
3312 struct sk_softc *sc;
3314 u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2];
3315 struct sk_bcom_hack bhack[] = {
3316 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
3317 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
3318 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
3321 SK_IF_LOCK_ASSERT(sc_if);
3323 sc = sc_if->sk_softc;
3324 ifp = sc_if->sk_ifp;
3326 /* Unreset the XMAC. */
3327 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
3330 /* Reset the XMAC's internal state. */
3331 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3333 /* Save the XMAC II revision */
3334 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
3337 * Perform additional initialization for external PHYs,
3338 * namely for the 1000baseTX cards that use the XMAC's
3341 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3345 /* Take PHY out of reset. */
3346 val = sk_win_read_4(sc, SK_GPIO);
3347 if (sc_if->sk_port == SK_PORT_A)
3348 val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
3350 val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
3351 sk_win_write_4(sc, SK_GPIO, val);
3353 /* Enable GMII mode on the XMAC. */
3354 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
3356 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3357 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
3359 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3360 BRGPHY_MII_IMR, 0xFFF0);
3363 * Early versions of the BCM5400 apparently have
3364 * a bug that requires them to have their reserved
3365 * registers initialized to some magic values. I don't
3366 * know what the numbers do, I'm just the messenger.
3368 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
3370 while(bhack[i].reg) {
3371 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3372 bhack[i].reg, bhack[i].val);
3378 /* Set station address */
3379 bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
3380 SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
3381 SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
3382 SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
3383 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
3385 if (ifp->if_flags & IFF_BROADCAST) {
3386 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3388 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3391 /* We don't need the FCS appended to the packet. */
3392 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
3394 /* We want short frames padded to 60 bytes. */
3395 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
3398 * Enable the reception of all error frames. This is
3399 * a necessary evil due to the design of the XMAC. The
3400 * XMAC's receive FIFO is only 8K in size, however jumbo
3401 * frames can be up to 9000 bytes in length. When bad
3402 * frame filtering is enabled, the XMAC's RX FIFO operates
3403 * in 'store and forward' mode. For this to work, the
3404 * entire frame has to fit into the FIFO, but that means
3405 * that jumbo frames larger than 8192 bytes will be
3406 * truncated. Disabling all bad frame filtering causes
3407 * the RX FIFO to operate in streaming mode, in which
3408 * case the XMAC will start transferring frames out of the
3409 * RX FIFO as soon as the FIFO threshold is reached.
3411 if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3412 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
3413 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
3414 XM_MODE_RX_INRANGELEN);
3415 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3417 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3420 * Bump up the transmit threshold. This helps hold off transmit
3421 * underruns when we're blasting traffic from both ports at once.
3423 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
3425 /* Set promiscuous mode */
3426 sk_setpromisc(sc_if);
3428 /* Set multicast filter */
3431 /* Clear and enable interrupts */
3432 SK_XM_READ_2(sc_if, XM_ISR);
3433 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
3434 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
3436 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3438 /* Configure MAC arbiter */
3439 switch(sc_if->sk_xmac_rev) {
3440 case XM_XMAC_REV_B2:
3441 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
3442 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
3443 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
3444 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
3445 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
3446 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
3447 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
3448 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
3449 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3451 case XM_XMAC_REV_C1:
3452 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
3453 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
3454 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
3455 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
3456 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
3457 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
3458 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
3459 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
3460 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3465 sk_win_write_2(sc, SK_MACARB_CTL,
3466 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
3474 sk_init_yukon(sc_if)
3475 struct sk_if_softc *sc_if;
3479 struct sk_softc *sc;
3483 SK_IF_LOCK_ASSERT(sc_if);
3485 sc = sc_if->sk_softc;
3486 ifp = sc_if->sk_ifp;
3488 if (sc->sk_type == SK_YUKON_LITE &&
3489 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3491 * Workaround code for COMA mode, set PHY reset.
3492 * Otherwise it will not correctly take the chip out of
3495 v = sk_win_read_4(sc, SK_GPIO);
3496 v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
3497 sk_win_write_4(sc, SK_GPIO, v);
3500 /* GMAC and GPHY Reset */
3501 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
3502 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
3505 if (sc->sk_type == SK_YUKON_LITE &&
3506 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3508 * Workaround code for COMA mode, clear PHY reset
3510 v = sk_win_read_4(sc, SK_GPIO);
3513 sk_win_write_4(sc, SK_GPIO, v);
3516 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
3517 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
3519 if (sc->sk_coppertype)
3520 phy |= SK_GPHY_COPPER;
3522 phy |= SK_GPHY_FIBER;
3524 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
3526 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
3527 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
3528 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
3530 /* unused read of the interrupt source register */
3531 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
3533 reg = SK_YU_READ_2(sc_if, YUKON_PAR);
3535 /* MIB Counter Clear Mode set */
3536 reg |= YU_PAR_MIB_CLR;
3537 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3539 /* MIB Counter Clear Mode clear */
3540 reg &= ~YU_PAR_MIB_CLR;
3541 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3543 /* receive control reg */
3544 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
3546 /* transmit parameter register */
3547 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
3548 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
3550 /* serial mode register */
3551 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
3552 if (ifp->if_mtu > SK_MAX_FRAMELEN)
3553 reg |= YU_SMR_MFL_JUMBO;
3554 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
3556 /* Setup Yukon's address */
3557 for (i = 0; i < 3; i++) {
3558 /* Write Source Address 1 (unicast filter) */
3559 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
3560 IF_LLADDR(sc_if->sk_ifp)[i * 2] |
3561 IF_LLADDR(sc_if->sk_ifp)[i * 2 + 1] << 8);
3564 for (i = 0; i < 3; i++) {
3565 reg = sk_win_read_2(sc_if->sk_softc,
3566 SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
3567 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
3570 /* Set promiscuous mode */
3571 sk_setpromisc(sc_if);
3573 /* Set multicast filter */
3576 /* enable interrupt mask for counter overflows */
3577 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
3578 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
3579 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
3581 /* Configure RX MAC FIFO Flush Mask */
3582 v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
3583 YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
3585 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);
3587 /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
3588 if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
3589 v = SK_TFCTL_OPERATION_ON;
3591 v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
3592 /* Configure RX MAC FIFO */
3593 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
3594 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);
3596 /* Increase the flush threshold to 64 bytes */
3597 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
3598 SK_RFCTL_FIFO_THRESHOLD + 1);
3600 /* Configure TX MAC FIFO */
3601 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
3602 SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
3606 * Note that to properly initialize any part of the GEnesis chip,
3607 * you first have to take it out of reset mode.
3613 struct sk_if_softc *sc_if = xsc;
3616 sk_init_locked(sc_if);
3617 SK_IF_UNLOCK(sc_if);
3623 sk_init_locked(sc_if)
3624 struct sk_if_softc *sc_if;
3626 struct sk_softc *sc;
3628 struct mii_data *mii;
3633 SK_IF_LOCK_ASSERT(sc_if);
3635 ifp = sc_if->sk_ifp;
3636 sc = sc_if->sk_softc;
3637 mii = device_get_softc(sc_if->sk_miibus);
3639 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3642 /* Cancel pending I/O and free all RX/TX buffers. */
3645 if (sc->sk_type == SK_GENESIS) {
3646 /* Configure LINK_SYNC LED */
3647 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
3648 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3649 SK_LINKLED_LINKSYNC_ON);
3651 /* Configure RX LED */
3652 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
3653 SK_RXLEDCTL_COUNTER_START);
3655 /* Configure TX LED */
3656 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
3657 SK_TXLEDCTL_COUNTER_START);
3661 * Configure descriptor poll timer
3663 * The SK-NET GENESIS data sheet says that a Start transmit command can
3664 * be lost due to CPU/cache related interim storage problems under
3665 * certain conditions. The document recommends a polling mechanism that
3666 * sends a Start transmit command regularly to initiate the transfer of
3667 * ready descriptors. To cope with this issue sk(4) now enables the
3668 * descriptor poll timer to initiate descriptor processing periodically,
3669 * as defined by SK_DPT_TIMER_MAX. However, sk(4) still issues
3670 * SK_TXBMU_TX_START to the Tx BMU to get fast execution of the Tx
3671 * command instead of waiting for the next descriptor polling time.
3672 * The same rule may apply to the Rx side too, but it seems that it is
3673 * not needed at the moment.
3674 * Since sk(4) uses descriptor polling only as a last resort, there is
3675 * no need to set a polling time smaller than the maximum allowable one.
3677 SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);
3679 /* Configure I2C registers */
3681 /* Configure XMAC(s) */
3682 switch (sc->sk_type) {
3684 sk_init_xmac(sc_if);
3689 sk_init_yukon(sc_if);
3694 if (sc->sk_type == SK_GENESIS) {
3695 /* Configure MAC FIFOs */
3696 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
3697 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
3698 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
3700 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
3701 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
3702 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
3705 /* Configure transmit arbiter(s) */
3706 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
3707 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
3709 /* Configure RAMbuffers */
3710 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
3711 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
3712 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
3713 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
3714 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
3715 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
3717 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
3718 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
3719 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
3720 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
3721 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
3722 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
3723 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
3725 /* Configure BMUs */
3726 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
3727 if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3728 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3729 SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3730 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3731 SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3733 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3734 SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
3735 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3736 SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
3739 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
3740 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
3741 SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
3742 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
3743 SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));
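/*
 * The BMUs fetch descriptors directly from host memory, so the 64-bit
 * bus address of each ring's first descriptor is split across the
 * LO/HI current-address registers written above.
 */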
3745 /* Init descriptors */
3746 if (ifp->if_mtu > SK_MAX_FRAMELEN)
3747 error = sk_init_jumbo_rx_ring(sc_if);
3749 error = sk_init_rx_ring(sc_if);
3751 device_printf(sc_if->sk_if_dev,
3752 "initialization failed: no memory for rx buffers\n");
3756 sk_init_tx_ring(sc_if);
3758 /* Set interrupt moderation if changed via sysctl. */
3759 imr = sk_win_read_4(sc, SK_IMTIMERINIT);
3760 if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
3761 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
3764 device_printf(sc_if->sk_if_dev,
3765 "interrupt moderation is %d us.\n",
3769 /* Configure interrupt handling */
3770 CSR_READ_4(sc, SK_ISSR);
3771 if (sc_if->sk_port == SK_PORT_A)
3772 sc->sk_intrmask |= SK_INTRS1;
3774 sc->sk_intrmask |= SK_INTRS2;
3776 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
3778 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3781 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
3783 switch(sc->sk_type) {
3785 /* Enable XMACs TX and RX state machines */
3786 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
3787 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3792 reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
3793 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
3795 /* XXX disable 100Mbps and full duplex mode? */
3796 reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
3798 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
3801 /* Activate descriptor polling timer */
3802 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
3803 /* start transfer of Tx descriptors */
3804 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
3806 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3807 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3809 switch (sc->sk_type) {
3813 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
3817 callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
3824 struct sk_if_softc *sc_if;
3827 struct sk_softc *sc;
3828 struct sk_txdesc *txd;
3829 struct sk_rxdesc *rxd;
3830 struct sk_rxdesc *jrxd;
3834 SK_IF_LOCK_ASSERT(sc_if);
3835 sc = sc_if->sk_softc;
3836 ifp = sc_if->sk_ifp;
3838 callout_stop(&sc_if->sk_tick_ch);
3839 callout_stop(&sc_if->sk_watchdog_ch);
3841 /* stop Tx descriptor polling timer */
3842 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
3843 /* stop transfer of Tx descriptors */
3844 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
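/*
 * The stop request is acknowledged by the BMU clearing the stop bit,
 * so poll (for at most SK_TIMEOUT iterations) until it reads back clear.
 */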
3845 for (i = 0; i < SK_TIMEOUT; i++) {
3846 val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
3847 if ((val & SK_TXBMU_TX_STOP) == 0)
3851 if (i == SK_TIMEOUT)
3852 device_printf(sc_if->sk_if_dev,
3853 "cannot stop transfer of Tx descriptors\n");
3854 /* stop transfer of Rx descriptors */
3855 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
3856 for (i = 0; i < SK_TIMEOUT; i++) {
3857 val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
3858 if ((val & SK_RXBMU_RX_STOP) == 0)
3862 if (i == SK_TIMEOUT)
3863 device_printf(sc_if->sk_if_dev,
3864 "cannot stop transfer of Rx descriptors\n");
3866 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3867 /* Put PHY back into reset. */
3868 val = sk_win_read_4(sc, SK_GPIO);
3869 if (sc_if->sk_port == SK_PORT_A) {
3870 val |= SK_GPIO_DIR0;
3871 val &= ~SK_GPIO_DAT0;
3873 val |= SK_GPIO_DIR2;
3874 val &= ~SK_GPIO_DAT2;
3876 sk_win_write_4(sc, SK_GPIO, val);
3879 /* Turn off various components of this interface. */
3880 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3881 switch (sc->sk_type) {
3883 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
3884 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
3889 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
3890 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
3893 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
3894 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3895 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
3896 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3897 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
3898 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3899 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3900 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
3901 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
3903 /* Disable interrupts */
3904 if (sc_if->sk_port == SK_PORT_A)
3905 sc->sk_intrmask &= ~SK_INTRS1;
3907 sc->sk_intrmask &= ~SK_INTRS2;
3908 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3910 SK_XM_READ_2(sc_if, XM_ISR);
3911 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3913 /* Free RX and TX mbufs still in the queues. */
3914 for (i = 0; i < SK_RX_RING_CNT; i++) {
3915 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
3916 if (rxd->rx_m != NULL) {
3917 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
3918 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3919 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
3925 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
3926 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
3927 if (jrxd->rx_m != NULL) {
3928 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
3929 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3930 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
3932 m_freem(jrxd->rx_m);
3936 for (i = 0; i < SK_TX_RING_CNT; i++) {
3937 txd = &sc_if->sk_cdata.sk_txdesc[i];
3938 if (txd->tx_m != NULL) {
3939 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
3940 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3941 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
3948 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
3954 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3960 value = *(int *)arg1;
3961 error = sysctl_handle_int(oidp, &value, 0, req);
3962 if (error || !req->newptr)
3964 if (value < low || value > high)
3966 *(int *)arg1 = value;
3971 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
3973 return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
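/*
 * sysctl_int_range() above is a generic bounds-checked integer handler;
 * sysctl_hw_sk_int_mod() simply pins the interrupt moderation value to
 * [SK_IM_MIN, SK_IM_MAX].  For reference, a handler like this is
 * normally wired up during attach roughly as follows (a sketch only,
 * not necessarily this driver's exact attach code):
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 *	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->sk_int_mod, 0,
 *	    sysctl_hw_sk_int_mod, "I", "SK interrupt moderation");
 */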