1 /* $OpenBSD: if_sk.c,v 1.33 2003/08/12 05:23:06 nate Exp $ */
4 * SPDX-License-Identifier: BSD-4-Clause
6 * Copyright (c) 1997, 1998, 1999, 2000
7 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Bill Paul.
20 * 4. Neither the name of the author nor the names of any co-contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
34 * THE POSSIBILITY OF SUCH DAMAGE.
37 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
39 * Permission to use, copy, modify, and distribute this software for any
40 * purpose with or without fee is hereby granted, provided that the above
41 * copyright notice and this permission notice appear in all copies.
43 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
44 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
46 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
47 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
48 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
49 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
56 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
57 * the SK-984x series adapters, both single port and dual port.
59 * The XaQti XMAC II datasheet,
60 * https://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
61 * The SysKonnect GEnesis manual, http://www.syskonnect.com
63 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
64 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
65 * convenience to others until Vitesse corrects this problem:
67 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
69 * Written by Bill Paul <wpaul@ee.columbia.edu>
70 * Department of Electrical Engineering
71 * Columbia University, New York City
74 * The SysKonnect gigabit ethernet adapters consist of two main
75 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
76 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
77 * components and a PHY while the GEnesis controller provides a PCI
78 * interface with DMA support. Each card may have between 512K and
79 * 2MB of SRAM on board depending on the configuration.
81 * The SysKonnect GEnesis controller can have either one or two XMAC
82 * chips connected to it, allowing single or dual port NIC configurations.
83 * SysKonnect has the distinction of being the only vendor on the market
84 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
85 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
86 * XMAC registers. This driver takes advantage of these features to allow
87 * both XMACs to operate as independent interfaces.
90 #include <sys/param.h>
91 #include <sys/systm.h>
93 #include <sys/endian.h>
95 #include <sys/malloc.h>
96 #include <sys/kernel.h>
97 #include <sys/module.h>
98 #include <sys/socket.h>
99 #include <sys/sockio.h>
100 #include <sys/queue.h>
101 #include <sys/sysctl.h>
104 #include <net/ethernet.h>
106 #include <net/if_var.h>
107 #include <net/if_arp.h>
108 #include <net/if_dl.h>
109 #include <net/if_media.h>
110 #include <net/if_types.h>
111 #include <net/if_vlan_var.h>
113 #include <netinet/in.h>
114 #include <netinet/in_systm.h>
115 #include <netinet/ip.h>
117 #include <machine/bus.h>
118 #include <machine/in_cksum.h>
119 #include <machine/resource.h>
120 #include <sys/rman.h>
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124 #include <dev/mii/brgphyreg.h>
126 #include <dev/pci/pcireg.h>
127 #include <dev/pci/pcivar.h>
130 #define SK_USEIOSPACE
133 #include <dev/sk/if_skreg.h>
134 #include <dev/sk/xmaciireg.h>
135 #include <dev/sk/yukonreg.h>
137 MODULE_DEPEND(sk, pci, 1, 1, 1);
138 MODULE_DEPEND(sk, ether, 1, 1, 1);
139 MODULE_DEPEND(sk, miibus, 1, 1, 1);
141 /* "device miibus" required. See GENERIC if you get errors here. */
142 #include "miibus_if.h"
144 static const struct sk_type sk_devs[] = {
148 "SysKonnect Gigabit Ethernet (V1.0)"
153 "SysKonnect Gigabit Ethernet (V2.0)"
158 "Marvell Gigabit Ethernet"
162 DEVICEID_BELKIN_5005,
163 "Belkin F5D5005 Gigabit Ethernet"
168 "3Com 3C940 Gigabit Ethernet"
172 DEVICEID_LINKSYS_EG1032,
173 "Linksys EG1032 Gigabit Ethernet"
177 DEVICEID_DLINK_DGE530T_A1,
178 "D-Link DGE-530T Gigabit Ethernet"
182 DEVICEID_DLINK_DGE530T_B1,
183 "D-Link DGE-530T Gigabit Ethernet"
188 static int skc_probe(device_t);
189 static int skc_attach(device_t);
190 static int skc_detach(device_t);
191 static int skc_shutdown(device_t);
192 static int skc_suspend(device_t);
193 static int skc_resume(device_t);
194 static bus_dma_tag_t skc_get_dma_tag(device_t, device_t);
195 static int sk_detach(device_t);
196 static int sk_probe(device_t);
197 static int sk_attach(device_t);
198 static void sk_tick(void *);
199 static void sk_yukon_tick(void *);
200 static void sk_intr(void *);
201 static void sk_intr_xmac(struct sk_if_softc *);
202 static void sk_intr_bcom(struct sk_if_softc *);
203 static void sk_intr_yukon(struct sk_if_softc *);
204 static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t);
205 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
206 static void sk_rxeof(struct sk_if_softc *);
207 static void sk_jumbo_rxeof(struct sk_if_softc *);
208 static void sk_txeof(struct sk_if_softc *);
209 static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *);
210 static int sk_encap(struct sk_if_softc *, struct mbuf **);
211 static void sk_start(struct ifnet *);
212 static void sk_start_locked(struct ifnet *);
213 static int sk_ioctl(struct ifnet *, u_long, caddr_t);
214 static void sk_init(void *);
215 static void sk_init_locked(struct sk_if_softc *);
216 static void sk_init_xmac(struct sk_if_softc *);
217 static void sk_init_yukon(struct sk_if_softc *);
218 static void sk_stop(struct sk_if_softc *);
219 static void sk_watchdog(void *);
220 static int sk_ifmedia_upd(struct ifnet *);
221 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
222 static void sk_reset(struct sk_softc *);
223 static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
224 static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
225 static int sk_newbuf(struct sk_if_softc *, int);
226 static int sk_jumbo_newbuf(struct sk_if_softc *, int);
227 static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
228 static int sk_dma_alloc(struct sk_if_softc *);
229 static int sk_dma_jumbo_alloc(struct sk_if_softc *);
230 static void sk_dma_free(struct sk_if_softc *);
231 static void sk_dma_jumbo_free(struct sk_if_softc *);
232 static int sk_init_rx_ring(struct sk_if_softc *);
233 static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
234 static void sk_init_tx_ring(struct sk_if_softc *);
235 static u_int32_t sk_win_read_4(struct sk_softc *, int);
236 static u_int16_t sk_win_read_2(struct sk_softc *, int);
237 static u_int8_t sk_win_read_1(struct sk_softc *, int);
238 static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
239 static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
240 static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
242 static int sk_miibus_readreg(device_t, int, int);
243 static int sk_miibus_writereg(device_t, int, int, int);
244 static void sk_miibus_statchg(device_t);
246 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
247 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
249 static void sk_xmac_miibus_statchg(struct sk_if_softc *);
251 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
252 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
254 static void sk_marv_miibus_statchg(struct sk_if_softc *);
256 static uint32_t sk_xmchash(const uint8_t *);
257 static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
258 static void sk_rxfilter(struct sk_if_softc *);
259 static void sk_rxfilter_genesis(struct sk_if_softc *);
260 static void sk_rxfilter_yukon(struct sk_if_softc *);
262 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
263 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
266 static int jumbo_disable = 0;
267 TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);
270 * It seems that the SK-NET GENESIS supports a very simple checksum offload
271 * capability for Tx, and I believe it can generate a 0 checksum value for
272 * UDP packets in Tx as the hardware can't differentiate UDP packets from
273 * TCP packets. A 0 checksum value for a UDP packet is invalid, as it
274 * means the sender didn't perform the checksum computation. For safety I
275 * disabled the UDP checksum offload capability for the moment. Alternatively
276 * we could introduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum
279 #define SK_CSUM_FEATURES (CSUM_TCP)
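/*
 * Illustrative only: if a controller were known to checksum UDP correctly,
 * UDP offload could in principle be re-enabled by also including CSUM_UDP
 * in the mask above, e.g. (untested sketch):
 *
 *	#define SK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
 */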
282 * Note that we have newbus methods for both the GEnesis controller
283 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
284 * the miibus code is a child of the XMACs. We need to do it this way
285 * so that the miibus drivers can access the PHY registers on the
286 * right PHY. It's not quite what I had in mind, but it's the only
287 * design that achieves the desired effect.
289 static device_method_t skc_methods[] = {
290 /* Device interface */
291 DEVMETHOD(device_probe, skc_probe),
292 DEVMETHOD(device_attach, skc_attach),
293 DEVMETHOD(device_detach, skc_detach),
294 DEVMETHOD(device_suspend, skc_suspend),
295 DEVMETHOD(device_resume, skc_resume),
296 DEVMETHOD(device_shutdown, skc_shutdown),
298 DEVMETHOD(bus_get_dma_tag, skc_get_dma_tag),
303 static driver_t skc_driver = {
306 sizeof(struct sk_softc)
309 static devclass_t skc_devclass;
311 static device_method_t sk_methods[] = {
312 /* Device interface */
313 DEVMETHOD(device_probe, sk_probe),
314 DEVMETHOD(device_attach, sk_attach),
315 DEVMETHOD(device_detach, sk_detach),
316 DEVMETHOD(device_shutdown, bus_generic_shutdown),
319 DEVMETHOD(miibus_readreg, sk_miibus_readreg),
320 DEVMETHOD(miibus_writereg, sk_miibus_writereg),
321 DEVMETHOD(miibus_statchg, sk_miibus_statchg),
326 static driver_t sk_driver = {
329 sizeof(struct sk_if_softc)
332 static devclass_t sk_devclass;
334 DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, NULL, NULL);
335 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, NULL, NULL);
336 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, NULL, NULL);
338 static struct resource_spec sk_res_spec_io[] = {
339 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
340 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
344 static struct resource_spec sk_res_spec_mem[] = {
345 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
346 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
350 #define SK_SETBIT(sc, reg, x) \
351 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
353 #define SK_CLRBIT(sc, reg, x) \
354 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
356 #define SK_WIN_SETBIT_4(sc, reg, x) \
357 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
359 #define SK_WIN_CLRBIT_4(sc, reg, x) \
360 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
362 #define SK_WIN_SETBIT_2(sc, reg, x) \
363 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
365 #define SK_WIN_CLRBIT_2(sc, reg, x) \
366 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
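/*
 * Usage example (illustrative): SK_WIN_SETBIT_4(sc, SK_IMMR, SK_ISR_RX1_EOF)
 * performs a read-modify-write of a windowed register; the window is selected
 * through SK_RAP by the sk_win_* helpers below when register windows are in use.
 */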
369 sk_win_read_4(sc, reg)
374 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
375 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
377 return(CSR_READ_4(sc, reg));
382 sk_win_read_2(sc, reg)
387 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
388 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
390 return(CSR_READ_2(sc, reg));
395 sk_win_read_1(sc, reg)
400 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
401 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
403 return(CSR_READ_1(sc, reg));
408 sk_win_write_4(sc, reg, val)
414 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
415 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
417 CSR_WRITE_4(sc, reg, val);
423 sk_win_write_2(sc, reg, val)
429 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
430 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
432 CSR_WRITE_2(sc, reg, val);
438 sk_win_write_1(sc, reg, val)
444 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
445 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
447 CSR_WRITE_1(sc, reg, val);
453 sk_miibus_readreg(dev, phy, reg)
457 struct sk_if_softc *sc_if;
460 sc_if = device_get_softc(dev);
462 SK_IF_MII_LOCK(sc_if);
463 switch(sc_if->sk_softc->sk_type) {
465 v = sk_xmac_miibus_readreg(sc_if, phy, reg);
470 v = sk_marv_miibus_readreg(sc_if, phy, reg);
476 SK_IF_MII_UNLOCK(sc_if);
482 sk_miibus_writereg(dev, phy, reg, val)
486 struct sk_if_softc *sc_if;
489 sc_if = device_get_softc(dev);
491 SK_IF_MII_LOCK(sc_if);
492 switch(sc_if->sk_softc->sk_type) {
494 v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
499 v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
505 SK_IF_MII_UNLOCK(sc_if);
511 sk_miibus_statchg(dev)
514 struct sk_if_softc *sc_if;
516 sc_if = device_get_softc(dev);
518 SK_IF_MII_LOCK(sc_if);
519 switch(sc_if->sk_softc->sk_type) {
521 sk_xmac_miibus_statchg(sc_if);
526 sk_marv_miibus_statchg(sc_if);
529 SK_IF_MII_UNLOCK(sc_if);
535 sk_xmac_miibus_readreg(sc_if, phy, reg)
536 struct sk_if_softc *sc_if;
541 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
542 SK_XM_READ_2(sc_if, XM_PHY_DATA);
543 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
544 for (i = 0; i < SK_TIMEOUT; i++) {
546 if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
547 XM_MMUCMD_PHYDATARDY)
551 if (i == SK_TIMEOUT) {
552 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
557 i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
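/*
 * Summary of the read sequence above: write the register and PHY address to
 * XM_PHY_ADDR, do a throwaway read of XM_PHY_DATA to start the cycle, and,
 * for external (non-XMAC) PHYs, poll XM_MMUCMD for XM_MMUCMD_PHYDATARDY
 * before reading the final value back from XM_PHY_DATA.
 */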
563 sk_xmac_miibus_writereg(sc_if, phy, reg, val)
564 struct sk_if_softc *sc_if;
569 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
570 for (i = 0; i < SK_TIMEOUT; i++) {
571 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
575 if (i == SK_TIMEOUT) {
576 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
580 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
581 for (i = 0; i < SK_TIMEOUT; i++) {
583 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
587 if_printf(sc_if->sk_ifp, "phy write timed out\n");
593 sk_xmac_miibus_statchg(sc_if)
594 struct sk_if_softc *sc_if;
596 struct mii_data *mii;
598 mii = device_get_softc(sc_if->sk_miibus);
601 * If this is a GMII PHY, manually set the XMAC's
602 * duplex mode accordingly.
604 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
605 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
606 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
608 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
614 sk_marv_miibus_readreg(sc_if, phy, reg)
615 struct sk_if_softc *sc_if;
621 if (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
622 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER) {
626 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
627 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
629 for (i = 0; i < SK_TIMEOUT; i++) {
631 val = SK_YU_READ_2(sc_if, YUKON_SMICR);
632 if (val & YU_SMICR_READ_VALID)
636 if (i == SK_TIMEOUT) {
637 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
641 val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
647 sk_marv_miibus_writereg(sc_if, phy, reg, val)
648 struct sk_if_softc *sc_if;
653 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
654 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
655 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
657 for (i = 0; i < SK_TIMEOUT; i++) {
659 if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
663 if_printf(sc_if->sk_ifp, "phy write timeout\n");
669 sk_marv_miibus_statchg(sc_if)
670 struct sk_if_softc *sc_if;
683 /* Compute CRC for the address value. */
684 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
686 return (~crc & ((1 << HASH_BITS) - 1));
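/*
 * The XMAC hash above is the low HASH_BITS bits of the complemented
 * little-endian CRC-32 of the address; sk_rxfilter_genesis() below uses the
 * result as a bit index into the 64-bit hash table held in XM_MAR0/XM_MAR2.
 */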
690 sk_setfilt(sc_if, addr, slot)
691 struct sk_if_softc *sc_if;
697 base = XM_RXFILT_ENTRY(slot);
699 SK_XM_WRITE_2(sc_if, base, addr[0]);
700 SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
701 SK_XM_WRITE_2(sc_if, base + 4, addr[2]);
708 struct sk_if_softc *sc_if;
712 SK_IF_LOCK_ASSERT(sc_if);
714 sc = sc_if->sk_softc;
715 if (sc->sk_type == SK_GENESIS)
716 sk_rxfilter_genesis(sc_if);
718 sk_rxfilter_yukon(sc_if);
722 sk_rxfilter_genesis(sc_if)
723 struct sk_if_softc *sc_if;
725 struct ifnet *ifp = sc_if->sk_ifp;
726 u_int32_t hashes[2] = { 0, 0 }, mode;
728 struct ifmultiaddr *ifma;
729 u_int16_t dummy[] = { 0, 0, 0 };
730 u_int16_t maddr[(ETHER_ADDR_LEN+1)/2];
732 SK_IF_LOCK_ASSERT(sc_if);
734 mode = SK_XM_READ_4(sc_if, XM_MODE);
735 mode &= ~(XM_MODE_RX_PROMISC | XM_MODE_RX_USE_HASH |
736 XM_MODE_RX_USE_PERFECT);
737 /* First, zot all the existing perfect filters. */
738 for (i = 1; i < XM_RXFILT_MAX; i++)
739 sk_setfilt(sc_if, dummy, i);
741 /* Now program new ones. */
742 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
743 if (ifp->if_flags & IFF_ALLMULTI)
744 mode |= XM_MODE_RX_USE_HASH;
745 if (ifp->if_flags & IFF_PROMISC)
746 mode |= XM_MODE_RX_PROMISC;
747 hashes[0] = 0xFFFFFFFF;
748 hashes[1] = 0xFFFFFFFF;
752 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
754 if (ifma->ifma_addr->sa_family != AF_LINK)
757 * Program the first XM_RXFILT_MAX multicast groups
758 * into the perfect filter.
760 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
761 maddr, ETHER_ADDR_LEN);
762 if (i < XM_RXFILT_MAX) {
763 sk_setfilt(sc_if, maddr, i);
764 mode |= XM_MODE_RX_USE_PERFECT;
768 h = sk_xmchash((const uint8_t *)maddr);
770 hashes[0] |= (1 << h);
772 hashes[1] |= (1 << (h - 32));
773 mode |= XM_MODE_RX_USE_HASH;
775 if_maddr_runlock(ifp);
778 SK_XM_WRITE_4(sc_if, XM_MODE, mode);
779 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
780 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
784 sk_rxfilter_yukon(sc_if)
785 struct sk_if_softc *sc_if;
788 u_int32_t crc, hashes[2] = { 0, 0 }, mode;
789 struct ifmultiaddr *ifma;
791 SK_IF_LOCK_ASSERT(sc_if);
794 mode = SK_YU_READ_2(sc_if, YUKON_RCR);
795 if (ifp->if_flags & IFF_PROMISC)
796 mode &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN);
797 else if (ifp->if_flags & IFF_ALLMULTI) {
798 mode |= YU_RCR_UFLEN | YU_RCR_MUFLEN;
799 hashes[0] = 0xFFFFFFFF;
800 hashes[1] = 0xFFFFFFFF;
802 mode |= YU_RCR_UFLEN;
804 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
805 if (ifma->ifma_addr->sa_family != AF_LINK)
807 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
808 ifma->ifma_addr), ETHER_ADDR_LEN);
809 /* Just want the 6 least significant bits. */
811 /* Set the corresponding bit in the hash table. */
812 hashes[crc >> 5] |= 1 << (crc & 0x1f);
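/*
 * With only the 6 low-order CRC bits kept, bit 5 selects hashes[0] or
 * hashes[1] and bits 0-4 select the bit within that word, i.e. a 64-bit
 * multicast hash table spread across YUKON_MCAH1..MCAH4.
 */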
814 if_maddr_runlock(ifp);
815 if (hashes[0] != 0 || hashes[1] != 0)
816 mode |= YU_RCR_MUFLEN;
819 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
820 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
821 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
822 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
823 SK_YU_WRITE_2(sc_if, YUKON_RCR, mode);
827 sk_init_rx_ring(sc_if)
828 struct sk_if_softc *sc_if;
830 struct sk_ring_data *rd;
832 u_int32_t csum_start;
835 sc_if->sk_cdata.sk_rx_cons = 0;
837 csum_start = (ETHER_HDR_LEN + sizeof(struct ip)) << 16 |
839 rd = &sc_if->sk_rdata;
840 bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
841 for (i = 0; i < SK_RX_RING_CNT; i++) {
842 if (sk_newbuf(sc_if, i) != 0)
844 if (i == (SK_RX_RING_CNT - 1))
845 addr = SK_RX_RING_ADDR(sc_if, 0);
847 addr = SK_RX_RING_ADDR(sc_if, i + 1);
848 rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
849 rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
852 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
853 sc_if->sk_cdata.sk_rx_ring_map,
854 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
860 sk_init_jumbo_rx_ring(sc_if)
861 struct sk_if_softc *sc_if;
863 struct sk_ring_data *rd;
865 u_int32_t csum_start;
868 sc_if->sk_cdata.sk_jumbo_rx_cons = 0;
870 csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
872 rd = &sc_if->sk_rdata;
873 bzero(rd->sk_jumbo_rx_ring,
874 sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
875 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
876 if (sk_jumbo_newbuf(sc_if, i) != 0)
878 if (i == (SK_JUMBO_RX_RING_CNT - 1))
879 addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
881 addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
882 rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
883 rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
886 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
887 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
888 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
894 sk_init_tx_ring(sc_if)
895 struct sk_if_softc *sc_if;
897 struct sk_ring_data *rd;
898 struct sk_txdesc *txd;
902 STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
903 STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);
905 sc_if->sk_cdata.sk_tx_prod = 0;
906 sc_if->sk_cdata.sk_tx_cons = 0;
907 sc_if->sk_cdata.sk_tx_cnt = 0;
909 rd = &sc_if->sk_rdata;
910 bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
911 for (i = 0; i < SK_TX_RING_CNT; i++) {
912 if (i == (SK_TX_RING_CNT - 1))
913 addr = SK_TX_RING_ADDR(sc_if, 0);
915 addr = SK_TX_RING_ADDR(sc_if, i + 1);
916 rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
917 txd = &sc_if->sk_cdata.sk_txdesc[i];
918 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
921 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
922 sc_if->sk_cdata.sk_tx_ring_map,
923 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
927 sk_discard_rxbuf(sc_if, idx)
928 struct sk_if_softc *sc_if;
931 struct sk_rx_desc *r;
932 struct sk_rxdesc *rxd;
936 r = &sc_if->sk_rdata.sk_rx_ring[idx];
937 rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
939 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
943 sk_discard_jumbo_rxbuf(sc_if, idx)
944 struct sk_if_softc *sc_if;
947 struct sk_rx_desc *r;
948 struct sk_rxdesc *rxd;
951 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
952 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
954 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
958 sk_newbuf(sc_if, idx)
959 struct sk_if_softc *sc_if;
962 struct sk_rx_desc *r;
963 struct sk_rxdesc *rxd;
965 bus_dma_segment_t segs[1];
969 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
972 m->m_len = m->m_pkthdr.len = MCLBYTES;
973 m_adj(m, ETHER_ALIGN);
975 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
976 sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
980 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
982 rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
983 if (rxd->rx_m != NULL) {
984 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
985 BUS_DMASYNC_POSTREAD);
986 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
988 map = rxd->rx_dmamap;
989 rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
990 sc_if->sk_cdata.sk_rx_sparemap = map;
991 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
992 BUS_DMASYNC_PREREAD);
994 r = &sc_if->sk_rdata.sk_rx_ring[idx];
995 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
996 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
997 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
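/*
 * Note the spare-map scheme above: the new mbuf is loaded into
 * sk_rx_sparemap first, so a failed load leaves the old mbuf and its
 * mapping intact; only after a successful load are the two maps swapped.
 */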
1003 sk_jumbo_newbuf(sc_if, idx)
1004 struct sk_if_softc *sc_if;
1007 struct sk_rx_desc *r;
1008 struct sk_rxdesc *rxd;
1010 bus_dma_segment_t segs[1];
1014 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1017 m->m_pkthdr.len = m->m_len = MJUM9BYTES;
1019 * Adjust alignment so packet payload begins on a
1020 * longword boundary. Mandatory for Alpha, useful on
1023 m_adj(m, ETHER_ALIGN);
1025 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
1026 sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1030 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1032 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
1033 if (rxd->rx_m != NULL) {
1034 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
1035 BUS_DMASYNC_POSTREAD);
1036 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
1039 map = rxd->rx_dmamap;
1040 rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
1041 sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
1042 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
1043 BUS_DMASYNC_PREREAD);
1045 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
1046 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
1047 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
1048 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
1054 * Set media options.
1060 struct sk_if_softc *sc_if = ifp->if_softc;
1061 struct mii_data *mii;
1063 mii = device_get_softc(sc_if->sk_miibus);
1071 * Report current media status.
1074 sk_ifmedia_sts(ifp, ifmr)
1076 struct ifmediareq *ifmr;
1078 struct sk_if_softc *sc_if;
1079 struct mii_data *mii;
1081 sc_if = ifp->if_softc;
1082 mii = device_get_softc(sc_if->sk_miibus);
1085 ifmr->ifm_active = mii->mii_media_active;
1086 ifmr->ifm_status = mii->mii_media_status;
1092 sk_ioctl(ifp, command, data)
1097 struct sk_if_softc *sc_if = ifp->if_softc;
1098 struct ifreq *ifr = (struct ifreq *) data;
1100 struct mii_data *mii;
1105 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU)
1107 else if (ifp->if_mtu != ifr->ifr_mtu) {
1108 if (sc_if->sk_jumbo_disable != 0 &&
1109 ifr->ifr_mtu > SK_MAX_FRAMELEN)
1113 ifp->if_mtu = ifr->ifr_mtu;
1114 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1115 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1116 sk_init_locked(sc_if);
1118 SK_IF_UNLOCK(sc_if);
1124 if (ifp->if_flags & IFF_UP) {
1125 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1126 if ((ifp->if_flags ^ sc_if->sk_if_flags)
1127 & (IFF_PROMISC | IFF_ALLMULTI))
1130 sk_init_locked(sc_if);
1132 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1135 sc_if->sk_if_flags = ifp->if_flags;
1136 SK_IF_UNLOCK(sc_if);
1141 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1143 SK_IF_UNLOCK(sc_if);
1147 mii = device_get_softc(sc_if->sk_miibus);
1148 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1152 if (sc_if->sk_softc->sk_type == SK_GENESIS) {
1153 SK_IF_UNLOCK(sc_if);
1156 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1157 if ((mask & IFCAP_TXCSUM) != 0 &&
1158 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1159 ifp->if_capenable ^= IFCAP_TXCSUM;
1160 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1161 ifp->if_hwassist |= SK_CSUM_FEATURES;
1163 ifp->if_hwassist &= ~SK_CSUM_FEATURES;
1165 if ((mask & IFCAP_RXCSUM) != 0 &&
1166 (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
1167 ifp->if_capenable ^= IFCAP_RXCSUM;
1168 SK_IF_UNLOCK(sc_if);
1171 error = ether_ioctl(ifp, command, data);
1179 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1180 * IDs against our list and return a device name if we find a match.
1186 const struct sk_type *t = sk_devs;
1188 while(t->sk_name != NULL) {
1189 if ((pci_get_vendor(dev) == t->sk_vid) &&
1190 (pci_get_device(dev) == t->sk_did)) {
1192 * Only attach to rev. 2 of the Linksys EG1032 adapter.
1193 * Rev. 3 is supported by re(4).
1195 if ((t->sk_vid == VENDORID_LINKSYS) &&
1196 (t->sk_did == DEVICEID_LINKSYS_EG1032) &&
1197 (pci_get_subdevice(dev) !=
1198 SUBDEVICEID_LINKSYS_EG1032_REV2)) {
1202 device_set_desc(dev, t->sk_name);
1203 return (BUS_PROBE_DEFAULT);
1212 * Force the GEnesis into reset, then bring it out of reset.
1216 struct sk_softc *sc;
1219 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1220 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1221 if (SK_YUKON_FAMILY(sc->sk_type))
1222 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1225 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1227 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1228 if (SK_YUKON_FAMILY(sc->sk_type))
1229 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1231 if (sc->sk_type == SK_GENESIS) {
1232 /* Configure packet arbiter */
1233 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1234 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1235 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1236 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1237 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1240 /* Enable RAM interface */
1241 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1244 * Configure interrupt moderation. The moderation timer
1245 * defers interrupts specified in the interrupt moderation
1246 * timer mask based on the timeout specified in the interrupt
1247 * moderation timer init register. Each bit in the timer
1248 * register represents one tick, so to specify a timeout in
1249 * microseconds, we have to multiply by the correct number of
1250 * ticks-per-microsecond.
1252 switch (sc->sk_type) {
1254 sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
1257 sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
1261 device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
1263 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
1265 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1266 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1267 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
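/*
 * Illustrative example: with sk_int_mod set to, say, 100 us, SK_IM_USECS()
 * multiplies that by the per-chip sk_int_ticks value chosen above, so the
 * init register is loaded with 100 * sk_int_ticks timer ticks.
 */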
1276 struct sk_softc *sc;
1278 sc = device_get_softc(device_get_parent(dev));
1281 * Not much to do here. We always know there will be
1282 * at least one XMAC present, and if there are two,
1283 * skc_attach() will create a second device instance
1286 switch (sc->sk_type) {
1288 device_set_desc(dev, "XaQti Corp. XMAC II");
1293 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1297 return (BUS_PROBE_DEFAULT);
1301 * Each XMAC chip is attached as a separate logical IP interface.
1302 * Single port cards will have only one logical interface of course.
1308 struct sk_softc *sc;
1309 struct sk_if_softc *sc_if;
1312 int error, i, phy, port;
1314 u_char inv_mac[] = {0, 0, 0, 0, 0, 0};
1320 sc_if = device_get_softc(dev);
1321 sc = device_get_softc(device_get_parent(dev));
1322 port = *(int *)device_get_ivars(dev);
1324 sc_if->sk_if_dev = dev;
1325 sc_if->sk_port = port;
1326 sc_if->sk_softc = sc;
1327 sc->sk_if[port] = sc_if;
1328 if (port == SK_PORT_A)
1329 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1330 if (port == SK_PORT_B)
1331 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1333 callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
1334 callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);
1336 if (sk_dma_alloc(sc_if) != 0) {
1340 sk_dma_jumbo_alloc(sc_if);
1342 ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
1344 device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
1348 ifp->if_softc = sc_if;
1349 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1350 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1352 * SK_GENESIS has a bug in checksum offload (noted in the Linux driver).
1354 if (sc_if->sk_softc->sk_type != SK_GENESIS) {
1355 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
1356 ifp->if_hwassist = 0;
1358 ifp->if_capabilities = 0;
1359 ifp->if_hwassist = 0;
1361 ifp->if_capenable = ifp->if_capabilities;
1363 * Some revisions of the Yukon controller generate corrupted
1364 * frames when TX checksum offloading is enabled. The
1365 * frame has a valid checksum value, so the payload might be
1366 * modified during TX checksum calculation. Disable TX
1367 * checksum offloading by default, but give users a chance to enable it
1368 * when they know their controller works without problems
1369 * with TX checksum offloading.
1371 ifp->if_capenable &= ~IFCAP_TXCSUM;
1372 ifp->if_ioctl = sk_ioctl;
1373 ifp->if_start = sk_start;
1374 ifp->if_init = sk_init;
1375 IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1376 ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
1377 IFQ_SET_READY(&ifp->if_snd);
1380 * Get station address for this interface. Note that
1381 * dual port cards actually come with three station
1382 * addresses: one for each port, plus an extra. The
1383 * extra one is used by the SysKonnect driver software
1384 * as a 'virtual' station address for when both ports
1385 * are operating in failover mode. Currently we don't
1386 * use this extra address.
1389 for (i = 0; i < ETHER_ADDR_LEN; i++)
1391 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1393 /* Verify whether the station address is invalid or not. */
1394 if (bcmp(eaddr, inv_mac, sizeof(inv_mac)) == 0) {
1395 device_printf(sc_if->sk_if_dev,
1396 "Generating random ethernet address\n");
1399 * Set OUI to convenient locally assigned address. 'b'
1400 * is 0x62, which has the locally assigned bit set, and
1401 * the broadcast/multicast bit clear.
1406 eaddr[3] = (r >> 16) & 0xff;
1407 eaddr[4] = (r >> 8) & 0xff;
1408 eaddr[5] = (r >> 0) & 0xff;
1411 * Set up RAM buffer addresses. The NIC will have a certain
1412 * amount of SRAM on it, somewhere between 512K and 2MB. We
1413 * need to divide this up a) between the transmitter and
1414 * receiver and b) between the two XMACs, if this is a
1415 * dual port NIC. Our algorithm is to divide up the memory
1416 * evenly so that everyone gets a fair share.
1418 * Just to be contrary, Yukon2 appears to have separate memory
1421 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1422 u_int32_t chunk, val;
1424 chunk = sc->sk_ramsize / 2;
1425 val = sc->sk_rboff / sizeof(u_int64_t);
1426 sc_if->sk_rx_ramstart = val;
1427 val += (chunk / sizeof(u_int64_t));
1428 sc_if->sk_rx_ramend = val - 1;
1429 sc_if->sk_tx_ramstart = val;
1430 val += (chunk / sizeof(u_int64_t));
1431 sc_if->sk_tx_ramend = val - 1;
1433 u_int32_t chunk, val;
1435 chunk = sc->sk_ramsize / 4;
1436 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1438 sc_if->sk_rx_ramstart = val;
1439 val += (chunk / sizeof(u_int64_t));
1440 sc_if->sk_rx_ramend = val - 1;
1441 sc_if->sk_tx_ramstart = val;
1442 val += (chunk / sizeof(u_int64_t));
1443 sc_if->sk_tx_ramend = val - 1;
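/*
 * Worked example (hypothetical board with 1MB of SRAM, dual MAC): each
 * port gets a 256KB chunk for Rx and another 256KB for Tx; since the
 * start/end values are expressed in 8-byte (u_int64_t) units, each chunk
 * covers 32768 such units.
 */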
1446 /* Read and save PHY type and set PHY address */
1447 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1448 if (!SK_YUKON_FAMILY(sc->sk_type)) {
1449 switch(sc_if->sk_phytype) {
1450 case SK_PHYTYPE_XMAC:
1451 sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1453 case SK_PHYTYPE_BCOM:
1454 sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1457 device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
1460 SK_IF_UNLOCK(sc_if);
1464 if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
1465 sc->sk_pmd != 'S') {
1466 /* not initialized, punt */
1467 sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
1468 sc->sk_coppertype = 1;
1471 sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1473 if (!(sc->sk_coppertype))
1474 sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
1478 * Call MI attach routine. Can't hold locks when calling into ether_*.
1480 SK_IF_UNLOCK(sc_if);
1481 ether_ifattach(ifp, eaddr);
1485 * The hardware should be ready for VLAN_MTU by default:
1486 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
1487 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
1490 ifp->if_capabilities |= IFCAP_VLAN_MTU;
1491 ifp->if_capenable |= IFCAP_VLAN_MTU;
1493 * Tell the upper layer(s) we support long frames.
1494 * Must appear after the call to ether_ifattach() because
1495 * ether_ifattach() sets ifi_hdrlen to the default value.
1497 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1503 switch (sc->sk_type) {
1505 sk_init_xmac(sc_if);
1506 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
1512 sk_init_yukon(sc_if);
1517 SK_IF_UNLOCK(sc_if);
1518 error = mii_attach(dev, &sc_if->sk_miibus, ifp, sk_ifmedia_upd,
1519 sk_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
1521 device_printf(sc_if->sk_if_dev, "attaching PHYs failed\n");
1522 ether_ifdetach(ifp);
1528 /* Access should be ok even though lock has been dropped */
1529 sc->sk_if[port] = NULL;
1537 * Attach the interface. Allocate softc structures, do ifmedia
1538 * setup and ethernet/BPF attach.
1544 struct sk_softc *sc;
1545 int error = 0, *port;
1547 const char *pname = NULL;
1550 sc = device_get_softc(dev);
1553 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1555 mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
1557 * Map control/status registers.
1559 pci_enable_busmaster(dev);
1561 /* Allocate resources */
1562 #ifdef SK_USEIOSPACE
1563 sc->sk_res_spec = sk_res_spec_io;
1565 sc->sk_res_spec = sk_res_spec_mem;
1567 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
1569 if (sc->sk_res_spec == sk_res_spec_mem)
1570 sc->sk_res_spec = sk_res_spec_io;
1572 sc->sk_res_spec = sk_res_spec_mem;
1573 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
1575 device_printf(dev, "couldn't allocate %s resources\n",
1576 sc->sk_res_spec == sk_res_spec_mem ? "memory" :
1582 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1583 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;
1585 /* Bail out if chip is not recognized. */
1586 if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
1587 device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
1588 sc->sk_type, sc->sk_rev);
1593 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1594 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1595 OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
1596 &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
1597 "SK interrupt moderation");
1599 /* Pull in device tunables. */
1600 sc->sk_int_mod = SK_IM_DEFAULT;
1601 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1602 "int_mod", &sc->sk_int_mod);
1604 if (sc->sk_int_mod < SK_IM_MIN ||
1605 sc->sk_int_mod > SK_IM_MAX) {
1606 device_printf(dev, "int_mod value out of range; "
1607 "using default: %d\n", SK_IM_DEFAULT);
1608 sc->sk_int_mod = SK_IM_DEFAULT;
1612 /* Reset the adapter. */
1615 skrs = sk_win_read_1(sc, SK_EPROM0);
1616 if (sc->sk_type == SK_GENESIS) {
1617 /* Read and save RAM size and RAMbuffer offset */
1619 case SK_RAMSIZE_512K_64:
1620 sc->sk_ramsize = 0x80000;
1621 sc->sk_rboff = SK_RBOFF_0;
1623 case SK_RAMSIZE_1024K_64:
1624 sc->sk_ramsize = 0x100000;
1625 sc->sk_rboff = SK_RBOFF_80000;
1627 case SK_RAMSIZE_1024K_128:
1628 sc->sk_ramsize = 0x100000;
1629 sc->sk_rboff = SK_RBOFF_0;
1631 case SK_RAMSIZE_2048K_128:
1632 sc->sk_ramsize = 0x200000;
1633 sc->sk_rboff = SK_RBOFF_0;
1636 device_printf(dev, "unknown ram size: %d\n", skrs);
1640 } else { /* SK_YUKON_FAMILY */
1642 sc->sk_ramsize = 0x20000;
1644 sc->sk_ramsize = skrs * (1<<12);
1645 sc->sk_rboff = SK_RBOFF_0;
1648 /* Read and save physical media type */
1649 sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);
1651 if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
1652 sc->sk_coppertype = 1;
1654 sc->sk_coppertype = 0;
1656 /* Determine whether to name it with VPD PN or just make it up.
1657 * Marvell Yukon VPD PN frequently seems to be bogus. */
1658 switch (pci_get_device(dev)) {
1659 case DEVICEID_SK_V1:
1660 case DEVICEID_BELKIN_5005:
1661 case DEVICEID_3COM_3C940:
1662 case DEVICEID_LINKSYS_EG1032:
1663 case DEVICEID_DLINK_DGE530T_A1:
1664 case DEVICEID_DLINK_DGE530T_B1:
1665 /* Stay with VPD PN. */
1666 (void) pci_get_vpd_ident(dev, &pname);
1668 case DEVICEID_SK_V2:
1669 /* YUKON VPD PN might bear no resemblance to reality. */
1670 switch (sc->sk_type) {
1672 /* Stay with VPD PN. */
1673 (void) pci_get_vpd_ident(dev, &pname);
1676 pname = "Marvell Yukon Gigabit Ethernet";
1679 pname = "Marvell Yukon Lite Gigabit Ethernet";
1682 pname = "Marvell Yukon LP Gigabit Ethernet";
1685 pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
1689 /* Yukon Lite Rev. A0 needs special test. */
1690 if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1694 /* Save flash address register before testing. */
1695 far = sk_win_read_4(sc, SK_EP_ADDR);
1697 sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
1698 testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);
1700 if (testbyte != 0x00) {
1701 /* Yukon Lite Rev. A0 detected. */
1702 sc->sk_type = SK_YUKON_LITE;
1703 sc->sk_rev = SK_YUKON_LITE_REV_A0;
1704 /* Restore flash address register. */
1705 sk_win_write_4(sc, SK_EP_ADDR, far);
1710 device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
1711 "chipver=%02x, rev=%x\n",
1712 pci_get_vendor(dev), pci_get_device(dev),
1713 sc->sk_type, sc->sk_rev);
1718 if (sc->sk_type == SK_YUKON_LITE) {
1719 switch (sc->sk_rev) {
1720 case SK_YUKON_LITE_REV_A0:
1723 case SK_YUKON_LITE_REV_A1:
1726 case SK_YUKON_LITE_REV_A3:
1737 /* Announce the product name and more VPD data if there. */
1739 device_printf(dev, "%s rev. %s(0x%x)\n",
1740 pname, revstr, sc->sk_rev);
1743 device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type);
1744 device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev);
1745 device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
1746 device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
1749 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1750 if (sc->sk_devs[SK_PORT_A] == NULL) {
1751 device_printf(dev, "failed to add child for PORT_A\n");
1755 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1757 device_printf(dev, "failed to allocate memory for "
1758 "ivars of PORT_A\n");
1763 device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1765 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1766 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1767 if (sc->sk_devs[SK_PORT_B] == NULL) {
1768 device_printf(dev, "failed to add child for PORT_B\n");
1772 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1774 device_printf(dev, "failed to allocate memory for "
1775 "ivars of PORT_B\n");
1780 device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1783 /* Turn on the 'driver is loaded' LED. */
1784 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1786 error = bus_generic_attach(dev);
1788 device_printf(dev, "failed to attach port(s)\n");
1792 /* Hook interrupt last to avoid having to lock softc */
1793 error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
1794 NULL, sk_intr, sc, &sc->sk_intrhand);
1797 device_printf(dev, "couldn't set up irq\n");
1809 * Shutdown hardware and free up resources. This can be called any
1810 * time after the mutex has been initialized. It is called in both
1811 * the error case in attach and the normal detach case so it needs
1812 * to be careful about only freeing resources that have actually been
1819 struct sk_if_softc *sc_if;
1822 sc_if = device_get_softc(dev);
1823 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1824 ("sk mutex not initialized in sk_detach"));
1827 ifp = sc_if->sk_ifp;
1828 /* These should only be active if attach_xmac succeeded */
1829 if (device_is_attached(dev)) {
1831 /* Can't hold locks while calling detach */
1832 SK_IF_UNLOCK(sc_if);
1833 callout_drain(&sc_if->sk_tick_ch);
1834 callout_drain(&sc_if->sk_watchdog_ch);
1835 ether_ifdetach(ifp);
1839 * We're generally called from skc_detach() which is using
1840 * device_delete_child() to get to here. It's already trashed
1841 * miibus for us, so don't do it here or we'll panic.
1844 if (sc_if->sk_miibus != NULL)
1845 device_delete_child(dev, sc_if->sk_miibus);
1847 bus_generic_detach(dev);
1848 sk_dma_jumbo_free(sc_if);
1850 SK_IF_UNLOCK(sc_if);
1861 struct sk_softc *sc;
1863 sc = device_get_softc(dev);
1864 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
1866 if (device_is_alive(dev)) {
1867 if (sc->sk_devs[SK_PORT_A] != NULL) {
1868 free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
1869 device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1871 if (sc->sk_devs[SK_PORT_B] != NULL) {
1872 free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
1873 device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1875 bus_generic_detach(dev);
1878 if (sc->sk_intrhand)
1879 bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
1880 bus_release_resources(dev, sc->sk_res_spec, sc->sk_res);
1882 mtx_destroy(&sc->sk_mii_mtx);
1883 mtx_destroy(&sc->sk_mtx);
1888 static bus_dma_tag_t
1889 skc_get_dma_tag(device_t bus, device_t child __unused)
1892 return (bus_get_dma_tag(bus));
1895 struct sk_dmamap_arg {
1896 bus_addr_t sk_busaddr;
1900 sk_dmamap_cb(arg, segs, nseg, error)
1902 bus_dma_segment_t *segs;
1906 struct sk_dmamap_arg *ctx;
1912 ctx->sk_busaddr = segs[0].ds_addr;
1916 * Allocate jumbo buffer storage. The SysKonnect adapters support
1917 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1918 * use them in their drivers. In order for us to use them, we need
1919 * large 9K receive buffers, however standard mbuf clusters are only
1920 * 2048 bytes in size. Consequently, we need to allocate and manage
1921 * our own jumbo buffer pool. Fortunately, this does not require an
1922 * excessive amount of additional code.
1926 struct sk_if_softc *sc_if;
1928 struct sk_dmamap_arg ctx;
1929 struct sk_txdesc *txd;
1930 struct sk_rxdesc *rxd;
1933 /* create parent tag */
1936 * This driver should use BUS_SPACE_MAXADDR for lowaddr argument
1937 * in bus_dma_tag_create(9) as the NIC would support DAC mode.
1938 * However, bz@ reported that it does not work on amd64 with > 4GB
1939 * RAM. Until we have more clues about the breakage, disable DAC mode
1940 * by limiting the DMA address space to 32 bits.
1942 error = bus_dma_tag_create(
1943 bus_get_dma_tag(sc_if->sk_if_dev),/* parent */
1944 1, 0, /* algnmnt, boundary */
1945 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1946 BUS_SPACE_MAXADDR, /* highaddr */
1947 NULL, NULL, /* filter, filterarg */
1948 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1950 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1952 NULL, NULL, /* lockfunc, lockarg */
1953 &sc_if->sk_cdata.sk_parent_tag);
1955 device_printf(sc_if->sk_if_dev,
1956 "failed to create parent DMA tag\n");
1960 /* create tag for Tx ring */
1961 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1962 SK_RING_ALIGN, 0, /* algnmnt, boundary */
1963 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1964 BUS_SPACE_MAXADDR, /* highaddr */
1965 NULL, NULL, /* filter, filterarg */
1966 SK_TX_RING_SZ, /* maxsize */
1968 SK_TX_RING_SZ, /* maxsegsize */
1970 NULL, NULL, /* lockfunc, lockarg */
1971 &sc_if->sk_cdata.sk_tx_ring_tag);
1973 device_printf(sc_if->sk_if_dev,
1974 "failed to allocate Tx ring DMA tag\n");
1978 /* create tag for Rx ring */
1979 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1980 SK_RING_ALIGN, 0, /* algnmnt, boundary */
1981 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1982 BUS_SPACE_MAXADDR, /* highaddr */
1983 NULL, NULL, /* filter, filterarg */
1984 SK_RX_RING_SZ, /* maxsize */
1986 SK_RX_RING_SZ, /* maxsegsize */
1988 NULL, NULL, /* lockfunc, lockarg */
1989 &sc_if->sk_cdata.sk_rx_ring_tag);
1991 device_printf(sc_if->sk_if_dev,
1992 "failed to allocate Rx ring DMA tag\n");
1996 /* create tag for Tx buffers */
1997 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1998 1, 0, /* algnmnt, boundary */
1999 BUS_SPACE_MAXADDR, /* lowaddr */
2000 BUS_SPACE_MAXADDR, /* highaddr */
2001 NULL, NULL, /* filter, filterarg */
2002 MCLBYTES * SK_MAXTXSEGS, /* maxsize */
2003 SK_MAXTXSEGS, /* nsegments */
2004 MCLBYTES, /* maxsegsize */
2006 NULL, NULL, /* lockfunc, lockarg */
2007 &sc_if->sk_cdata.sk_tx_tag);
2009 device_printf(sc_if->sk_if_dev,
2010 "failed to allocate Tx DMA tag\n");
2014 /* create tag for Rx buffers */
2015 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2016 1, 0, /* algnmnt, boundary */
2017 BUS_SPACE_MAXADDR, /* lowaddr */
2018 BUS_SPACE_MAXADDR, /* highaddr */
2019 NULL, NULL, /* filter, filterarg */
2020 MCLBYTES, /* maxsize */
2022 MCLBYTES, /* maxsegsize */
2024 NULL, NULL, /* lockfunc, lockarg */
2025 &sc_if->sk_cdata.sk_rx_tag);
2027 device_printf(sc_if->sk_if_dev,
2028 "failed to allocate Rx DMA tag\n");
2032 /* allocate DMA'able memory and load the DMA map for Tx ring */
2033 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
2034 (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT |
2035 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_tx_ring_map);
2037 device_printf(sc_if->sk_if_dev,
2038 "failed to allocate DMA'able memory for Tx ring\n");
2043 error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
2044 sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
2045 SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2047 device_printf(sc_if->sk_if_dev,
2048 "failed to load DMA'able memory for Tx ring\n");
2051 sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;
2053 /* allocate DMA'able memory and load the DMA map for Rx ring */
2054 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
2055 (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT |
2056 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_rx_ring_map);
2058 device_printf(sc_if->sk_if_dev,
2059 "failed to allocate DMA'able memory for Rx ring\n");
2064 error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
2065 sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
2066 SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2068 device_printf(sc_if->sk_if_dev,
2069 "failed to load DMA'able memory for Rx ring\n");
2072 sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;
2074 /* create DMA maps for Tx buffers */
2075 for (i = 0; i < SK_TX_RING_CNT; i++) {
2076 txd = &sc_if->sk_cdata.sk_txdesc[i];
2078 txd->tx_dmamap = NULL;
2079 error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
2082 device_printf(sc_if->sk_if_dev,
2083 "failed to create Tx dmamap\n");
2088 /* create DMA maps for Rx buffers */
2089 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2090 &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
2091 device_printf(sc_if->sk_if_dev,
2092 "failed to create spare Rx dmamap\n");
2095 for (i = 0; i < SK_RX_RING_CNT; i++) {
2096 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2098 rxd->rx_dmamap = NULL;
2099 error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2102 device_printf(sc_if->sk_if_dev,
2103 "failed to create Rx dmamap\n");
2113 sk_dma_jumbo_alloc(sc_if)
2114 struct sk_if_softc *sc_if;
2116 struct sk_dmamap_arg ctx;
2117 struct sk_rxdesc *jrxd;
2120 if (jumbo_disable != 0) {
2121 device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n");
2122 sc_if->sk_jumbo_disable = 1;
2125 /* create tag for jumbo Rx ring */
2126 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2127 SK_RING_ALIGN, 0, /* algnmnt, boundary */
2128 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2129 BUS_SPACE_MAXADDR, /* highaddr */
2130 NULL, NULL, /* filter, filterarg */
2131 SK_JUMBO_RX_RING_SZ, /* maxsize */
2133 SK_JUMBO_RX_RING_SZ, /* maxsegsize */
2135 NULL, NULL, /* lockfunc, lockarg */
2136 &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2138 device_printf(sc_if->sk_if_dev,
2139 "failed to allocate jumbo Rx ring DMA tag\n");
2143 /* create tag for jumbo Rx buffers */
2144 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2145 1, 0, /* algnmnt, boundary */
2146 BUS_SPACE_MAXADDR, /* lowaddr */
2147 BUS_SPACE_MAXADDR, /* highaddr */
2148 NULL, NULL, /* filter, filterarg */
2149 MJUM9BYTES, /* maxsize */
2151 MJUM9BYTES, /* maxsegsize */
2153 NULL, NULL, /* lockfunc, lockarg */
2154 &sc_if->sk_cdata.sk_jumbo_rx_tag);
2156 device_printf(sc_if->sk_if_dev,
2157 "failed to allocate jumbo Rx DMA tag\n");
2161 /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
2162 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2163 (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, BUS_DMA_NOWAIT |
2164 BUS_DMA_COHERENT | BUS_DMA_ZERO,
2165 &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2167 device_printf(sc_if->sk_if_dev,
2168 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2173 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2174 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2175 sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
2176 &ctx, BUS_DMA_NOWAIT);
2178 device_printf(sc_if->sk_if_dev,
2179 "failed to load DMA'able memory for jumbo Rx ring\n");
2182 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
2184 /* create DMA maps for jumbo Rx buffers */
2185 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2186 &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
2187 device_printf(sc_if->sk_if_dev,
2188 "failed to create spare jumbo Rx dmamap\n");
2191 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2192 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2194 jrxd->rx_dmamap = NULL;
2195 error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2198 device_printf(sc_if->sk_if_dev,
2199 "failed to create jumbo Rx dmamap\n");
2207 sk_dma_jumbo_free(sc_if);
2208 device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to "
2209 "resource shortage\n");
2210 sc_if->sk_jumbo_disable = 1;
2216 struct sk_if_softc *sc_if;
2218 struct sk_txdesc *txd;
2219 struct sk_rxdesc *rxd;
2223 if (sc_if->sk_cdata.sk_tx_ring_tag) {
2224 if (sc_if->sk_rdata.sk_tx_ring_paddr)
2225 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
2226 sc_if->sk_cdata.sk_tx_ring_map);
2227 if (sc_if->sk_rdata.sk_tx_ring)
2228 bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
2229 sc_if->sk_rdata.sk_tx_ring,
2230 sc_if->sk_cdata.sk_tx_ring_map);
2231 sc_if->sk_rdata.sk_tx_ring = NULL;
2232 sc_if->sk_rdata.sk_tx_ring_paddr = 0;
2233 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
2234 sc_if->sk_cdata.sk_tx_ring_tag = NULL;
2237 if (sc_if->sk_cdata.sk_rx_ring_tag) {
2238 if (sc_if->sk_rdata.sk_rx_ring_paddr)
2239 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
2240 sc_if->sk_cdata.sk_rx_ring_map);
2241 if (sc_if->sk_rdata.sk_rx_ring)
2242 bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
2243 sc_if->sk_rdata.sk_rx_ring,
2244 sc_if->sk_cdata.sk_rx_ring_map);
2245 sc_if->sk_rdata.sk_rx_ring = NULL;
2246 sc_if->sk_rdata.sk_rx_ring_paddr = 0;
2247 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
2248 sc_if->sk_cdata.sk_rx_ring_tag = NULL;
2251 if (sc_if->sk_cdata.sk_tx_tag) {
2252 for (i = 0; i < SK_TX_RING_CNT; i++) {
2253 txd = &sc_if->sk_cdata.sk_txdesc[i];
2254 if (txd->tx_dmamap) {
2255 bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
2257 txd->tx_dmamap = NULL;
2260 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
2261 sc_if->sk_cdata.sk_tx_tag = NULL;
2264 if (sc_if->sk_cdata.sk_rx_tag) {
2265 for (i = 0; i < SK_RX_RING_CNT; i++) {
2266 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2267 if (rxd->rx_dmamap) {
2268 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2270 rxd->rx_dmamap = NULL;
2273 if (sc_if->sk_cdata.sk_rx_sparemap) {
2274 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2275 sc_if->sk_cdata.sk_rx_sparemap);
2276 sc_if->sk_cdata.sk_rx_sparemap = NULL;
2278 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
2279 sc_if->sk_cdata.sk_rx_tag = NULL;
2282 if (sc_if->sk_cdata.sk_parent_tag) {
2283 bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
2284 sc_if->sk_cdata.sk_parent_tag = NULL;
2289 sk_dma_jumbo_free(sc_if)
2290 struct sk_if_softc *sc_if;
2292 struct sk_rxdesc *jrxd;
2296 if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
2297 if (sc_if->sk_rdata.sk_jumbo_rx_ring_paddr)
2298 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2299 sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2300 if (sc_if->sk_rdata.sk_jumbo_rx_ring)
2301 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2302 sc_if->sk_rdata.sk_jumbo_rx_ring,
2303 sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2304 sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
2305 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = 0;
2306 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2307 sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
2310 /* jumbo Rx buffers */
2311 if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
2312 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2313 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2314 if (jrxd->rx_dmamap) {
2316 sc_if->sk_cdata.sk_jumbo_rx_tag,
2318 jrxd->rx_dmamap = NULL;
2321 if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
2322 bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
2323 sc_if->sk_cdata.sk_jumbo_rx_sparemap);
2324 sc_if->sk_cdata.sk_jumbo_rx_sparemap = NULL;
2326 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
2327 sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
2332 sk_txcksum(ifp, m, f)
2335 struct sk_tx_desc *f;
2341 offset = sizeof(struct ip) + ETHER_HDR_LEN;
2342 for(; m && m->m_len == 0; m = m->m_next)
2344 if (m == NULL || m->m_len < ETHER_HDR_LEN) {
2345 if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
2346 /* checksum may be corrupted */
2349 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
2350 if (m->m_len != ETHER_HDR_LEN) {
2351 if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
2353 /* checksum may be corrupted */
2356 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
2359 offset = sizeof(struct ip) + ETHER_HDR_LEN;
2360 /* checksum may be corrupted */
2363 ip = mtod(m, struct ip *);
2365 p = mtod(m, u_int8_t *);
2367 ip = (struct ip *)p;
2369 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
2372 f->sk_csum_startval = 0;
2373 f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) |
2378 sk_encap(sc_if, m_head)
2379 struct sk_if_softc *sc_if;
2380 struct mbuf **m_head;
2382 struct sk_txdesc *txd;
2383 struct sk_tx_desc *f = NULL;
2385 bus_dma_segment_t txsegs[SK_MAXTXSEGS];
2386 u_int32_t cflags, frag, si, sk_ctl;
2389 SK_IF_LOCK_ASSERT(sc_if);
2391 if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
2394 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2395 txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
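/*
 * If the initial load fails with EFBIG the mbuf chain has more segments
 * than SK_MAXTXSEGS, so it is collapsed into a single contiguous mbuf
 * with m_defrag() and loaded again before giving up.
 */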
2396 if (error == EFBIG) {
2397 m = m_defrag(*m_head, M_NOWAIT);
2404 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2405 txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2411 } else if (error != 0)
2418 if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
2419 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2424 if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0)
2425 cflags = SK_OPCODE_CSUM;
2427 cflags = SK_OPCODE_DEFAULT;
2428 si = frag = sc_if->sk_cdata.sk_tx_prod;
2429 for (i = 0; i < nseg; i++) {
2430 f = &sc_if->sk_rdata.sk_tx_ring[frag];
2431 f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
2432 f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
2433 sk_ctl = txsegs[i].ds_len | cflags;
2435 if (cflags == SK_OPCODE_CSUM)
2436 sk_txcksum(sc_if->sk_ifp, m, f);
2437 sk_ctl |= SK_TXCTL_FIRSTFRAG;
2439 sk_ctl |= SK_TXCTL_OWN;
2440 f->sk_ctl = htole32(sk_ctl);
2441 sc_if->sk_cdata.sk_tx_cnt++;
2442 SK_INC(frag, SK_TX_RING_CNT);
2444 sc_if->sk_cdata.sk_tx_prod = frag;
2446 /* set EOF on the last descriptor */
2447 frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
2448 f = &sc_if->sk_rdata.sk_tx_ring[frag];
2449 f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);
2451 /* hand ownership of the first descriptor over to the NIC */
2452 f = &sc_if->sk_rdata.sk_tx_ring[si];
2453 f->sk_ctl |= htole32(SK_TXCTL_OWN);
2455 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
2456 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
2459 /* sync descriptors */
2460 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2461 BUS_DMASYNC_PREWRITE);
2462 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2463 sc_if->sk_cdata.sk_tx_ring_map,
2464 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
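/*
 * Note on ordering: in the loop above the OWN bit is set on every
 * fragment except the first; only after the whole chain (and the EOF
 * mark on the last fragment) has been written is ownership of the
 * first descriptor handed to the NIC, so the chip never sees a
 * partially built chain.
 */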
2473 struct sk_if_softc *sc_if;
2475 sc_if = ifp->if_softc;
2478 sk_start_locked(ifp);
2479 SK_IF_UNLOCK(sc_if);
2485 sk_start_locked(ifp)
2488 struct sk_softc *sc;
2489 struct sk_if_softc *sc_if;
2490 struct mbuf *m_head;
2493 sc_if = ifp->if_softc;
2494 sc = sc_if->sk_softc;
2496 SK_IF_LOCK_ASSERT(sc_if);
2498 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2499 sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
2500 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2505 * Pack the data into the transmit ring. If we
2506 * don't have room, set the OACTIVE flag and wait
2507 * for the NIC to drain the ring.
2509 if (sk_encap(sc_if, &m_head)) {
2512 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2513 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2519 * If there's a BPF listener, bounce a copy of this frame
2522 BPF_MTAP(ifp, m_head);
2527 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2529 /* Set a timeout in case the chip goes out to lunch. */
2530 sc_if->sk_watchdog_timer = 5;
2539 struct sk_if_softc *sc_if;
2543 sc_if = ifp->if_softc;
2545 SK_IF_LOCK_ASSERT(sc_if);
2547 if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer)
2551 * Reclaim first as there is a possibility of losing Tx completion interrupts.
2555 if (sc_if->sk_cdata.sk_tx_cnt != 0) {
2556 if_printf(sc_if->sk_ifp, "watchdog timeout\n");
2557 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2558 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2559 sk_init_locked(sc_if);
2563 callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
2572 struct sk_softc *sc;
2574 sc = device_get_softc(dev);
2577 /* Turn off the 'driver is loaded' LED. */
2578 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2581 * Reset the GEnesis controller. Doing this should also
2582 * assert the resets on the attached XMAC(s).
2594 struct sk_softc *sc;
2595 struct sk_if_softc *sc_if0, *sc_if1;
2596 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2598 sc = device_get_softc(dev);
2602 sc_if0 = sc->sk_if[SK_PORT_A];
2603 sc_if1 = sc->sk_if[SK_PORT_B];
2605 ifp0 = sc_if0->sk_ifp;
2607 ifp1 = sc_if1->sk_ifp;
2612 sc->sk_suspended = 1;
2623 struct sk_softc *sc;
2624 struct sk_if_softc *sc_if0, *sc_if1;
2625 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2627 sc = device_get_softc(dev);
2631 sc_if0 = sc->sk_if[SK_PORT_A];
2632 sc_if1 = sc->sk_if[SK_PORT_B];
2634 ifp0 = sc_if0->sk_ifp;
2636 ifp1 = sc_if1->sk_ifp;
2637 if (ifp0 != NULL && ifp0->if_flags & IFF_UP)
2638 sk_init_locked(sc_if0);
2639 if (ifp1 != NULL && ifp1->if_flags & IFF_UP)
2640 sk_init_locked(sc_if1);
2641 sc->sk_suspended = 0;
2649 * According to the data sheet from SK-NET GENESIS the hardware can compute
2650 * two Rx checksums at the same time (each checksum start position is
2651 * programmed in the Rx descriptors). However it seems that TCP/UDP checksum
2652 * offload does not work, at least on my Yukon hardware. I tried every possible
2653 * way to get a correct checksum value but couldn't get one. So TCP/UDP
2654 * checksum offload is disabled for now and only IP checksum offload is enabled.
2656 * As the normal IP header size is 20 bytes I don't expect it to give an
2657 * increase in throughput. However it seems it doesn't hurt performance in
2658 * my testing. If there is more detailed information on the checksum secrets
2659 * of the hardware in question please contact yongari@FreeBSD.org to add
2660 * TCP/UDP checksum offload support.
2662 static __inline void
2663 sk_rxcksum(ifp, m, csum)
2668 struct ether_header *eh;
2670 int32_t hlen, len, pktlen;
2671 u_int16_t csum1, csum2, ipcsum;
2673 pktlen = m->m_pkthdr.len;
2674 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
2676 eh = mtod(m, struct ether_header *);
2677 if (eh->ether_type != htons(ETHERTYPE_IP))
2679 ip = (struct ip *)(eh + 1);
2680 if (ip->ip_v != IPVERSION)
2682 hlen = ip->ip_hl << 2;
2683 pktlen -= sizeof(struct ether_header);
2684 if (hlen < sizeof(struct ip))
2686 if (ntohs(ip->ip_len) < hlen)
2688 if (ntohs(ip->ip_len) != pktlen)
2691 csum1 = htons(csum & 0xffff);
2692 csum2 = htons((csum >> 16) & 0xffff);
2693 ipcsum = in_addword(csum1, ~csum2 & 0xffff);
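/*
 * The folding above relies on the two hardware sums sharing a common
 * tail: assuming csum1 starts at the IP header and csum2 starts 20
 * bytes later (the default start positions programmed into the Rx
 * descriptors), their one's complement difference is the sum over the
 * first 20 header bytes, which for a valid option-less header
 * (including its checksum field) folds to 0xffff.
 */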
2694 /* checksum fixup for IP options */
2695 len = hlen - sizeof(struct ip);
2698 * If the second checksum value were correct we could compute the IP
2699 * checksum with simple math. Unfortunately the second checksum
2700 * value is wrong, so we can't verify the checksum from that
2701 * value (it seems some magic is needed to get the correct
2702 * value). If the second checksum value were correct it would also
2703 * mean we could get the TCP/UDP checksum here. However, that still
2704 * needs a pseudo header checksum calculation due to hardware limitations.
2709 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2710 if (ipcsum == 0xffff)
2711 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
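/*
 * Untested sketch (an assumption, not part of the original driver): if
 * the second hardware checksum were trustworthy, TCP/UDP offload could
 * be reported to the stack as a partial sum, leaving the protocol layer
 * to add the pseudo header, roughly like this:
 */
#if 0
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
m->m_pkthdr.csum_data = csum2;
#endif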
2715 sk_rxvalid(sc, stat, len)
2716 struct sk_softc *sc;
2717 u_int32_t stat, len;
2720 if (sc->sk_type == SK_GENESIS) {
2721 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
2722 XM_RXSTAT_BYTES(stat) != len)
2725 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
2726 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
2727 YU_RXSTAT_JABBER)) != 0 ||
2728 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
2729 YU_RXSTAT_BYTES(stat) != len)
2738 struct sk_if_softc *sc_if;
2740 struct sk_softc *sc;
2743 struct sk_rx_desc *cur_rx;
2744 struct sk_rxdesc *rxd;
2746 u_int32_t csum, rxstat, sk_ctl;
2748 sc = sc_if->sk_softc;
2749 ifp = sc_if->sk_ifp;
2751 SK_IF_LOCK_ASSERT(sc_if);
2753 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2754 sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);
2757 for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
2758 prog++, SK_INC(cons, SK_RX_RING_CNT)) {
2759 cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
2760 sk_ctl = le32toh(cur_rx->sk_ctl);
2761 if ((sk_ctl & SK_RXCTL_OWN) != 0)
2763 rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
2764 rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2766 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2767 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2768 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2769 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2770 SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
2771 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2772 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2773 sk_discard_rxbuf(sc_if, cons);
2778 csum = le32toh(cur_rx->sk_csum);
2779 if (sk_newbuf(sc_if, cons) != 0) {
2780 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2781 /* reuse old buffer */
2782 sk_discard_rxbuf(sc_if, cons);
2785 m->m_pkthdr.rcvif = ifp;
2786 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2787 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2788 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2789 sk_rxcksum(ifp, m, csum);
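/*
 * The interface lock is dropped across if_input() so the upper layers
 * can process (and potentially re-enter the driver for) this packet
 * without holding the softc lock; it is re-taken before the receive
 * loop continues.
 */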
2790 SK_IF_UNLOCK(sc_if);
2791 (*ifp->if_input)(ifp, m);
2796 sc_if->sk_cdata.sk_rx_cons = cons;
2797 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2798 sc_if->sk_cdata.sk_rx_ring_map,
2799 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2804 sk_jumbo_rxeof(sc_if)
2805 struct sk_if_softc *sc_if;
2807 struct sk_softc *sc;
2810 struct sk_rx_desc *cur_rx;
2811 struct sk_rxdesc *jrxd;
2813 u_int32_t csum, rxstat, sk_ctl;
2815 sc = sc_if->sk_softc;
2816 ifp = sc_if->sk_ifp;
2818 SK_IF_LOCK_ASSERT(sc_if);
2820 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2821 sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);
2824 for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
2825 prog < SK_JUMBO_RX_RING_CNT;
2826 prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
2827 cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
2828 sk_ctl = le32toh(cur_rx->sk_ctl);
2829 if ((sk_ctl & SK_RXCTL_OWN) != 0)
2831 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
2832 rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2834 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2835 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2836 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2837 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2838 SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
2839 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2840 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2841 sk_discard_jumbo_rxbuf(sc_if, cons);
2846 csum = le32toh(cur_rx->sk_csum);
2847 if (sk_jumbo_newbuf(sc_if, cons) != 0) {
2848 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2849 /* reuse old buffer */
2850 sk_discard_jumbo_rxbuf(sc_if, cons);
2853 m->m_pkthdr.rcvif = ifp;
2854 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2855 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2856 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2857 sk_rxcksum(ifp, m, csum);
2858 SK_IF_UNLOCK(sc_if);
2859 (*ifp->if_input)(ifp, m);
2864 sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
2865 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2866 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2867 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2873 struct sk_if_softc *sc_if;
2875 struct sk_txdesc *txd;
2876 struct sk_tx_desc *cur_tx;
2878 u_int32_t idx, sk_ctl;
2880 ifp = sc_if->sk_ifp;
2882 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2885 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2886 sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
2888 * Go through our tx ring and free mbufs for those
2889 * frames that have been sent.
2891 for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
2892 if (sc_if->sk_cdata.sk_tx_cnt <= 0)
2894 cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
2895 sk_ctl = le32toh(cur_tx->sk_ctl);
2896 if (sk_ctl & SK_TXCTL_OWN)
2898 sc_if->sk_cdata.sk_tx_cnt--;
2899 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2900 if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
2902 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2903 BUS_DMASYNC_POSTWRITE);
2904 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2906 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2909 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
2910 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
2911 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2913 sc_if->sk_cdata.sk_tx_cons = idx;
2914 sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
2916 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2917 sc_if->sk_cdata.sk_tx_ring_map,
2918 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2925 struct sk_if_softc *sc_if;
2926 struct mii_data *mii;
2931 ifp = sc_if->sk_ifp;
2932 mii = device_get_softc(sc_if->sk_miibus);
2934 if (!(ifp->if_flags & IFF_UP))
2937 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2938 sk_intr_bcom(sc_if);
2943 * According to SysKonnect, the correct way to verify that
2944 * the link has come back up is to poll bit 0 of the GPIO
2945 * register three times. This pin has the signal from the
2946 * link_sync pin connected to it; if we read the same link
2947 * state 3 times in a row, we know the link is up.
2949 for (i = 0; i < 3; i++) {
2950 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
2955 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
2959 /* Turn the GP0 interrupt back on. */
2960 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2961 SK_XM_READ_2(sc_if, XM_ISR);
2963 callout_stop(&sc_if->sk_tick_ch);
2967 sk_yukon_tick(xsc_if)
2970 struct sk_if_softc *sc_if;
2971 struct mii_data *mii;
2974 mii = device_get_softc(sc_if->sk_miibus);
2977 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
2982 struct sk_if_softc *sc_if;
2984 struct mii_data *mii;
2987 mii = device_get_softc(sc_if->sk_miibus);
2988 ifp = sc_if->sk_ifp;
2990 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2993 * Read the PHY interrupt register to make sure
2994 * we clear any pending interrupts.
2996 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
2998 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2999 sk_init_xmac(sc_if);
3003 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
3005 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
3008 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
3010 /* Turn off the link LED. */
3011 SK_IF_WRITE_1(sc_if, 0,
3012 SK_LINKLED1_CTL, SK_LINKLED_OFF);
3014 } else if (status & BRGPHY_ISR_LNK_CHG) {
3015 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3016 BRGPHY_MII_IMR, 0xFF00);
3019 /* Turn on the link LED. */
3020 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3021 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
3022 SK_LINKLED_BLINK_OFF);
3025 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3029 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3036 struct sk_if_softc *sc_if;
3038 struct sk_softc *sc;
3041 sc = sc_if->sk_softc;
3042 status = SK_XM_READ_2(sc_if, XM_ISR);
3045 * Link has gone down. Start MII tick timeout to
3046 * watch for link resync.
3048 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
3049 if (status & XM_ISR_GP0_SET) {
3050 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
3051 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3054 if (status & XM_ISR_AUTONEG_DONE) {
3055 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3059 if (status & XM_IMR_TX_UNDERRUN)
3060 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
3062 if (status & XM_IMR_RX_OVERRUN)
3063 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
3065 status = SK_XM_READ_2(sc_if, XM_ISR);
3071 sk_intr_yukon(sc_if)
3072 struct sk_if_softc *sc_if;
3076 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
3078 if ((status & SK_GMAC_INT_RX_OVER) != 0) {
3079 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3080 SK_RFCTL_RX_FIFO_OVER);
3083 if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
3084 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3085 SK_TFCTL_TX_FIFO_UNDER);
3093 struct sk_softc *sc = xsc;
3094 struct sk_if_softc *sc_if0, *sc_if1;
3095 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
3100 status = CSR_READ_4(sc, SK_ISSR);
3101 if (status == 0 || status == 0xffffffff || sc->sk_suspended)
3104 sc_if0 = sc->sk_if[SK_PORT_A];
3105 sc_if1 = sc->sk_if[SK_PORT_B];
3108 ifp0 = sc_if0->sk_ifp;
3110 ifp1 = sc_if1->sk_ifp;
3112 for (; (status &= sc->sk_intrmask) != 0;) {
3113 /* Handle receive interrupts first. */
3114 if (status & SK_ISR_RX1_EOF) {
3115 if (ifp0->if_mtu > SK_MAX_FRAMELEN)
3116 sk_jumbo_rxeof(sc_if0);
3119 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
3120 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3122 if (status & SK_ISR_RX2_EOF) {
3123 if (ifp1->if_mtu > SK_MAX_FRAMELEN)
3124 sk_jumbo_rxeof(sc_if1);
3127 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
3128 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3131 /* Then transmit interrupts. */
3132 if (status & SK_ISR_TX1_S_EOF) {
3134 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
3136 if (status & SK_ISR_TX2_S_EOF) {
3138 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
3141 /* Then MAC interrupts. */
3142 if (status & SK_ISR_MAC1 &&
3143 ifp0->if_drv_flags & IFF_DRV_RUNNING) {
3144 if (sc->sk_type == SK_GENESIS)
3145 sk_intr_xmac(sc_if0);
3147 sk_intr_yukon(sc_if0);
3150 if (status & SK_ISR_MAC2 &&
3151 ifp1->if_drv_flags & IFF_DRV_RUNNING) {
3152 if (sc->sk_type == SK_GENESIS)
3153 sk_intr_xmac(sc_if1);
3155 sk_intr_yukon(sc_if1);
3158 if (status & SK_ISR_EXTERNAL_REG) {
3160 sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
3161 sk_intr_bcom(sc_if0);
3163 sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
3164 sk_intr_bcom(sc_if1);
3166 status = CSR_READ_4(sc, SK_ISSR);
3169 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3171 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3172 sk_start_locked(ifp0);
3173 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3174 sk_start_locked(ifp1);
3182 struct sk_if_softc *sc_if;
3184 struct sk_softc *sc;
3186 u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2];
3187 static const struct sk_bcom_hack bhack[] = {
3188 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
3189 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
3190 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
3193 SK_IF_LOCK_ASSERT(sc_if);
3195 sc = sc_if->sk_softc;
3196 ifp = sc_if->sk_ifp;
3198 /* Unreset the XMAC. */
3199 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
3202 /* Reset the XMAC's internal state. */
3203 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3205 /* Save the XMAC II revision */
3206 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
3209 * Perform additional initialization for external PHYs,
3210 * namely for the 1000baseTX cards that use the XMAC's
3213 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3217 /* Take PHY out of reset. */
3218 val = sk_win_read_4(sc, SK_GPIO);
3219 if (sc_if->sk_port == SK_PORT_A)
3220 val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
3222 val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
3223 sk_win_write_4(sc, SK_GPIO, val);
3225 /* Enable GMII mode on the XMAC. */
3226 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
3228 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3229 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
3231 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3232 BRGPHY_MII_IMR, 0xFFF0);
3235 * Early versions of the BCM5400 apparently have
3236 * a bug that requires them to have their reserved
3237 * registers initialized to some magic values. I don't
3238 * know what the numbers do, I'm just the messenger.
3240 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
3242 while(bhack[i].reg) {
3243 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3244 bhack[i].reg, bhack[i].val);
3250 /* Set station address */
3251 bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
3252 SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
3253 SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
3254 SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
3255 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
3257 if (ifp->if_flags & IFF_BROADCAST) {
3258 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3260 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3263 /* We don't need the FCS appended to the packet. */
3264 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
3266 /* We want short frames padded to 60 bytes. */
3267 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
3270 * Enable the reception of all error frames. This is
3271 * a necessary evil due to the design of the XMAC. The
3272 * XMAC's receive FIFO is only 8K in size, however jumbo
3273 * frames can be up to 9000 bytes in length. When bad
3274 * frame filtering is enabled, the XMAC's RX FIFO operates
3275 * in 'store and forward' mode. For this to work, the
3276 * entire frame has to fit into the FIFO, but that means
3277 * that jumbo frames larger than 8192 bytes will be
3278 * truncated. Disabling all bad frame filtering causes
3279 * the RX FIFO to operate in streaming mode, in which
3280 * case the XMAC will start transferring frames out of the
3281 * RX FIFO as soon as the FIFO threshold is reached.
3283 if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3284 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
3285 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
3286 XM_MODE_RX_INRANGELEN);
3287 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3289 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
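/*
 * In streaming mode the hardware no longer rejects bad frames for us,
 * so oversized, runt and CRC-damaged frames reach the host; the length
 * and status checks in sk_rxvalid()/sk_rxeof() are what weed them out
 * in software instead.
 */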
3292 * Bump up the transmit threshold. This helps hold off transmit
3293 * underruns when we're blasting traffic from both ports at once.
3295 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
3298 sk_rxfilter_genesis(sc_if);
3300 /* Clear and enable interrupts */
3301 SK_XM_READ_2(sc_if, XM_ISR);
3302 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
3303 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
3305 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3307 /* Configure MAC arbiter */
3308 switch(sc_if->sk_xmac_rev) {
3309 case XM_XMAC_REV_B2:
3310 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
3311 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
3312 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
3313 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
3314 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
3315 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
3316 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
3317 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
3318 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3320 case XM_XMAC_REV_C1:
3321 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
3322 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
3323 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
3324 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
3325 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
3326 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
3327 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
3328 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
3329 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3334 sk_win_write_2(sc, SK_MACARB_CTL,
3335 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
3343 sk_init_yukon(sc_if)
3344 struct sk_if_softc *sc_if;
3348 struct sk_softc *sc;
3353 SK_IF_LOCK_ASSERT(sc_if);
3355 sc = sc_if->sk_softc;
3356 ifp = sc_if->sk_ifp;
3358 if (sc->sk_type == SK_YUKON_LITE &&
3359 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3361 * Workaround code for COMA mode, set PHY reset.
3362 * Otherwise it will not correctly take the chip out of COMA mode.
3365 v = sk_win_read_4(sc, SK_GPIO);
3366 v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
3367 sk_win_write_4(sc, SK_GPIO, v);
3370 /* GMAC and GPHY Reset */
3371 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
3372 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
3375 if (sc->sk_type == SK_YUKON_LITE &&
3376 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3378 * Workaround code for COMA mode, clear PHY reset
3380 v = sk_win_read_4(sc, SK_GPIO);
3383 sk_win_write_4(sc, SK_GPIO, v);
3386 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
3387 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
3389 if (sc->sk_coppertype)
3390 phy |= SK_GPHY_COPPER;
3392 phy |= SK_GPHY_FIBER;
3394 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
3396 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
3397 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
3398 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
3400 /* unused read of the interrupt source register */
3401 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
3403 reg = SK_YU_READ_2(sc_if, YUKON_PAR);
3405 /* MIB Counter Clear Mode set */
3406 reg |= YU_PAR_MIB_CLR;
3407 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3409 /* MIB Counter Clear Mode clear */
3410 reg &= ~YU_PAR_MIB_CLR;
3411 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3413 /* receive control reg */
3414 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
3416 /* transmit parameter register */
3417 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
3418 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
3420 /* serial mode register */
3421 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
3422 if (ifp->if_mtu > SK_MAX_FRAMELEN)
3423 reg |= YU_SMR_MFL_JUMBO;
3424 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
3426 /* Set up Yukon's station address */
3427 eaddr = IF_LLADDR(sc_if->sk_ifp);
3428 for (i = 0; i < 3; i++)
3429 SK_YU_WRITE_2(sc_if, SK_MAC0_0 + i * 4,
3430 eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3431 /* Set GMAC source address of flow control. */
3432 for (i = 0; i < 3; i++)
3433 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
3434 eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3435 /* Set GMAC virtual address. */
3436 for (i = 0; i < 3; i++)
3437 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4,
3438 eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3441 sk_rxfilter_yukon(sc_if);
3443 /* enable interrupt mask for counter overflows */
3444 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
3445 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
3446 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
3448 /* Configure RX MAC FIFO Flush Mask */
3449 v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
3450 YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
3452 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);
3454 /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
3455 if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
3456 v = SK_TFCTL_OPERATION_ON;
3458 v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
3459 /* Configure RX MAC FIFO */
3460 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
3461 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);
3463 /* Increase flush threshold to 64 bytes */
3464 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
3465 SK_RFCTL_FIFO_THRESHOLD + 1);
3467 /* Configure TX MAC FIFO */
3468 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
3469 SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
3473 * Note that to properly initialize any part of the GEnesis chip,
3474 * you first have to take it out of reset mode.
3480 struct sk_if_softc *sc_if = xsc;
3483 sk_init_locked(sc_if);
3484 SK_IF_UNLOCK(sc_if);
3490 sk_init_locked(sc_if)
3491 struct sk_if_softc *sc_if;
3493 struct sk_softc *sc;
3495 struct mii_data *mii;
3500 SK_IF_LOCK_ASSERT(sc_if);
3502 ifp = sc_if->sk_ifp;
3503 sc = sc_if->sk_softc;
3504 mii = device_get_softc(sc_if->sk_miibus);
3506 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3509 /* Cancel pending I/O and free all RX/TX buffers. */
3512 if (sc->sk_type == SK_GENESIS) {
3513 /* Configure LINK_SYNC LED */
3514 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
3515 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3516 SK_LINKLED_LINKSYNC_ON);
3518 /* Configure RX LED */
3519 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
3520 SK_RXLEDCTL_COUNTER_START);
3522 /* Configure TX LED */
3523 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
3524 SK_TXLEDCTL_COUNTER_START);
3528 * Configure descriptor poll timer
3530 * The SK-NET GENESIS data sheet says there is a possibility of losing the
3531 * Start transmit command due to CPU/cache related interim storage problems
3532 * under certain conditions. The document recommends a polling
3533 * mechanism to send a Start transmit command to initiate transfer
3534 * of ready descriptors regularly. To cope with this issue sk(4) now
3535 * enables the descriptor poll timer to initiate descriptor processing
3536 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still
3537 * issues SK_TXBMU_TX_START to the Tx BMU to get fast execution of the Tx
3538 * command instead of waiting for the next descriptor polling time.
3539 * The same rule may apply to the Rx side too but it seems that is not
3540 * needed at the moment.
3541 * Since sk(4) uses descriptor polling only as a last resort there is no
3542 * need to set a smaller polling time than the maximum allowable one.
3544 SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);
3546 /* Configure I2C registers */
3548 /* Configure XMAC(s) */
3549 switch (sc->sk_type) {
3551 sk_init_xmac(sc_if);
3556 sk_init_yukon(sc_if);
3561 if (sc->sk_type == SK_GENESIS) {
3562 /* Configure MAC FIFOs */
3563 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
3564 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
3565 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
3567 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
3568 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
3569 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
3572 /* Configure transmit arbiter(s) */
3573 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
3574 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
3576 /* Configure RAMbuffers */
3577 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
3578 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
3579 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
3580 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
3581 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
3582 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
3584 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
3585 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
3586 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
3587 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
3588 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
3589 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
3590 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
3592 /* Configure BMUs */
3593 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
3594 if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3595 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3596 SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3597 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3598 SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3600 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3601 SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
3602 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3603 SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
3606 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
3607 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
3608 SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
3609 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
3610 SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));
3612 /* Init descriptors */
3613 if (ifp->if_mtu > SK_MAX_FRAMELEN)
3614 error = sk_init_jumbo_rx_ring(sc_if);
3616 error = sk_init_rx_ring(sc_if);
3618 device_printf(sc_if->sk_if_dev,
3619 "initialization failed: no memory for rx buffers\n");
3623 sk_init_tx_ring(sc_if);
3625 /* Set interrupt moderation if changed via sysctl. */
3626 imr = sk_win_read_4(sc, SK_IMTIMERINIT);
3627 if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
3628 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
3631 device_printf(sc_if->sk_if_dev,
3632 "interrupt moderation is %d us.\n",
3636 /* Configure interrupt handling */
3637 CSR_READ_4(sc, SK_ISSR);
3638 if (sc_if->sk_port == SK_PORT_A)
3639 sc->sk_intrmask |= SK_INTRS1;
3641 sc->sk_intrmask |= SK_INTRS2;
3643 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
3645 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3648 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
3650 switch(sc->sk_type) {
3652 /* Enable XMACs TX and RX state machines */
3653 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
3654 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3659 reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
3660 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
3662 /* XXX disable 100Mbps and full duplex mode? */
3663 reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
3665 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
3668 /* Activate descriptor polling timer */
3669 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
3670 /* start transfer of Tx descriptors */
3671 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
3673 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3674 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3676 switch (sc->sk_type) {
3680 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
3684 callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
3691 struct sk_if_softc *sc_if;
3694 struct sk_softc *sc;
3695 struct sk_txdesc *txd;
3696 struct sk_rxdesc *rxd;
3697 struct sk_rxdesc *jrxd;
3701 SK_IF_LOCK_ASSERT(sc_if);
3702 sc = sc_if->sk_softc;
3703 ifp = sc_if->sk_ifp;
3705 callout_stop(&sc_if->sk_tick_ch);
3706 callout_stop(&sc_if->sk_watchdog_ch);
3708 /* stop Tx descriptor polling timer */
3709 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
3710 /* stop transfer of Tx descriptors */
3711 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
3712 for (i = 0; i < SK_TIMEOUT; i++) {
3713 val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
3714 if ((val & SK_TXBMU_TX_STOP) == 0)
3718 if (i == SK_TIMEOUT)
3719 device_printf(sc_if->sk_if_dev,
3720 "can not stop transfer of Tx descriptor\n");
3721 /* stop transfer of Rx descriptors */
3722 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
3723 for (i = 0; i < SK_TIMEOUT; i++) {
3724 val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
3725 if ((val & SK_RXBMU_RX_STOP) == 0)
3729 if (i == SK_TIMEOUT)
3730 device_printf(sc_if->sk_if_dev,
3731 "can not stop transfer of Rx descriptor\n");
3733 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3734 /* Put PHY back into reset. */
3735 val = sk_win_read_4(sc, SK_GPIO);
3736 if (sc_if->sk_port == SK_PORT_A) {
3737 val |= SK_GPIO_DIR0;
3738 val &= ~SK_GPIO_DAT0;
3740 val |= SK_GPIO_DIR2;
3741 val &= ~SK_GPIO_DAT2;
3743 sk_win_write_4(sc, SK_GPIO, val);
3746 /* Turn off various components of this interface. */
3747 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3748 switch (sc->sk_type) {
3750 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
3751 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
3756 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
3757 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
3760 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
3761 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3762 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
3763 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3764 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
3765 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3766 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3767 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
3768 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
3770 /* Disable interrupts */
3771 if (sc_if->sk_port == SK_PORT_A)
3772 sc->sk_intrmask &= ~SK_INTRS1;
3774 sc->sk_intrmask &= ~SK_INTRS2;
3775 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3777 SK_XM_READ_2(sc_if, XM_ISR);
3778 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3780 /* Free RX and TX mbufs still in the queues. */
3781 for (i = 0; i < SK_RX_RING_CNT; i++) {
3782 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
3783 if (rxd->rx_m != NULL) {
3784 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
3785 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3786 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
3792 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
3793 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
3794 if (jrxd->rx_m != NULL) {
3795 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
3796 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3797 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
3799 m_freem(jrxd->rx_m);
3803 for (i = 0; i < SK_TX_RING_CNT; i++) {
3804 txd = &sc_if->sk_cdata.sk_txdesc[i];
3805 if (txd->tx_m != NULL) {
3806 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
3807 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3808 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
3815 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
3821 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3827 value = *(int *)arg1;
3828 error = sysctl_handle_int(oidp, &value, 0, req);
3829 if (error || !req->newptr)
3831 if (value < low || value > high)
3833 *(int *)arg1 = value;
3838 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
3840 return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
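/*
 * For illustration only (a sketch, not code from this file): a handler
 * like sysctl_hw_sk_int_mod() is typically wired up at attach time with
 * SYSCTL_ADD_PROC(), roughly as below; the OID name and description are
 * assumptions.
 */
#if 0
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->sk_int_mod, 0,
    sysctl_hw_sk_int_mod, "I", "sk interrupt moderation (usec)");
#endif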