1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */
4 * Copyright (c) 1997, 1998, 1999, 2000
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
57 * The XaQti XMAC II datasheet,
58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 * The SysKonnect GEnesis manual, http://www.syskonnect.com
61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
72 * The SysKonnect gigabit ethernet adapters consist of two main
73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75 * components and a PHY while the GEnesis controller provides a PCI
76 * interface with DMA support. Each card may have between 512K and
77 * 2MB of SRAM on board depending on the configuration.
79 * The SysKonnect GEnesis controller can have either one or two XMAC
80 * chips connected to it, allowing single or dual port NIC configurations.
81 * SysKonnect has the distinction of being the only vendor on the market
82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84 * XMAC registers. This driver takes advantage of these features to allow
85 * both XMACs to operate as independent interfaces.
88 #include <sys/param.h>
89 #include <sys/systm.h>
91 #include <sys/endian.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/module.h>
96 #include <sys/socket.h>
97 #include <sys/sockio.h>
98 #include <sys/queue.h>
99 #include <sys/sysctl.h>
102 #include <net/ethernet.h>
104 #include <net/if_arp.h>
105 #include <net/if_dl.h>
106 #include <net/if_media.h>
107 #include <net/if_types.h>
108 #include <net/if_vlan_var.h>
110 #include <netinet/in.h>
111 #include <netinet/in_systm.h>
112 #include <netinet/ip.h>
114 #include <machine/bus.h>
115 #include <machine/in_cksum.h>
116 #include <machine/resource.h>
117 #include <sys/rman.h>
119 #include <dev/mii/mii.h>
120 #include <dev/mii/miivar.h>
121 #include <dev/mii/brgphyreg.h>
123 #include <dev/pci/pcireg.h>
124 #include <dev/pci/pcivar.h>
127 #define SK_USEIOSPACE
130 #include <dev/sk/if_skreg.h>
131 #include <dev/sk/xmaciireg.h>
132 #include <dev/sk/yukonreg.h>
134 MODULE_DEPEND(sk, pci, 1, 1, 1);
135 MODULE_DEPEND(sk, ether, 1, 1, 1);
136 MODULE_DEPEND(sk, miibus, 1, 1, 1);
138 /* "device miibus" required. See GENERIC if you get errors here. */
139 #include "miibus_if.h"
142 static const char rcsid[] =
146 static struct sk_type sk_devs[] = {
150 "SysKonnect Gigabit Ethernet (V1.0)"
155 "SysKonnect Gigabit Ethernet (V2.0)"
160 "Marvell Gigabit Ethernet"
164 DEVICEID_BELKIN_5005,
165 "Belkin F5D5005 Gigabit Ethernet"
170 "3Com 3C940 Gigabit Ethernet"
174 DEVICEID_LINKSYS_EG1032,
175 "Linksys EG1032 Gigabit Ethernet"
179 DEVICEID_DLINK_DGE530T_A1,
180 "D-Link DGE-530T Gigabit Ethernet"
184 DEVICEID_DLINK_DGE530T_B1,
185 "D-Link DGE-530T Gigabit Ethernet"
190 static int skc_probe(device_t);
191 static int skc_attach(device_t);
192 static int skc_detach(device_t);
193 static void skc_shutdown(device_t);
194 static int skc_suspend(device_t);
195 static int skc_resume(device_t);
196 static int sk_detach(device_t);
197 static int sk_probe(device_t);
198 static int sk_attach(device_t);
199 static void sk_tick(void *);
200 static void sk_yukon_tick(void *);
201 static void sk_intr(void *);
202 static void sk_intr_xmac(struct sk_if_softc *);
203 static void sk_intr_bcom(struct sk_if_softc *);
204 static void sk_intr_yukon(struct sk_if_softc *);
205 static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t);
206 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
207 static void sk_rxeof(struct sk_if_softc *);
208 static void sk_jumbo_rxeof(struct sk_if_softc *);
209 static void sk_txeof(struct sk_if_softc *);
210 static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *);
211 static int sk_encap(struct sk_if_softc *, struct mbuf **);
212 static void sk_start(struct ifnet *);
213 static void sk_start_locked(struct ifnet *);
214 static int sk_ioctl(struct ifnet *, u_long, caddr_t);
215 static void sk_init(void *);
216 static void sk_init_locked(struct sk_if_softc *);
217 static void sk_init_xmac(struct sk_if_softc *);
218 static void sk_init_yukon(struct sk_if_softc *);
219 static void sk_stop(struct sk_if_softc *);
220 static void sk_watchdog(void *);
221 static int sk_ifmedia_upd(struct ifnet *);
222 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
223 static void sk_reset(struct sk_softc *);
224 static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
225 static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
226 static int sk_newbuf(struct sk_if_softc *, int);
227 static int sk_jumbo_newbuf(struct sk_if_softc *, int);
228 static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
229 static int sk_dma_alloc(struct sk_if_softc *);
230 static void sk_dma_free(struct sk_if_softc *);
231 static void *sk_jalloc(struct sk_if_softc *);
232 static void sk_jfree(void *, void *);
233 static int sk_init_rx_ring(struct sk_if_softc *);
234 static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
235 static void sk_init_tx_ring(struct sk_if_softc *);
236 static u_int32_t sk_win_read_4(struct sk_softc *, int);
237 static u_int16_t sk_win_read_2(struct sk_softc *, int);
238 static u_int8_t sk_win_read_1(struct sk_softc *, int);
239 static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
240 static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
241 static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
242 static u_int8_t sk_vpd_readbyte(struct sk_softc *, int);
243 static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int);
244 static void sk_vpd_read(struct sk_softc *);
246 static int sk_miibus_readreg(device_t, int, int);
247 static int sk_miibus_writereg(device_t, int, int, int);
248 static void sk_miibus_statchg(device_t);
250 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
251 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
253 static void sk_xmac_miibus_statchg(struct sk_if_softc *);
255 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
256 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
258 static void sk_marv_miibus_statchg(struct sk_if_softc *);
260 static uint32_t sk_xmchash(const uint8_t *);
261 static uint32_t sk_gmchash(const uint8_t *);
262 static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
263 static void sk_setmulti(struct sk_if_softc *);
264 static void sk_setpromisc(struct sk_if_softc *);
266 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
267 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
270 #define SK_RES SYS_RES_IOPORT
271 #define SK_RID SK_PCI_LOIO
273 #define SK_RES SYS_RES_MEMORY
274 #define SK_RID SK_PCI_LOMEM
278 * It seems that the SK-NET GENESIS supports a very simple checksum offload
279 * capability for Tx, and I believe it can generate a 0 checksum value for
280 * UDP packets in Tx as the hardware can't differentiate UDP packets from
281 * TCP packets. A 0 checksum value for a UDP packet is invalid, as it
282 * means the sender didn't perform the checksum computation. To be safe, I
283 * disabled the UDP checksum offload capability at the moment. Alternatively
284 * we could introduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum
287 #define SK_CSUM_FEATURES (CSUM_TCP)
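/*
 * Illustrative note: because only CSUM_TCP ends up in if_hwassist (see
 * sk_attach() and sk_ioctl() below), the stack is left to compute UDP
 * checksums in software, so UDP frames are never sent with the bogus
 * zero checksum described above.
 */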
290 * Note that we have newbus methods for both the GEnesis controller
291 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
292 * the miibus code is a child of the XMACs. We need to do it this way
293 * so that the miibus drivers can access the PHY registers on the
294 * right PHY. It's not quite what I had in mind, but it's the only
295 * design that achieves the desired effect.
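 *
 * A sketch of the resulting hierarchy (unit numbers illustrative):
 *
 *	skc0 (GEnesis/Yukon controller, PCI)
 *	 +- sk0 (port A MAC) -- miibus0 -- PHY
 *	 +- sk1 (port B MAC) -- miibus1 -- PHY	(dual port boards only)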
297 static device_method_t skc_methods[] = {
298 /* Device interface */
299 DEVMETHOD(device_probe, skc_probe),
300 DEVMETHOD(device_attach, skc_attach),
301 DEVMETHOD(device_detach, skc_detach),
302 DEVMETHOD(device_suspend, skc_suspend),
303 DEVMETHOD(device_resume, skc_resume),
304 DEVMETHOD(device_shutdown, skc_shutdown),
307 DEVMETHOD(bus_print_child, bus_generic_print_child),
308 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
313 static driver_t skc_driver = {
316 sizeof(struct sk_softc)
319 static devclass_t skc_devclass;
321 static device_method_t sk_methods[] = {
322 /* Device interface */
323 DEVMETHOD(device_probe, sk_probe),
324 DEVMETHOD(device_attach, sk_attach),
325 DEVMETHOD(device_detach, sk_detach),
326 DEVMETHOD(device_shutdown, bus_generic_shutdown),
329 DEVMETHOD(bus_print_child, bus_generic_print_child),
330 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
333 DEVMETHOD(miibus_readreg, sk_miibus_readreg),
334 DEVMETHOD(miibus_writereg, sk_miibus_writereg),
335 DEVMETHOD(miibus_statchg, sk_miibus_statchg),
340 static driver_t sk_driver = {
343 sizeof(struct sk_if_softc)
346 static devclass_t sk_devclass;
348 DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, 0, 0);
349 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
350 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
352 #define SK_SETBIT(sc, reg, x) \
353 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
355 #define SK_CLRBIT(sc, reg, x) \
356 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
358 #define SK_WIN_SETBIT_4(sc, reg, x) \
359 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
361 #define SK_WIN_CLRBIT_4(sc, reg, x) \
362 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
364 #define SK_WIN_SETBIT_2(sc, reg, x) \
365 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
367 #define SK_WIN_CLRBIT_2(sc, reg, x) \
368 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
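/*
 * Register window helpers. With SK_USEIOSPACE only a small window of
 * the register file is visible through the I/O BAR, so the sk_win_*()
 * routines below first select a window by writing SK_WIN(reg) to the
 * RAP register and then access SK_WIN_BASE + SK_REG(reg); with a full
 * memory mapping the register is simply accessed directly.
 */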
371 sk_win_read_4(sc, reg)
376 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
377 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
379 return(CSR_READ_4(sc, reg));
384 sk_win_read_2(sc, reg)
389 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
390 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
392 return(CSR_READ_2(sc, reg));
397 sk_win_read_1(sc, reg)
402 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
403 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
405 return(CSR_READ_1(sc, reg));
410 sk_win_write_4(sc, reg, val)
416 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
417 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
419 CSR_WRITE_4(sc, reg, val);
425 sk_win_write_2(sc, reg, val)
431 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
432 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
434 CSR_WRITE_2(sc, reg, val);
440 sk_win_write_1(sc, reg, val)
446 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
447 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
449 CSR_WRITE_1(sc, reg, val);
455 * The VPD EEPROM contains Vital Product Data, as suggested in
456 * the PCI 2.1 specification. The VPD data is separated into areas
457 * denoted by resource IDs. The SysKonnect VPD contains an ID string
458 * resource (the name of the adapter), a read-only area resource
459 * containing various key/data fields and a read/write area which
460 * can be used to store asset management information or log messages.
461 * We read the ID string and read-only area into buffers attached to
462 * the controller softc structure for later use. At the moment,
463 * we only use the ID string during skc_attach().
466 sk_vpd_readbyte(sc, addr)
472 sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
473 for (i = 0; i < SK_TIMEOUT; i++) {
474 /* ASUS LOM takes a very long time to read VPD. */
476 if (sk_win_read_2(sc,
477 SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
484 return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
488 sk_vpd_read_res(sc, res, addr)
496 ptr = (u_int8_t *)res;
497 for (i = 0; i < sizeof(struct vpd_res); i++)
498 ptr[i] = sk_vpd_readbyte(sc, i + addr);
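/*
 * A rough sketch of the VPD layout walked by sk_vpd_read() below
 * (lengths are illustrative):
 *
 *	vpd_res { vr_id = VPD_RES_ID,   vr_len = N }  + N bytes of
 *	    product name string
 *	vpd_res { vr_id = VPD_RES_READ, vr_len = M }  + M bytes of
 *	    read-only key/value data ("PN", "EC", "MN", "SN", ...)
 */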
510 /* Check VPD capability */
511 if (sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_CAPID)) != PCIY_VPD)
513 if (sc->sk_vpd_prodname != NULL)
514 free(sc->sk_vpd_prodname, M_DEVBUF);
515 if (sc->sk_vpd_readonly != NULL)
516 free(sc->sk_vpd_readonly, M_DEVBUF);
517 sc->sk_vpd_prodname = NULL;
518 sc->sk_vpd_readonly = NULL;
519 sc->sk_vpd_readonly_len = 0;
521 sk_vpd_read_res(sc, &res, pos);
524 * Bail out quietly if the eeprom appears to be missing or empty.
526 if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
529 if (res.vr_id != VPD_RES_ID) {
530 device_printf(sc->sk_dev, "bad VPD resource id: expected %x "
531 "got %x\n", VPD_RES_ID, res.vr_id);
536 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
537 if (sc->sk_vpd_prodname != NULL) {
538 for (i = 0; i < res.vr_len; i++)
539 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
540 sc->sk_vpd_prodname[i] = '\0';
544 sk_vpd_read_res(sc, &res, pos);
546 if (res.vr_id != VPD_RES_READ) {
547 device_printf(sc->sk_dev, "bad VPD resource id: expected %x "
548 "got %x\n", VPD_RES_READ, res.vr_id);
553 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
554 for (i = 0; i < res.vr_len; i++)
555 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
556 sc->sk_vpd_readonly_len = res.vr_len;
562 sk_miibus_readreg(dev, phy, reg)
566 struct sk_if_softc *sc_if;
569 sc_if = device_get_softc(dev);
571 SK_IF_MII_LOCK(sc_if);
572 switch(sc_if->sk_softc->sk_type) {
574 v = sk_xmac_miibus_readreg(sc_if, phy, reg);
579 v = sk_marv_miibus_readreg(sc_if, phy, reg);
585 SK_IF_MII_UNLOCK(sc_if);
591 sk_miibus_writereg(dev, phy, reg, val)
595 struct sk_if_softc *sc_if;
598 sc_if = device_get_softc(dev);
600 SK_IF_MII_LOCK(sc_if);
601 switch(sc_if->sk_softc->sk_type) {
603 v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
608 v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
614 SK_IF_MII_UNLOCK(sc_if);
620 sk_miibus_statchg(dev)
623 struct sk_if_softc *sc_if;
625 sc_if = device_get_softc(dev);
627 SK_IF_MII_LOCK(sc_if);
628 switch(sc_if->sk_softc->sk_type) {
630 sk_xmac_miibus_statchg(sc_if);
635 sk_marv_miibus_statchg(sc_if);
638 SK_IF_MII_UNLOCK(sc_if);
644 sk_xmac_miibus_readreg(sc_if, phy, reg)
645 struct sk_if_softc *sc_if;
650 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
653 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
654 SK_XM_READ_2(sc_if, XM_PHY_DATA);
655 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
656 for (i = 0; i < SK_TIMEOUT; i++) {
658 if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
659 XM_MMUCMD_PHYDATARDY)
663 if (i == SK_TIMEOUT) {
664 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
669 i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
675 sk_xmac_miibus_writereg(sc_if, phy, reg, val)
676 struct sk_if_softc *sc_if;
681 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
682 for (i = 0; i < SK_TIMEOUT; i++) {
683 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
687 if (i == SK_TIMEOUT) {
688 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
692 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
693 for (i = 0; i < SK_TIMEOUT; i++) {
695 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
699 if_printf(sc_if->sk_ifp, "phy write timed out\n");
705 sk_xmac_miibus_statchg(sc_if)
706 struct sk_if_softc *sc_if;
708 struct mii_data *mii;
710 mii = device_get_softc(sc_if->sk_miibus);
713 * If this is a GMII PHY, manually set the XMAC's
714 * duplex mode accordingly.
716 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
717 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
718 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
720 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
726 sk_marv_miibus_readreg(sc_if, phy, reg)
727 struct sk_if_softc *sc_if;
734 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
735 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
739 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
740 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
742 for (i = 0; i < SK_TIMEOUT; i++) {
744 val = SK_YU_READ_2(sc_if, YUKON_SMICR);
745 if (val & YU_SMICR_READ_VALID)
749 if (i == SK_TIMEOUT) {
750 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
754 val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
760 sk_marv_miibus_writereg(sc_if, phy, reg, val)
761 struct sk_if_softc *sc_if;
766 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
767 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
768 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
770 for (i = 0; i < SK_TIMEOUT; i++) {
772 if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
776 if_printf(sc_if->sk_ifp, "phy write timeout\n");
782 sk_marv_miibus_statchg(sc_if)
783 struct sk_if_softc *sc_if;
796 /* Compute CRC for the address value. */
797 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
799 return (~crc & ((1 << HASH_BITS) - 1));
802 /* gmchash is just a big-endian CRC */
809 /* Compute CRC for the address value. */
810 crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
812 return (crc & ((1 << HASH_BITS) - 1));
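/*
 * Both hashes yield a small index (HASH_BITS is assumed to be 6 here)
 * selecting one of the 64 bits in the multicast hash filter; see how
 * hashes[0] and hashes[1] are assembled in sk_setmulti() below.
 */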
816 sk_setfilt(sc_if, addr, slot)
817 struct sk_if_softc *sc_if;
823 base = XM_RXFILT_ENTRY(slot);
825 SK_XM_WRITE_2(sc_if, base, addr[0]);
826 SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
827 SK_XM_WRITE_2(sc_if, base + 4, addr[2]);
834 struct sk_if_softc *sc_if;
836 struct sk_softc *sc = sc_if->sk_softc;
837 struct ifnet *ifp = sc_if->sk_ifp;
838 u_int32_t hashes[2] = { 0, 0 };
840 struct ifmultiaddr *ifma;
841 u_int16_t dummy[] = { 0, 0, 0 };
842 u_int16_t maddr[(ETHER_ADDR_LEN+1)/2];
844 SK_IF_LOCK_ASSERT(sc_if);
846 /* First, zot all the existing filters. */
847 switch(sc->sk_type) {
849 for (i = 1; i < XM_RXFILT_MAX; i++)
850 sk_setfilt(sc_if, dummy, i);
852 SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
853 SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
858 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
859 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
860 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
861 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
865 /* Now program new ones. */
866 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
867 hashes[0] = 0xFFFFFFFF;
868 hashes[1] = 0xFFFFFFFF;
872 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
873 if (ifma->ifma_addr->sa_family != AF_LINK)
876 * Program the first XM_RXFILT_MAX multicast groups
877 * into the perfect filter. For all others,
878 * use the hash table.
880 if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
882 (struct sockaddr_dl *)ifma->ifma_addr),
883 maddr, ETHER_ADDR_LEN);
884 sk_setfilt(sc_if, maddr, i);
889 switch(sc->sk_type) {
892 (struct sockaddr_dl *)ifma->ifma_addr),
893 maddr, ETHER_ADDR_LEN);
894 h = sk_xmchash((const uint8_t *)maddr);
900 (struct sockaddr_dl *)ifma->ifma_addr),
901 maddr, ETHER_ADDR_LEN);
902 h = sk_gmchash((const uint8_t *)maddr);
906 hashes[0] |= (1 << h);
908 hashes[1] |= (1 << (h - 32));
913 switch(sc->sk_type) {
915 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
916 XM_MODE_RX_USE_PERFECT);
917 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
918 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
923 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
924 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
925 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
926 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
935 struct sk_if_softc *sc_if;
937 struct sk_softc *sc = sc_if->sk_softc;
938 struct ifnet *ifp = sc_if->sk_ifp;
940 SK_IF_LOCK_ASSERT(sc_if);
942 switch(sc->sk_type) {
944 if (ifp->if_flags & IFF_PROMISC) {
945 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
947 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
953 if (ifp->if_flags & IFF_PROMISC) {
954 SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
955 YU_RCR_UFLEN | YU_RCR_MUFLEN);
957 SK_YU_SETBIT_2(sc_if, YUKON_RCR,
958 YU_RCR_UFLEN | YU_RCR_MUFLEN);
967 sk_init_rx_ring(sc_if)
968 struct sk_if_softc *sc_if;
970 struct sk_ring_data *rd;
972 u_int32_t csum_start;
975 sc_if->sk_cdata.sk_rx_cons = 0;
977 csum_start = (ETHER_HDR_LEN + sizeof(struct ip)) << 16 |
979 rd = &sc_if->sk_rdata;
980 bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
981 for (i = 0; i < SK_RX_RING_CNT; i++) {
982 if (sk_newbuf(sc_if, i) != 0)
984 if (i == (SK_RX_RING_CNT - 1))
985 addr = SK_RX_RING_ADDR(sc_if, 0);
987 addr = SK_RX_RING_ADDR(sc_if, i + 1);
988 rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
989 rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
992 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
993 sc_if->sk_cdata.sk_rx_ring_map,
994 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1000 sk_init_jumbo_rx_ring(sc_if)
1001 struct sk_if_softc *sc_if;
1003 struct sk_ring_data *rd;
1005 u_int32_t csum_start;
1008 sc_if->sk_cdata.sk_jumbo_rx_cons = 0;
1010 csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
1012 rd = &sc_if->sk_rdata;
1013 bzero(rd->sk_jumbo_rx_ring,
1014 sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
1015 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
1016 if (sk_jumbo_newbuf(sc_if, i) != 0)
1018 if (i == (SK_JUMBO_RX_RING_CNT - 1))
1019 addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
1021 addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
1022 rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
1023 rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
1026 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
1027 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
1028 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1034 sk_init_tx_ring(sc_if)
1035 struct sk_if_softc *sc_if;
1037 struct sk_ring_data *rd;
1038 struct sk_txdesc *txd;
1042 STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
1043 STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);
1045 sc_if->sk_cdata.sk_tx_prod = 0;
1046 sc_if->sk_cdata.sk_tx_cons = 0;
1047 sc_if->sk_cdata.sk_tx_cnt = 0;
1049 rd = &sc_if->sk_rdata;
1050 bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
1051 for (i = 0; i < SK_TX_RING_CNT; i++) {
1052 if (i == (SK_TX_RING_CNT - 1))
1053 addr = SK_TX_RING_ADDR(sc_if, 0);
1055 addr = SK_TX_RING_ADDR(sc_if, i + 1);
1056 rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
1057 txd = &sc_if->sk_cdata.sk_txdesc[i];
1058 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
1061 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
1062 sc_if->sk_cdata.sk_tx_ring_map,
1063 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1066 static __inline void
1067 sk_discard_rxbuf(sc_if, idx)
1068 struct sk_if_softc *sc_if;
1071 struct sk_rx_desc *r;
1072 struct sk_rxdesc *rxd;
1076 r = &sc_if->sk_rdata.sk_rx_ring[idx];
1077 rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
1079 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
1082 static __inline void
1083 sk_discard_jumbo_rxbuf(sc_if, idx)
1084 struct sk_if_softc *sc_if;
1087 struct sk_rx_desc *r;
1088 struct sk_rxdesc *rxd;
1091 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
1092 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
1094 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
1098 sk_newbuf(sc_if, idx)
1099 struct sk_if_softc *sc_if;
1102 struct sk_rx_desc *r;
1103 struct sk_rxdesc *rxd;
1105 bus_dma_segment_t segs[1];
1109 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1112 m->m_len = m->m_pkthdr.len = MCLBYTES;
1113 m_adj(m, ETHER_ALIGN);
1115 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
1116 sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1120 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1122 rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
1123 if (rxd->rx_m != NULL) {
1124 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
1125 BUS_DMASYNC_POSTREAD);
1126 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
1128 map = rxd->rx_dmamap;
1129 rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
1130 sc_if->sk_cdata.sk_rx_sparemap = map;
1131 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
1132 BUS_DMASYNC_PREREAD);
1134 r = &sc_if->sk_rdata.sk_rx_ring[idx];
1135 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
1136 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
1137 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
1143 sk_jumbo_newbuf(sc_if, idx)
1144 struct sk_if_softc *sc_if;
1147 struct sk_rx_desc *r;
1148 struct sk_rxdesc *rxd;
1150 bus_dma_segment_t segs[1];
1155 MGETHDR(m, M_DONTWAIT, MT_DATA);
1158 buf = sk_jalloc(sc_if);
1163 /* Attach the buffer to the mbuf */
1164 MEXTADD(m, buf, SK_JLEN, sk_jfree, (struct sk_if_softc *)sc_if, 0,
1166 if ((m->m_flags & M_EXT) == 0) {
1170 m->m_pkthdr.len = m->m_len = SK_JLEN;
1172 * Adjust alignment so packet payload begins on a
1173 * longword boundary. Mandatory for Alpha, useful on
1176 m_adj(m, ETHER_ALIGN);
1178 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
1179 sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1183 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1185 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
1186 if (rxd->rx_m != NULL) {
1187 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
1188 BUS_DMASYNC_POSTREAD);
1189 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
1192 map = rxd->rx_dmamap;
1193 rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
1194 sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
1195 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
1196 BUS_DMASYNC_PREREAD);
1198 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
1199 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
1200 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
1201 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
1207 * Set media options.
1213 struct sk_if_softc *sc_if = ifp->if_softc;
1214 struct mii_data *mii;
1216 mii = device_get_softc(sc_if->sk_miibus);
1224 * Report current media status.
1227 sk_ifmedia_sts(ifp, ifmr)
1229 struct ifmediareq *ifmr;
1231 struct sk_if_softc *sc_if;
1232 struct mii_data *mii;
1234 sc_if = ifp->if_softc;
1235 mii = device_get_softc(sc_if->sk_miibus);
1238 ifmr->ifm_active = mii->mii_media_active;
1239 ifmr->ifm_status = mii->mii_media_status;
1245 sk_ioctl(ifp, command, data)
1250 struct sk_if_softc *sc_if = ifp->if_softc;
1251 struct ifreq *ifr = (struct ifreq *) data;
1253 struct mii_data *mii;
1259 if (ifr->ifr_mtu > SK_JUMBO_MTU)
1262 ifp->if_mtu = ifr->ifr_mtu;
1263 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1264 sk_init_locked(sc_if);
1266 SK_IF_UNLOCK(sc_if);
1270 if (ifp->if_flags & IFF_UP) {
1271 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1272 if ((ifp->if_flags ^ sc_if->sk_if_flags)
1274 sk_setpromisc(sc_if);
1278 sk_init_locked(sc_if);
1280 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1283 sc_if->sk_if_flags = ifp->if_flags;
1284 SK_IF_UNLOCK(sc_if);
1289 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1291 SK_IF_UNLOCK(sc_if);
1295 mii = device_get_softc(sc_if->sk_miibus);
1296 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1300 if (sc_if->sk_softc->sk_type == SK_GENESIS) {
1301 SK_IF_UNLOCK(sc_if);
1304 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1305 if (mask & IFCAP_HWCSUM) {
1306 ifp->if_capenable ^= IFCAP_HWCSUM;
1307 if (IFCAP_HWCSUM & ifp->if_capenable &&
1308 IFCAP_HWCSUM & ifp->if_capabilities)
1309 ifp->if_hwassist = SK_CSUM_FEATURES;
1311 ifp->if_hwassist = 0;
1313 SK_IF_UNLOCK(sc_if);
1316 error = ether_ioctl(ifp, command, data);
1324 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1325 * IDs against our list and return a device name if we find a match.
1331 struct sk_type *t = sk_devs;
1333 while(t->sk_name != NULL) {
1334 if ((pci_get_vendor(dev) == t->sk_vid) &&
1335 (pci_get_device(dev) == t->sk_did)) {
1337 * Only attach to rev. 2 of the Linksys EG1032 adapter.
1338 * Rev. 3 is supported by re(4).
1340 if ((t->sk_vid == VENDORID_LINKSYS) &&
1341 (t->sk_did == DEVICEID_LINKSYS_EG1032) &&
1342 (pci_get_subdevice(dev) !=
1343 SUBDEVICEID_LINKSYS_EG1032_REV2)) {
1347 device_set_desc(dev, t->sk_name);
1348 return (BUS_PROBE_DEFAULT);
1357 * Force the GEnesis into reset, then bring it out of reset.
1361 struct sk_softc *sc;
1364 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1365 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1366 if (SK_YUKON_FAMILY(sc->sk_type))
1367 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1370 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1372 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1373 if (SK_YUKON_FAMILY(sc->sk_type))
1374 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1376 if (sc->sk_type == SK_GENESIS) {
1377 /* Configure packet arbiter */
1378 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1379 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1380 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1381 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1382 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1385 /* Enable RAM interface */
1386 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1389 * Configure interrupt moderation. The moderation timer
1390 * defers interrupts specified in the interrupt moderation
1391 * timer mask based on the timeout specified in the interrupt
1392 * moderation timer init register. Each bit in the timer
1393 * register represents one tick, so to specify a timeout in
1394 * microseconds, we have to multiply by the correct number of
1395 * ticks-per-microsecond.
1397 switch (sc->sk_type) {
1399 sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
1402 sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
1406 device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
1408 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
1410 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1411 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1412 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
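/*
 * Worked example (illustrative): with int_mod = 100 us on a GEnesis,
 * the value programmed into SK_IMTIMERINIT above is simply
 * 100 * SK_IMTIMER_TICKS_GENESIS timer ticks, i.e. microseconds
 * multiplied by the ticks-per-microsecond constant chosen above.
 */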
1421 struct sk_softc *sc;
1423 sc = device_get_softc(device_get_parent(dev));
1426 * Not much to do here. We always know there will be
1427 * at least one XMAC present, and if there are two,
1428 * skc_attach() will create a second device instance
1431 switch (sc->sk_type) {
1433 device_set_desc(dev, "XaQti Corp. XMAC II");
1438 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1442 return (BUS_PROBE_DEFAULT);
1446 * Each XMAC chip is attached as a separate logical IP interface.
1447 * Single port cards will have only one logical interface of course.
1453 struct sk_softc *sc;
1454 struct sk_if_softc *sc_if;
1463 sc_if = device_get_softc(dev);
1464 sc = device_get_softc(device_get_parent(dev));
1465 port = *(int *)device_get_ivars(dev);
1467 sc_if->sk_if_dev = dev;
1468 sc_if->sk_port = port;
1469 sc_if->sk_softc = sc;
1470 sc->sk_if[port] = sc_if;
1471 if (port == SK_PORT_A)
1472 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1473 if (port == SK_PORT_B)
1474 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1476 callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
1477 callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);
1479 if (sk_dma_alloc(sc_if) != 0) {
1484 ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
1486 device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
1490 ifp->if_softc = sc_if;
1491 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1492 ifp->if_mtu = ETHERMTU;
1493 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1495 * SK_GENESIS has a bug in checksum offload - noted in the Linux driver.
1497 if (sc_if->sk_softc->sk_type != SK_GENESIS) {
1498 ifp->if_capabilities = IFCAP_HWCSUM;
1499 ifp->if_hwassist = SK_CSUM_FEATURES;
1501 ifp->if_capabilities = 0;
1502 ifp->if_hwassist = 0;
1504 ifp->if_capenable = ifp->if_capabilities;
1505 ifp->if_ioctl = sk_ioctl;
1506 ifp->if_start = sk_start;
1508 ifp->if_watchdog = NULL;
1509 ifp->if_init = sk_init;
1510 IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1511 ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
1512 IFQ_SET_READY(&ifp->if_snd);
1515 * Get station address for this interface. Note that
1516 * dual port cards actually come with three station
1517 * addresses: one for each port, plus an extra. The
1518 * extra one is used by the SysKonnect driver software
1519 * as a 'virtual' station address for when both ports
1520 * are operating in failover mode. Currently we don't
1521 * use this extra address.
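 *
 * Port A's address is read starting at SK_MAC0_0 and port B's eight
 * bytes later, hence the (port * 8) offset below.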
1524 for (i = 0; i < ETHER_ADDR_LEN; i++)
1526 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1529 * Set up RAM buffer addresses. The NIC will have a certain
1530 * amount of SRAM on it, somewhere between 512K and 2MB. We
1531 * need to divide this up a) between the transmitter and
1532 * receiver and b) between the two XMACs, if this is a
1533 * dual port NIC. Our algorithm is to divide up the memory
1534 * evenly so that everyone gets a fair share.
1536 * Just to be contrary, Yukon2 appears to have separate memory
1539 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1540 u_int32_t chunk, val;
1542 chunk = sc->sk_ramsize / 2;
1543 val = sc->sk_rboff / sizeof(u_int64_t);
1544 sc_if->sk_rx_ramstart = val;
1545 val += (chunk / sizeof(u_int64_t));
1546 sc_if->sk_rx_ramend = val - 1;
1547 sc_if->sk_tx_ramstart = val;
1548 val += (chunk / sizeof(u_int64_t));
1549 sc_if->sk_tx_ramend = val - 1;
1551 u_int32_t chunk, val;
1553 chunk = sc->sk_ramsize / 4;
1554 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1556 sc_if->sk_rx_ramstart = val;
1557 val += (chunk / sizeof(u_int64_t));
1558 sc_if->sk_rx_ramend = val - 1;
1559 sc_if->sk_tx_ramstart = val;
1560 val += (chunk / sizeof(u_int64_t));
1561 sc_if->sk_tx_ramend = val - 1;
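/*
 * Worked example (illustrative): a 1MB dual port board gives
 * chunk = 0x100000 / 4 = 0x40000 bytes per queue; RAM buffer
 * addresses are expressed in 8-byte (u_int64_t) units, so each
 * Rx/Tx region spans 0x40000 / 8 = 0x8000 address units.
 */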
1564 /* Read and save PHY type and set PHY address */
1565 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1566 if (!SK_YUKON_FAMILY(sc->sk_type)) {
1567 switch(sc_if->sk_phytype) {
1568 case SK_PHYTYPE_XMAC:
1569 sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1571 case SK_PHYTYPE_BCOM:
1572 sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1575 device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
1578 SK_IF_UNLOCK(sc_if);
1582 if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
1583 sc->sk_pmd != 'S') {
1584 /* not initialized, punt */
1585 sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
1586 sc->sk_coppertype = 1;
1589 sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1591 if (!(sc->sk_coppertype))
1592 sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
1596 * Call MI attach routine. Can't hold locks when calling into ether_*.
1598 SK_IF_UNLOCK(sc_if);
1599 ether_ifattach(ifp, eaddr);
1603 * The hardware should be ready for VLAN_MTU by default:
1604 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
1605 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
1608 ifp->if_capabilities |= IFCAP_VLAN_MTU;
1609 ifp->if_capenable |= IFCAP_VLAN_MTU;
1611 * Tell the upper layer(s) we support long frames.
1612 * Must appear after the call to ether_ifattach() because
1613 * ether_ifattach() sets ifi_hdrlen to the default value.
1615 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1620 switch (sc->sk_type) {
1622 sk_init_xmac(sc_if);
1627 sk_init_yukon(sc_if);
1631 SK_IF_UNLOCK(sc_if);
1632 if (mii_phy_probe(dev, &sc_if->sk_miibus,
1633 sk_ifmedia_upd, sk_ifmedia_sts)) {
1634 device_printf(sc_if->sk_if_dev, "no PHY found!\n");
1635 ether_ifdetach(ifp);
1642 /* Access should be ok even though lock has been dropped */
1643 sc->sk_if[port] = NULL;
1651 * Attach the interface. Allocate softc structures, do ifmedia
1652 * setup and ethernet/BPF attach.
1658 struct sk_softc *sc;
1659 int error = 0, *port, rid;
1661 char *pname, *revstr;
1663 sc = device_get_softc(dev);
1666 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1668 mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
1670 * Map control/status registers.
1672 pci_enable_busmaster(dev);
1675 sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE);
1677 if (sc->sk_res == NULL) {
1678 device_printf(dev, "couldn't map ports/memory\n");
1683 sc->sk_btag = rman_get_bustag(sc->sk_res);
1684 sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1686 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1687 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;
1689 /* Bail out if chip is not recognized. */
1690 if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
1691 device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
1692 sc->sk_type, sc->sk_rev);
1697 /* Allocate interrupt */
1699 sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1700 RF_SHAREABLE | RF_ACTIVE);
1702 if (sc->sk_irq == NULL) {
1703 device_printf(dev, "couldn't map interrupt\n");
1708 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1709 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1710 OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
1711 &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
1712 "SK interrupt moderation");
1714 /* Pull in device tunables. */
1715 sc->sk_int_mod = SK_IM_DEFAULT;
1716 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1717 "int_mod", &sc->sk_int_mod);
1719 if (sc->sk_int_mod < SK_IM_MIN ||
1720 sc->sk_int_mod > SK_IM_MAX) {
1721 device_printf(dev, "int_mod value out of range; "
1722 "using default: %d\n", SK_IM_DEFAULT);
1723 sc->sk_int_mod = SK_IM_DEFAULT;
1727 /* Reset the adapter. */
1730 /* Read and save vital product data from EEPROM. */
1733 skrs = sk_win_read_1(sc, SK_EPROM0);
1734 if (sc->sk_type == SK_GENESIS) {
1735 /* Read and save RAM size and RAMbuffer offset */
1737 case SK_RAMSIZE_512K_64:
1738 sc->sk_ramsize = 0x80000;
1739 sc->sk_rboff = SK_RBOFF_0;
1741 case SK_RAMSIZE_1024K_64:
1742 sc->sk_ramsize = 0x100000;
1743 sc->sk_rboff = SK_RBOFF_80000;
1745 case SK_RAMSIZE_1024K_128:
1746 sc->sk_ramsize = 0x100000;
1747 sc->sk_rboff = SK_RBOFF_0;
1749 case SK_RAMSIZE_2048K_128:
1750 sc->sk_ramsize = 0x200000;
1751 sc->sk_rboff = SK_RBOFF_0;
1754 device_printf(dev, "unknown ram size: %d\n", skrs);
1758 } else { /* SK_YUKON_FAMILY */
1760 sc->sk_ramsize = 0x20000;
1762 sc->sk_ramsize = skrs * (1<<12);
1763 sc->sk_rboff = SK_RBOFF_0;
1766 /* Read and save physical media type */
1767 sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);
1769 if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
1770 sc->sk_coppertype = 1;
1772 sc->sk_coppertype = 0;
1774 /* Determine whether to name it with VPD PN or just make it up.
1775 * Marvell Yukon VPD PN seems to frequently be bogus. */
1776 switch (pci_get_device(dev)) {
1777 case DEVICEID_SK_V1:
1778 case DEVICEID_BELKIN_5005:
1779 case DEVICEID_3COM_3C940:
1780 case DEVICEID_LINKSYS_EG1032:
1781 case DEVICEID_DLINK_DGE530T_A1:
1782 case DEVICEID_DLINK_DGE530T_B1:
1783 /* Stay with VPD PN. */
1784 pname = sc->sk_vpd_prodname;
1786 case DEVICEID_SK_V2:
1787 /* YUKON VPD PN might bear no resemblance to reality. */
1788 switch (sc->sk_type) {
1790 /* Stay with VPD PN. */
1791 pname = sc->sk_vpd_prodname;
1794 pname = "Marvell Yukon Gigabit Ethernet";
1797 pname = "Marvell Yukon Lite Gigabit Ethernet";
1800 pname = "Marvell Yukon LP Gigabit Ethernet";
1803 pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
1807 /* Yukon Lite Rev. A0 needs a special test. */
1808 if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1812 /* Save flash address register before testing. */
1813 far = sk_win_read_4(sc, SK_EP_ADDR);
1815 sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
1816 testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);
1818 if (testbyte != 0x00) {
1819 /* Yukon Lite Rev. A0 detected. */
1820 sc->sk_type = SK_YUKON_LITE;
1821 sc->sk_rev = SK_YUKON_LITE_REV_A0;
1822 /* Restore flash address register. */
1823 sk_win_write_4(sc, SK_EP_ADDR, far);
1828 device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
1829 "chipver=%02x, rev=%x\n",
1830 pci_get_vendor(dev), pci_get_device(dev),
1831 sc->sk_type, sc->sk_rev);
1836 if (sc->sk_type == SK_YUKON_LITE) {
1837 switch (sc->sk_rev) {
1838 case SK_YUKON_LITE_REV_A0:
1841 case SK_YUKON_LITE_REV_A1:
1844 case SK_YUKON_LITE_REV_A3:
1855 /* Announce the product name and more VPD data if present. */
1856 device_printf(dev, "%s rev. %s(0x%x)\n",
1857 pname != NULL ? pname : "<unknown>", revstr, sc->sk_rev);
1860 if (sc->sk_vpd_readonly != NULL &&
1861 sc->sk_vpd_readonly_len != 0) {
1863 char *dp = sc->sk_vpd_readonly;
1864 uint16_t l, len = sc->sk_vpd_readonly_len;
1867 if ((*dp == 'P' && *(dp+1) == 'N') ||
1868 (*dp == 'E' && *(dp+1) == 'C') ||
1869 (*dp == 'M' && *(dp+1) == 'N') ||
1870 (*dp == 'S' && *(dp+1) == 'N')) {
1872 while (l < *(dp+2)) {
1877 device_printf(dev, "%c%c: %s\n",
1882 len -= (3 + *(dp+2));
1883 dp += (3 + *(dp+2));
1887 device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type);
1888 device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev);
1889 device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
1890 device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
1893 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1894 if (sc->sk_devs[SK_PORT_A] == NULL) {
1895 device_printf(dev, "failed to add child for PORT_A\n");
1899 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1901 device_printf(dev, "failed to allocate memory for "
1902 "ivars of PORT_A\n");
1907 device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1909 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1910 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1911 if (sc->sk_devs[SK_PORT_B] == NULL) {
1912 device_printf(dev, "failed to add child for PORT_B\n");
1916 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1918 device_printf(dev, "failed to allocate memory for "
1919 "ivars of PORT_B\n");
1924 device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1927 /* Turn on the 'driver is loaded' LED. */
1928 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1930 error = bus_generic_attach(dev);
1932 device_printf(dev, "failed to attach port(s)\n");
1936 /* Hook interrupt last to avoid having to lock softc */
1937 error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE,
1938 sk_intr, sc, &sc->sk_intrhand);
1941 device_printf(dev, "couldn't set up irq\n");
1953 * Shutdown hardware and free up resources. This can be called any
1954 * time after the mutex has been initialized. It is called in both
1955 * the error case in attach and the normal detach case so it needs
1956 * to be careful about only freeing resources that have actually been
1963 struct sk_if_softc *sc_if;
1966 sc_if = device_get_softc(dev);
1967 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1968 ("sk mutex not initialized in sk_detach"));
1971 ifp = sc_if->sk_ifp;
1972 /* These should only be active if attach_xmac succeeded */
1973 if (device_is_attached(dev)) {
1975 /* Can't hold locks while calling detach */
1976 SK_IF_UNLOCK(sc_if);
1977 callout_drain(&sc_if->sk_tick_ch);
1978 callout_drain(&sc_if->sk_watchdog_ch);
1979 ether_ifdetach(ifp);
1985 * We're generally called from skc_detach() which is using
1986 * device_delete_child() to get to here. It's already trashed
1987 * miibus for us, so don't do it here or we'll panic.
1990 if (sc_if->sk_miibus != NULL)
1991 device_delete_child(dev, sc_if->sk_miibus);
1993 bus_generic_detach(dev);
1995 SK_IF_UNLOCK(sc_if);
2004 struct sk_softc *sc;
2006 sc = device_get_softc(dev);
2007 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
2009 if (device_is_alive(dev)) {
2010 if (sc->sk_devs[SK_PORT_A] != NULL) {
2011 free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
2012 device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
2014 if (sc->sk_devs[SK_PORT_B] != NULL) {
2015 free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
2016 device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
2018 bus_generic_detach(dev);
2021 if (sc->sk_vpd_prodname != NULL)
2022 free(sc->sk_vpd_prodname, M_DEVBUF);
2023 if (sc->sk_vpd_readonly != NULL)
2024 free(sc->sk_vpd_readonly, M_DEVBUF);
2026 if (sc->sk_intrhand)
2027 bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
2029 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
2031 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
2033 mtx_destroy(&sc->sk_mii_mtx);
2034 mtx_destroy(&sc->sk_mtx);
2039 struct sk_dmamap_arg {
2040 bus_addr_t sk_busaddr;
2044 sk_dmamap_cb(arg, segs, nseg, error)
2046 bus_dma_segment_t *segs;
2050 struct sk_dmamap_arg *ctx;
2056 ctx->sk_busaddr = segs[0].ds_addr;
2060 * Allocate jumbo buffer storage. The SysKonnect adapters support
2061 * "jumbograms" (9K frames), although SysKonnect doesn't currently
2062 * use them in their drivers. In order for us to use them, we need
2064 * large 9K receive buffers; however, standard mbuf clusters are only
2064 * 2048 bytes in size. Consequently, we need to allocate and manage
2065 * our own jumbo buffer pool. Fortunately, this does not require an
2066 * excessive amount of additional code.
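 *
 * The scheme used below, in sketch form: one contiguous SK_JMEM block
 * is carved into SK_JSLOTS buffers of SK_JLEN bytes each; sk_jalloc()
 * hands buffers out from sk_jfree_listhead and parks them on
 * sk_jinuse_listhead, while sk_jfree() (the external-storage free
 * routine passed to MEXTADD()) returns them to the free list.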
2070 struct sk_if_softc *sc_if;
2072 struct sk_dmamap_arg ctx;
2073 struct sk_txdesc *txd;
2074 struct sk_rxdesc *rxd;
2075 struct sk_rxdesc *jrxd;
2077 struct sk_jpool_entry *entry;
2080 mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF);
2081 SLIST_INIT(&sc_if->sk_jfree_listhead);
2082 SLIST_INIT(&sc_if->sk_jinuse_listhead);
2084 /* create parent tag */
2087 * This driver should use BUS_SPACE_MAXADDR for the lowaddr argument
2088 * of bus_dma_tag_create(9), as the NIC should support DAC mode.
2089 * However, bz@ reported that it does not work on amd64 with > 4GB
2090 * RAM. Until we have more clues about the breakage, disable DAC mode
2091 * by limiting DMA addresses to the 32-bit address space.
2093 error = bus_dma_tag_create(NULL, /* parent */
2094 1, 0, /* algnmnt, boundary */
2095 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2096 BUS_SPACE_MAXADDR, /* highaddr */
2097 NULL, NULL, /* filter, filterarg */
2098 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2100 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
2102 NULL, NULL, /* lockfunc, lockarg */
2103 &sc_if->sk_cdata.sk_parent_tag);
2105 device_printf(sc_if->sk_if_dev,
2106 "failed to create parent DMA tag\n");
2109 /* create tag for Tx ring */
2110 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2111 SK_RING_ALIGN, 0, /* algnmnt, boundary */
2112 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2113 BUS_SPACE_MAXADDR, /* highaddr */
2114 NULL, NULL, /* filter, filterarg */
2115 SK_TX_RING_SZ, /* maxsize */
2117 SK_TX_RING_SZ, /* maxsegsize */
2119 NULL, NULL, /* lockfunc, lockarg */
2120 &sc_if->sk_cdata.sk_tx_ring_tag);
2122 device_printf(sc_if->sk_if_dev,
2123 "failed to allocate Tx ring DMA tag\n");
2127 /* create tag for Rx ring */
2128 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2129 SK_RING_ALIGN, 0, /* algnmnt, boundary */
2130 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2131 BUS_SPACE_MAXADDR, /* highaddr */
2132 NULL, NULL, /* filter, filterarg */
2133 SK_RX_RING_SZ, /* maxsize */
2135 SK_RX_RING_SZ, /* maxsegsize */
2137 NULL, NULL, /* lockfunc, lockarg */
2138 &sc_if->sk_cdata.sk_rx_ring_tag);
2140 device_printf(sc_if->sk_if_dev,
2141 "failed to allocate Rx ring DMA tag\n");
2145 /* create tag for jumbo Rx ring */
2146 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2147 SK_RING_ALIGN, 0, /* algnmnt, boundary */
2148 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2149 BUS_SPACE_MAXADDR, /* highaddr */
2150 NULL, NULL, /* filter, filterarg */
2151 SK_JUMBO_RX_RING_SZ, /* maxsize */
2153 SK_JUMBO_RX_RING_SZ, /* maxsegsize */
2155 NULL, NULL, /* lockfunc, lockarg */
2156 &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2158 device_printf(sc_if->sk_if_dev,
2159 "failed to allocate jumbo Rx ring DMA tag\n");
2163 /* create tag for jumbo buffer blocks */
2164 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2165 PAGE_SIZE, 0, /* algnmnt, boundary */
2166 BUS_SPACE_MAXADDR, /* lowaddr */
2167 BUS_SPACE_MAXADDR, /* highaddr */
2168 NULL, NULL, /* filter, filterarg */
2169 SK_JMEM, /* maxsize */
2171 SK_JMEM, /* maxsegsize */
2173 NULL, NULL, /* lockfunc, lockarg */
2174 &sc_if->sk_cdata.sk_jumbo_tag);
2176 device_printf(sc_if->sk_if_dev,
2177 "failed to allocate jumbo Rx buffer block DMA tag\n");
2181 /* create tag for Tx buffers */
2182 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2183 1, 0, /* algnmnt, boundary */
2184 BUS_SPACE_MAXADDR, /* lowaddr */
2185 BUS_SPACE_MAXADDR, /* highaddr */
2186 NULL, NULL, /* filter, filterarg */
2187 MCLBYTES * SK_MAXTXSEGS, /* maxsize */
2188 SK_MAXTXSEGS, /* nsegments */
2189 MCLBYTES, /* maxsegsize */
2191 NULL, NULL, /* lockfunc, lockarg */
2192 &sc_if->sk_cdata.sk_tx_tag);
2194 device_printf(sc_if->sk_if_dev,
2195 "failed to allocate Tx DMA tag\n");
2199 /* create tag for Rx buffers */
2200 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2201 1, 0, /* algnmnt, boundary */
2202 BUS_SPACE_MAXADDR, /* lowaddr */
2203 BUS_SPACE_MAXADDR, /* highaddr */
2204 NULL, NULL, /* filter, filterarg */
2205 MCLBYTES, /* maxsize */
2207 MCLBYTES, /* maxsegsize */
2209 NULL, NULL, /* lockfunc, lockarg */
2210 &sc_if->sk_cdata.sk_rx_tag);
2212 device_printf(sc_if->sk_if_dev,
2213 "failed to allocate Rx DMA tag\n");
2217 /* create tag for jumbo Rx buffers */
2218 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2219 PAGE_SIZE, 0, /* algnmnt, boundary */
2220 BUS_SPACE_MAXADDR, /* lowaddr */
2221 BUS_SPACE_MAXADDR, /* highaddr */
2222 NULL, NULL, /* filter, filterarg */
2223 MCLBYTES * SK_MAXRXSEGS, /* maxsize */
2224 SK_MAXRXSEGS, /* nsegments */
2225 SK_JLEN, /* maxsegsize */
2227 NULL, NULL, /* lockfunc, lockarg */
2228 &sc_if->sk_cdata.sk_jumbo_rx_tag);
2230 device_printf(sc_if->sk_if_dev,
2231 "failed to allocate jumbo Rx DMA tag\n");
2235 /* allocate DMA'able memory and load the DMA map for Tx ring */
2236 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
2237 (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2238 &sc_if->sk_cdata.sk_tx_ring_map);
2240 device_printf(sc_if->sk_if_dev,
2241 "failed to allocate DMA'able memory for Tx ring\n");
2246 error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
2247 sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
2248 SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2250 device_printf(sc_if->sk_if_dev,
2251 "failed to load DMA'able memory for Tx ring\n");
2254 sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;
2256 /* allocate DMA'able memory and load the DMA map for Rx ring */
2257 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
2258 (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2259 &sc_if->sk_cdata.sk_rx_ring_map);
2261 device_printf(sc_if->sk_if_dev,
2262 "failed to allocate DMA'able memory for Rx ring\n");
2267 error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
2268 sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
2269 SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2271 device_printf(sc_if->sk_if_dev,
2272 "failed to load DMA'able memory for Rx ring\n");
2275 sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;
2277 /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
2278 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2279 (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring,
2280 BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2282 device_printf(sc_if->sk_if_dev,
2283 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2288 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2289 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2290 sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
2291 &ctx, BUS_DMA_NOWAIT);
2293 device_printf(sc_if->sk_if_dev,
2294 "failed to load DMA'able memory for jumbo Rx ring\n");
2297 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
2299 /* create DMA maps for Tx buffers */
2300 for (i = 0; i < SK_TX_RING_CNT; i++) {
2301 txd = &sc_if->sk_cdata.sk_txdesc[i];
2304 error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
2307 device_printf(sc_if->sk_if_dev,
2308 "failed to create Tx dmamap\n");
2312 /* create DMA maps for Rx buffers */
2313 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2314 &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
2315 device_printf(sc_if->sk_if_dev,
2316 "failed to create spare Rx dmamap\n");
2319 for (i = 0; i < SK_RX_RING_CNT; i++) {
2320 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2323 error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2326 device_printf(sc_if->sk_if_dev,
2327 "failed to create Rx dmamap\n");
2331 /* create DMA maps for jumbo Rx buffers */
2332 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2333 &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
2334 device_printf(sc_if->sk_if_dev,
2335 "failed to create spare jumbo Rx dmamap\n");
2338 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2339 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2341 jrxd->rx_dmamap = 0;
2342 error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2345 device_printf(sc_if->sk_if_dev,
2346 "failed to create jumbo Rx dmamap\n");
2351 /* allocate DMA'able memory and load the DMA map for jumbo buf */
2352 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_tag,
2353 (void **)&sc_if->sk_rdata.sk_jumbo_buf,
2354 BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_map);
2356 device_printf(sc_if->sk_if_dev,
2357 "failed to allocate DMA'able memory for jumbo buf\n");
2362 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_tag,
2363 sc_if->sk_cdata.sk_jumbo_map,
2364 sc_if->sk_rdata.sk_jumbo_buf, SK_JMEM, sk_dmamap_cb,
2365 &ctx, BUS_DMA_NOWAIT);
2367 device_printf(sc_if->sk_if_dev,
2368 "failed to load DMA'able memory for jumbobuf\n");
2371 sc_if->sk_rdata.sk_jumbo_buf_paddr = ctx.sk_busaddr;
2374 * Now divide it up into 9K pieces and save the addresses
2377 ptr = sc_if->sk_rdata.sk_jumbo_buf;
2378 for (i = 0; i < SK_JSLOTS; i++) {
2379 sc_if->sk_cdata.sk_jslots[i] = ptr;
2381 entry = malloc(sizeof(struct sk_jpool_entry),
2382 M_DEVBUF, M_NOWAIT);
2383 if (entry == NULL) {
2384 device_printf(sc_if->sk_if_dev,
2385 "no memory for jumbo buffers!\n");
2390 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry,
2400 struct sk_if_softc *sc_if;
2402 struct sk_txdesc *txd;
2403 struct sk_rxdesc *rxd;
2404 struct sk_rxdesc *jrxd;
2405 struct sk_jpool_entry *entry;
2408 SK_JLIST_LOCK(sc_if);
2409 while ((entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead))) {
2410 device_printf(sc_if->sk_if_dev,
2411 "asked to free buffer that is in use!\n");
2412 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
2413 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry,
2417 while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) {
2418 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
2419 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
2420 free(entry, M_DEVBUF);
2422 SK_JLIST_UNLOCK(sc_if);
2424 /* destroy jumbo buffer block */
2425 if (sc_if->sk_cdata.sk_jumbo_map)
2426 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_tag,
2427 sc_if->sk_cdata.sk_jumbo_map);
2429 if (sc_if->sk_rdata.sk_jumbo_buf) {
2430 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_tag,
2431 sc_if->sk_rdata.sk_jumbo_buf,
2432 sc_if->sk_cdata.sk_jumbo_map);
2433 sc_if->sk_rdata.sk_jumbo_buf = NULL;
2434 sc_if->sk_cdata.sk_jumbo_map = 0;
2438 if (sc_if->sk_cdata.sk_tx_ring_tag) {
2439 if (sc_if->sk_cdata.sk_tx_ring_map)
2440 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
2441 sc_if->sk_cdata.sk_tx_ring_map);
2442 if (sc_if->sk_cdata.sk_tx_ring_map &&
2443 sc_if->sk_rdata.sk_tx_ring)
2444 bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
2445 sc_if->sk_rdata.sk_tx_ring,
2446 sc_if->sk_cdata.sk_tx_ring_map);
2447 sc_if->sk_rdata.sk_tx_ring = NULL;
2448 sc_if->sk_cdata.sk_tx_ring_map = 0;
2449 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
2450 sc_if->sk_cdata.sk_tx_ring_tag = NULL;
2453 if (sc_if->sk_cdata.sk_rx_ring_tag) {
2454 if (sc_if->sk_cdata.sk_rx_ring_map)
2455 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
2456 sc_if->sk_cdata.sk_rx_ring_map);
2457 if (sc_if->sk_cdata.sk_rx_ring_map &&
2458 sc_if->sk_rdata.sk_rx_ring)
2459 bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
2460 sc_if->sk_rdata.sk_rx_ring,
2461 sc_if->sk_cdata.sk_rx_ring_map);
2462 sc_if->sk_rdata.sk_rx_ring = NULL;
2463 sc_if->sk_cdata.sk_rx_ring_map = 0;
2464 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
2465 sc_if->sk_cdata.sk_rx_ring_tag = NULL;
2468 if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
2469 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map)
2470 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2471 sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2472 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map &&
2473 sc_if->sk_rdata.sk_jumbo_rx_ring)
2474 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2475 sc_if->sk_rdata.sk_jumbo_rx_ring,
2476 sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2477 sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
2478 sc_if->sk_cdata.sk_jumbo_rx_ring_map = 0;
2479 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2480 sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
2483 if (sc_if->sk_cdata.sk_tx_tag) {
2484 for (i = 0; i < SK_TX_RING_CNT; i++) {
2485 txd = &sc_if->sk_cdata.sk_txdesc[i];
2486 if (txd->tx_dmamap) {
2487 bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
2492 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
2493 sc_if->sk_cdata.sk_tx_tag = NULL;
2496 if (sc_if->sk_cdata.sk_rx_tag) {
2497 for (i = 0; i < SK_RX_RING_CNT; i++) {
2498 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2499 if (rxd->rx_dmamap) {
2500 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2505 if (sc_if->sk_cdata.sk_rx_sparemap) {
2506 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2507 sc_if->sk_cdata.sk_rx_sparemap);
2508 sc_if->sk_cdata.sk_rx_sparemap = 0;
2510 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
2511 sc_if->sk_cdata.sk_rx_tag = NULL;
2513 /* jumbo Rx buffers */
2514 if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
2515 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2516 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2517 if (jrxd->rx_dmamap) {
2519 sc_if->sk_cdata.sk_jumbo_rx_tag,
2521 jrxd->rx_dmamap = 0;
2524 if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
2525 bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
2526 sc_if->sk_cdata.sk_jumbo_rx_sparemap);
2527 sc_if->sk_cdata.sk_jumbo_rx_sparemap = 0;
2529 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
2530 sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
2533 if (sc_if->sk_cdata.sk_parent_tag) {
2534 bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
2535 sc_if->sk_cdata.sk_parent_tag = NULL;
2537 mtx_destroy(&sc_if->sk_jlist_mtx);
2541 * Allocate a jumbo buffer.
2545 struct sk_if_softc *sc_if;
2547 struct sk_jpool_entry *entry;
2549 SK_JLIST_LOCK(sc_if);
2551 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
2553 if (entry == NULL) {
2554 SK_JLIST_UNLOCK(sc_if);
2558 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
2559 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
2561 SK_JLIST_UNLOCK(sc_if);
2563 return (sc_if->sk_cdata.sk_jslots[entry->slot]);
2567 * Release a jumbo buffer.
2574 struct sk_if_softc *sc_if;
2575 struct sk_jpool_entry *entry;
2578 /* Extract the softc struct pointer. */
2579 sc_if = (struct sk_if_softc *)args;
2580 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
2582 SK_JLIST_LOCK(sc_if);
2583 /* calculate the slot this buffer belongs to */
2584 i = ((vm_offset_t)buf
2585 - (vm_offset_t)sc_if->sk_rdata.sk_jumbo_buf) / SK_JLEN;
2586 KASSERT(i >= 0 && i < SK_JSLOTS,
2587 ("%s: asked to free buffer that we don't manage!", __func__));
2589 entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
2590 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
2592 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
2593 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
2594 if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
2597 SK_JLIST_UNLOCK(sc_if);
2601 sk_txcksum(ifp, m, f)
2604 struct sk_tx_desc *f;
2610 offset = sizeof(struct ip) + ETHER_HDR_LEN;
2611 for(; m && m->m_len == 0; m = m->m_next)
2613 if (m == NULL || m->m_len < ETHER_HDR_LEN) {
2614 if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
2615 /* checksum may be corrupted */
2618 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
2619 if (m->m_len != ETHER_HDR_LEN) {
2620 if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
2622 /* checksum may be corrupted */
2625 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
2628 offset = sizeof(struct ip) + ETHER_HDR_LEN;
2629 /* checksum may be corrupted */
2632 ip = mtod(m, struct ip *);
2634 p = mtod(m, u_int8_t *);
2636 ip = (struct ip *)p;
2638 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
2641 f->sk_csum_startval = 0;
2642 f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) | (offset << 16));
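/*
 * Worked example: for a TCP packet with no IP options, offset is
 * ETHER_HDR_LEN + sizeof(struct ip) = 14 + 20 = 34, and csum_data is
 * the checksum field offset within the TCP header (16), so with the
 * statement above the descriptor carries 34 in the high half of
 * sk_csum_start and 34 + 16 = 50 in the low half.
 */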
2647 sk_encap(sc_if, m_head)
2648 struct sk_if_softc *sc_if;
2649 struct mbuf **m_head;
2651 struct sk_txdesc *txd;
2652 struct sk_tx_desc *f = NULL;
2654 bus_dma_segment_t txsegs[SK_MAXTXSEGS];
2655 u_int32_t cflags, frag, si, sk_ctl;
2658 SK_IF_LOCK_ASSERT(sc_if);
2660 if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
2663 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2664 txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2665 if (error == EFBIG) {
2666 m = m_defrag(*m_head, M_DONTWAIT);
2673 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2674 txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2680 } else if (error != 0)
2687 if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
2688 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2693 if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0)
2694 cflags = SK_OPCODE_CSUM;
2696 cflags = SK_OPCODE_DEFAULT;
2697 si = frag = sc_if->sk_cdata.sk_tx_prod;
2698 for (i = 0; i < nseg; i++) {
2699 f = &sc_if->sk_rdata.sk_tx_ring[frag];
2700 f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
2701 f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
2702 sk_ctl = txsegs[i].ds_len | cflags;
2704 if (cflags == SK_OPCODE_CSUM)
2705 sk_txcksum(sc_if->sk_ifp, m, f);
2706 sk_ctl |= SK_TXCTL_FIRSTFRAG;
2708 sk_ctl |= SK_TXCTL_OWN;
2709 f->sk_ctl = htole32(sk_ctl);
2710 sc_if->sk_cdata.sk_tx_cnt++;
2711 SK_INC(frag, SK_TX_RING_CNT);
2713 sc_if->sk_cdata.sk_tx_prod = frag;
2715 /* set EOF on the last descriptor */
2716 frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
2717 f = &sc_if->sk_rdata.sk_tx_ring[frag];
2718 f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);
2720 /* turn the first descriptor ownership to NIC */
2721 f = &sc_if->sk_rdata.sk_tx_ring[si];
2722 f->sk_ctl |= htole32(SK_TXCTL_OWN);
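/*
 * Setting SK_TXCTL_OWN on the first descriptor is deliberately done
 * last: every later fragment already carries the OWN bit, so the NIC
 * cannot start walking the chain until the whole frame, including the
 * LASTFRAG/EOF_INTR marking above, has been written out.
 */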
2724 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
2725 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
2728 /* sync descriptors */
2729 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2730 BUS_DMASYNC_PREWRITE);
2731 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2732 sc_if->sk_cdata.sk_tx_ring_map,
2733 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2742 struct sk_if_softc *sc_if;
2744 sc_if = ifp->if_softc;
2747 sk_start_locked(ifp);
2748 SK_IF_UNLOCK(sc_if);
2754 sk_start_locked(ifp)
2757 struct sk_softc *sc;
2758 struct sk_if_softc *sc_if;
2759 struct mbuf *m_head;
2762 sc_if = ifp->if_softc;
2763 sc = sc_if->sk_softc;
2765 SK_IF_LOCK_ASSERT(sc_if);
2767 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2768 sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
2769 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2774 * Pack the data into the transmit ring. If we
2775 * don't have room, set the OACTIVE flag and wait
2776 * for the NIC to drain the ring.
2778 if (sk_encap(sc_if, &m_head)) {
2781 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2782 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2788 * If there's a BPF listener, bounce a copy of this frame
2791 BPF_MTAP(ifp, m_head);
2796 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2798 /* Set a timeout in case the chip goes out to lunch. */
2799 sc_if->sk_watchdog_timer = 5;
2808 struct sk_if_softc *sc_if;
2812 sc_if = ifp->if_softc;
2814 SK_IF_LOCK_ASSERT(sc_if);
2816 if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer)
2820 * Reclaim first as there is a possibility of losing Tx completion interrupts.
2824 if (sc_if->sk_cdata.sk_tx_cnt != 0) {
2825 if_printf(sc_if->sk_ifp, "watchdog timeout\n");
2827 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2828 sk_init_locked(sc_if);
2832 callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
2841 struct sk_softc *sc;
2843 sc = device_get_softc(dev);
2846 /* Turn off the 'driver is loaded' LED. */
2847 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2850 * Reset the GEnesis controller. Doing this should also
2851 * assert the resets on the attached XMAC(s).
2863 struct sk_softc *sc;
2864 struct sk_if_softc *sc_if0, *sc_if1;
2865 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2867 sc = device_get_softc(dev);
2871 sc_if0 = sc->sk_if[SK_PORT_A];
2872 sc_if1 = sc->sk_if[SK_PORT_B];
2874 ifp0 = sc_if0->sk_ifp;
2876 ifp1 = sc_if1->sk_ifp;
2881 sc->sk_suspended = 1;
2892 struct sk_softc *sc;
2893 struct sk_if_softc *sc_if0, *sc_if1;
2894 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2896 sc = device_get_softc(dev);
2900 sc_if0 = sc->sk_if[SK_PORT_A];
2901 sc_if1 = sc->sk_if[SK_PORT_B];
2903 ifp0 = sc_if0->sk_ifp;
2905 ifp1 = sc_if1->sk_ifp;
2906 if (ifp0 != NULL && ifp0->if_flags & IFF_UP)
2907 sk_init_locked(sc_if0);
2908 if (ifp1 != NULL && ifp1->if_flags & IFF_UP)
2909 sk_init_locked(sc_if1);
2910 sc->sk_suspended = 0;
2918 * According to the data sheet for the SK-NET GENESIS, the hardware can
2919 * compute two Rx checksums at the same time (each checksum start position
2920 * is programmed in the Rx descriptors). However, TCP/UDP checksum offload
2921 * does not seem to work, at least on my Yukon hardware. I tried every
2922 * possible way to get a correct checksum value but never got one, so
2923 * TCP/UDP checksum offload is disabled at the moment and only IP checksum
2925 * offload is enabled. As the normal IP header is only 20 bytes, this is
2926 * not expected to increase throughput, but it does not seem to hurt
2927 * performance in my testing either. If you have more detailed information
2928 * about the checksum secrets of the hardware in question, please contact
2929 * yongari@FreeBSD.org to add TCP/UDP checksum offload support.
2931 static __inline void
2932 sk_rxcksum(ifp, m, csum)
2937 struct ether_header *eh;
2939 int32_t hlen, len, pktlen;
2940 u_int16_t csum1, csum2, ipcsum;
2942 pktlen = m->m_pkthdr.len;
2943 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
2945 eh = mtod(m, struct ether_header *);
2946 if (eh->ether_type != htons(ETHERTYPE_IP))
2948 ip = (struct ip *)(eh + 1);
2949 if (ip->ip_v != IPVERSION)
2951 hlen = ip->ip_hl << 2;
2952 pktlen -= sizeof(struct ether_header);
2953 if (hlen < sizeof(struct ip))
2955 if (ntohs(ip->ip_len) < hlen)
2957 if (ntohs(ip->ip_len) != pktlen)
2960 csum1 = htons(csum & 0xffff);
2961 csum2 = htons((csum >> 16) & 0xffff);
2962 ipcsum = in_addword(csum1, ~csum2 & 0xffff);
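/*
 * The hardware hands back two 16-bit partial checksums packed into one
 * 32-bit value: csum1 is the low half, csum2 the high half.  Adding
 * csum1 to the one's complement of csum2 is intended to yield 0xffff
 * when the IP header checksum is good, which is what the CSUM_IP_VALID
 * test below checks for.
 */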
2963 /* checksum fixup for IP options */
2964 len = hlen - sizeof(struct ip);
2967 * If the second checksum value were correct we could compute the IP
2968 * checksum with simple math. Unfortunately that value is wrong, so we
2969 * can't verify the checksum from it (it seems some magic is required to
2970 * obtain the correct value). If the second checksum value were correct,
2971 * it would also give us the TCP/UDP checksum here; however, that would
2972 * still require a pseudo header checksum calculation due to hardware
2973 * limitations.
2978 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2979 if (ipcsum == 0xffff)
2980 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2984 sk_rxvalid(sc, stat, len)
2985 struct sk_softc *sc;
2986 u_int32_t stat, len;
2989 if (sc->sk_type == SK_GENESIS) {
2990 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
2991 XM_RXSTAT_BYTES(stat) != len)
2994 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
2995 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
2996 YU_RXSTAT_JABBER)) != 0 ||
2997 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
2998 YU_RXSTAT_BYTES(stat) != len)
3007 struct sk_if_softc *sc_if;
3009 struct sk_softc *sc;
3012 struct sk_rx_desc *cur_rx;
3013 struct sk_rxdesc *rxd;
3015 u_int32_t csum, rxstat, sk_ctl;
3017 sc = sc_if->sk_softc;
3018 ifp = sc_if->sk_ifp;
3020 SK_IF_LOCK_ASSERT(sc_if);
3022 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
3023 sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);
3026 for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
3027 prog++, SK_INC(cons, SK_RX_RING_CNT)) {
3028 cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
3029 sk_ctl = le32toh(cur_rx->sk_ctl);
3030 if ((sk_ctl & SK_RXCTL_OWN) != 0)
3032 rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
3033 rxstat = le32toh(cur_rx->sk_xmac_rxstat);
3035 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
3036 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
3037 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
3038 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
3039 SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
3040 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
3042 sk_discard_rxbuf(sc_if, cons);
3047 csum = le32toh(cur_rx->sk_csum);
3048 if (sk_newbuf(sc_if, cons) != 0) {
3050 /* reuse old buffer */
3051 sk_discard_rxbuf(sc_if, cons);
3054 m->m_pkthdr.rcvif = ifp;
3055 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
3057 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3058 sk_rxcksum(ifp, m, csum);
3059 SK_IF_UNLOCK(sc_if);
3060 (*ifp->if_input)(ifp, m);
3065 sc_if->sk_cdata.sk_rx_cons = cons;
3066 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
3067 sc_if->sk_cdata.sk_rx_ring_map,
3068 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3073 sk_jumbo_rxeof(sc_if)
3074 struct sk_if_softc *sc_if;
3076 struct sk_softc *sc;
3079 struct sk_rx_desc *cur_rx;
3080 struct sk_rxdesc *jrxd;
3082 u_int32_t csum, rxstat, sk_ctl;
3084 sc = sc_if->sk_softc;
3085 ifp = sc_if->sk_ifp;
3087 SK_IF_LOCK_ASSERT(sc_if);
3089 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
3090 sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);
3093 for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
3094 prog < SK_JUMBO_RX_RING_CNT;
3095 prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
3096 cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
3097 sk_ctl = le32toh(cur_rx->sk_ctl);
3098 if ((sk_ctl & SK_RXCTL_OWN) != 0)
3100 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
3101 rxstat = le32toh(cur_rx->sk_xmac_rxstat);
3103 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
3104 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
3105 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
3106 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
3107 SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
3108 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
3110 sk_discard_jumbo_rxbuf(sc_if, cons);
3115 csum = le32toh(cur_rx->sk_csum);
3116 if (sk_jumbo_newbuf(sc_if, cons) != 0) {
3118 /* reuse old buffer */
3119 sk_discard_jumbo_rxbuf(sc_if, cons);
3122 m->m_pkthdr.rcvif = ifp;
3123 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
3125 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3126 sk_rxcksum(ifp, m, csum);
3127 SK_IF_UNLOCK(sc_if);
3128 (*ifp->if_input)(ifp, m);
3133 sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
3134 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
3135 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
3136 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3142 struct sk_if_softc *sc_if;
3144 struct sk_softc *sc;
3145 struct sk_txdesc *txd;
3146 struct sk_tx_desc *cur_tx;
3148 u_int32_t idx, sk_ctl;
3150 sc = sc_if->sk_softc;
3151 ifp = sc_if->sk_ifp;
3153 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
3156 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
3157 sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
3159 * Go through our tx ring and free mbufs for those
3160 * frames that have been sent.
3162 for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
3163 if (sc_if->sk_cdata.sk_tx_cnt <= 0)
3165 cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
3166 sk_ctl = le32toh(cur_tx->sk_ctl);
3167 if (sk_ctl & SK_TXCTL_OWN)
3169 sc_if->sk_cdata.sk_tx_cnt--;
3170 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3171 if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
3173 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
3174 BUS_DMASYNC_POSTWRITE);
3175 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
3180 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
3181 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
3182 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
3184 sc_if->sk_cdata.sk_tx_cons = idx;
3185 sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
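/*
 * The watchdog is re-armed above only while Tx descriptors remain
 * outstanding; sk_watchdog() declares a timeout when the counter
 * expires with sk_tx_cnt still non-zero.
 */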
3187 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
3188 sc_if->sk_cdata.sk_tx_ring_map,
3189 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3196 struct sk_if_softc *sc_if;
3197 struct mii_data *mii;
3202 ifp = sc_if->sk_ifp;
3203 mii = device_get_softc(sc_if->sk_miibus);
3205 if (!(ifp->if_flags & IFF_UP))
3208 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3209 sk_intr_bcom(sc_if);
3214 * According to SysKonnect, the correct way to verify that
3215 * the link has come back up is to poll bit 0 of the GPIO
3216 * register three times. This pin has the signal from the
3217 * link_sync pin connected to it; if we read the same link
3218 * state 3 times in a row, we know the link is up.
3220 for (i = 0; i < 3; i++) {
3221 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
3226 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3230 /* Turn the GP0 interrupt back on. */
3231 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
3232 SK_XM_READ_2(sc_if, XM_ISR);
3234 callout_stop(&sc_if->sk_tick_ch);
3238 sk_yukon_tick(xsc_if)
3241 struct sk_if_softc *sc_if;
3242 struct mii_data *mii;
3245 mii = device_get_softc(sc_if->sk_miibus);
3248 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
3253 struct sk_if_softc *sc_if;
3255 struct mii_data *mii;
3258 mii = device_get_softc(sc_if->sk_miibus);
3259 ifp = sc_if->sk_ifp;
3261 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3264 * Read the PHY interrupt register to make sure
3265 * we clear any pending interrupts.
3267 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
3269 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3270 sk_init_xmac(sc_if);
3274 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
3276 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
3279 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
3281 /* Turn off the link LED. */
3282 SK_IF_WRITE_1(sc_if, 0,
3283 SK_LINKLED1_CTL, SK_LINKLED_OFF);
3285 } else if (status & BRGPHY_ISR_LNK_CHG) {
3286 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3287 BRGPHY_MII_IMR, 0xFF00);
3290 /* Turn on the link LED. */
3291 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3292 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
3293 SK_LINKLED_BLINK_OFF);
3296 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3300 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3307 struct sk_if_softc *sc_if;
3309 struct sk_softc *sc;
3312 sc = sc_if->sk_softc;
3313 status = SK_XM_READ_2(sc_if, XM_ISR);
3316 * Link has gone down. Start MII tick timeout to
3317 * watch for link resync.
3319 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
3320 if (status & XM_ISR_GP0_SET) {
3321 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
3322 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3325 if (status & XM_ISR_AUTONEG_DONE) {
3326 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3330 if (status & XM_IMR_TX_UNDERRUN)
3331 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
3333 if (status & XM_IMR_RX_OVERRUN)
3334 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
3336 status = SK_XM_READ_2(sc_if, XM_ISR);
3342 sk_intr_yukon(sc_if)
3343 struct sk_if_softc *sc_if;
3347 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
3349 if ((status & SK_GMAC_INT_RX_OVER) != 0) {
3350 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3351 SK_RFCTL_RX_FIFO_OVER);
3354 if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
3355 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3356 SK_TFCTL_TX_FIFO_UNDER);
3364 struct sk_softc *sc = xsc;
3365 struct sk_if_softc *sc_if0, *sc_if1;
3366 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
3371 status = CSR_READ_4(sc, SK_ISSR);
3372 if (status == 0 || status == 0xffffffff || sc->sk_suspended)
3375 sc_if0 = sc->sk_if[SK_PORT_A];
3376 sc_if1 = sc->sk_if[SK_PORT_B];
3379 ifp0 = sc_if0->sk_ifp;
3381 ifp1 = sc_if1->sk_ifp;
3383 for (; (status &= sc->sk_intrmask) != 0;) {
3384 /* Handle receive interrupts first. */
3385 if (status & SK_ISR_RX1_EOF) {
3386 if (ifp0->if_mtu > SK_MAX_FRAMELEN)
3387 sk_jumbo_rxeof(sc_if0);
3390 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
3391 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3393 if (status & SK_ISR_RX2_EOF) {
3394 if (ifp1->if_mtu > SK_MAX_FRAMELEN)
3395 sk_jumbo_rxeof(sc_if1);
3398 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
3399 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3402 /* Then transmit interrupts. */
3403 if (status & SK_ISR_TX1_S_EOF) {
3405 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
3407 if (status & SK_ISR_TX2_S_EOF) {
3409 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
3412 /* Then MAC interrupts. */
3413 if (status & SK_ISR_MAC1 &&
3414 ifp0->if_drv_flags & IFF_DRV_RUNNING) {
3415 if (sc->sk_type == SK_GENESIS)
3416 sk_intr_xmac(sc_if0);
3418 sk_intr_yukon(sc_if0);
3421 if (status & SK_ISR_MAC2 &&
3422 ifp1->if_drv_flags & IFF_DRV_RUNNING) {
3423 if (sc->sk_type == SK_GENESIS)
3424 sk_intr_xmac(sc_if1);
3426 sk_intr_yukon(sc_if1);
3429 if (status & SK_ISR_EXTERNAL_REG) {
3431 sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
3432 sk_intr_bcom(sc_if0);
3434 sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
3435 sk_intr_bcom(sc_if1);
3437 status = CSR_READ_4(sc, SK_ISSR);
3440 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3442 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3443 sk_start_locked(ifp0);
3444 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3445 sk_start_locked(ifp1);
3453 struct sk_if_softc *sc_if;
3455 struct sk_softc *sc;
3457 u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2];
3458 struct sk_bcom_hack bhack[] = {
3459 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
3460 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
3461 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
3464 SK_IF_LOCK_ASSERT(sc_if);
3466 sc = sc_if->sk_softc;
3467 ifp = sc_if->sk_ifp;
3469 /* Unreset the XMAC. */
3470 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
3473 /* Reset the XMAC's internal state. */
3474 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3476 /* Save the XMAC II revision */
3477 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
3480 * Perform additional initialization for external PHYs,
3481 * namely for the 1000baseTX cards that use the XMAC's
3484 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3488 /* Take PHY out of reset. */
3489 val = sk_win_read_4(sc, SK_GPIO);
3490 if (sc_if->sk_port == SK_PORT_A)
3491 val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
3493 val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
3494 sk_win_write_4(sc, SK_GPIO, val);
3496 /* Enable GMII mode on the XMAC. */
3497 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
3499 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3500 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
3502 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3503 BRGPHY_MII_IMR, 0xFFF0);
3506 * Early versions of the BCM5400 apparently have
3507 * a bug that requires them to have their reserved
3508 * registers initialized to some magic values. I don't
3509 * know what the numbers do, I'm just the messenger.
3511 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
3513 while(bhack[i].reg) {
3514 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3515 bhack[i].reg, bhack[i].val);
3521 /* Set station address */
3522 bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
3523 SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
3524 SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
3525 SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
3526 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
3528 if (ifp->if_flags & IFF_BROADCAST) {
3529 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3531 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3534 /* We don't need the FCS appended to the packet. */
3535 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
3537 /* We want short frames padded to 60 bytes. */
3538 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
3541 * Enable the reception of all error frames. This is
3542 * a necessary evil due to the design of the XMAC. The
3543 * XMAC's receive FIFO is only 8K in size, however jumbo
3544 * frames can be up to 9000 bytes in length. When bad
3545 * frame filtering is enabled, the XMAC's RX FIFO operates
3546 * in 'store and forward' mode. For this to work, the
3547 * entire frame has to fit into the FIFO, but that means
3548 * that jumbo frames larger than 8192 bytes will be
3549 * truncated. Disabling all bad frame filtering causes
3550 * the RX FIFO to operate in streaming mode, in which
3551 * case the XMAC will start transferring frames out of the
3552 * RX FIFO as soon as the FIFO threshold is reached.
3554 if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3555 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
3556 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
3557 XM_MODE_RX_INRANGELEN);
3558 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3560 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3563 * Bump up the transmit threshold. This helps hold off transmit
3564 * underruns when we're blasting traffic from both ports at once.
3566 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
3568 /* Set promiscuous mode */
3569 sk_setpromisc(sc_if);
3571 /* Set multicast filter */
3574 /* Clear and enable interrupts */
3575 SK_XM_READ_2(sc_if, XM_ISR);
3576 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
3577 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
3579 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3581 /* Configure MAC arbiter */
3582 switch(sc_if->sk_xmac_rev) {
3583 case XM_XMAC_REV_B2:
3584 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
3585 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
3586 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
3587 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
3588 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
3589 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
3590 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
3591 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
3592 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3594 case XM_XMAC_REV_C1:
3595 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
3596 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
3597 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
3598 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
3599 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
3600 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
3601 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
3602 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
3603 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3608 sk_win_write_2(sc, SK_MACARB_CTL,
3609 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
3617 sk_init_yukon(sc_if)
3618 struct sk_if_softc *sc_if;
3622 struct sk_softc *sc;
3626 SK_IF_LOCK_ASSERT(sc_if);
3628 sc = sc_if->sk_softc;
3629 ifp = sc_if->sk_ifp;
3631 if (sc->sk_type == SK_YUKON_LITE &&
3632 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3634 * Workaround code for COMA mode, set PHY reset.
3635 * Otherwise it will not correctly take the chip out of sleep mode.
3638 v = sk_win_read_4(sc, SK_GPIO);
3639 v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
3640 sk_win_write_4(sc, SK_GPIO, v);
3643 /* GMAC and GPHY Reset */
3644 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
3645 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
3648 if (sc->sk_type == SK_YUKON_LITE &&
3649 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3651 * Workaround code for COMA mode, clear PHY reset
3653 v = sk_win_read_4(sc, SK_GPIO);
3656 sk_win_write_4(sc, SK_GPIO, v);
3659 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
3660 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
3662 if (sc->sk_coppertype)
3663 phy |= SK_GPHY_COPPER;
3665 phy |= SK_GPHY_FIBER;
3667 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
3669 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
3670 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
3671 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
3673 /* unused read of the interrupt source register */
3674 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
3676 reg = SK_YU_READ_2(sc_if, YUKON_PAR);
3678 /* MIB Counter Clear Mode set */
3679 reg |= YU_PAR_MIB_CLR;
3680 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3682 /* MIB Counter Clear Mode clear */
3683 reg &= ~YU_PAR_MIB_CLR;
3684 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3686 /* receive control reg */
3687 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
3689 /* transmit parameter register */
3690 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
3691 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
3693 /* serial mode register */
3694 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
3695 if (ifp->if_mtu > SK_MAX_FRAMELEN)
3696 reg |= YU_SMR_MFL_JUMBO;
3697 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
3699 /* Setup Yukon's address */
3700 for (i = 0; i < 3; i++) {
3701 /* Write Source Address 1 (unicast filter) */
3702 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
3703 IF_LLADDR(sc_if->sk_ifp)[i * 2] |
3704 IF_LLADDR(sc_if->sk_ifp)[i * 2 + 1] << 8);
3707 for (i = 0; i < 3; i++) {
3708 reg = sk_win_read_2(sc_if->sk_softc,
3709 SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
3710 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
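/*
 * YUKON_SAL1 was loaded above with the interface lladdr (the unicast
 * filter); YUKON_SAL2 is loaded with the station address read back from
 * the controller's per-port MAC address registers (SK_MAC1_0 ...).
 */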
3713 /* Set promiscuous mode */
3714 sk_setpromisc(sc_if);
3716 /* Set multicast filter */
3719 /* enable interrupt mask for counter overflows */
3720 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
3721 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
3722 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
3724 /* Configure RX MAC FIFO Flush Mask */
3725 v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
3726 YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
3728 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);
3730 /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
3731 if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
3732 v = SK_TFCTL_OPERATION_ON;
3734 v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
3735 /* Configure RX MAC FIFO */
3736 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
3737 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);
3739 /* Increase flush threshold to 64 bytes */
3740 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
3741 SK_RFCTL_FIFO_THRESHOLD + 1);
3743 /* Configure TX MAC FIFO */
3744 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
3745 SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
3749 * Note that to properly initialize any part of the GEnesis chip,
3750 * you first have to take it out of reset mode.
3756 struct sk_if_softc *sc_if = xsc;
3759 sk_init_locked(sc_if);
3760 SK_IF_UNLOCK(sc_if);
3766 sk_init_locked(sc_if)
3767 struct sk_if_softc *sc_if;
3769 struct sk_softc *sc;
3771 struct mii_data *mii;
3776 SK_IF_LOCK_ASSERT(sc_if);
3778 ifp = sc_if->sk_ifp;
3779 sc = sc_if->sk_softc;
3780 mii = device_get_softc(sc_if->sk_miibus);
3782 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3785 /* Cancel pending I/O and free all RX/TX buffers. */
3788 if (sc->sk_type == SK_GENESIS) {
3789 /* Configure LINK_SYNC LED */
3790 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
3791 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3792 SK_LINKLED_LINKSYNC_ON);
3794 /* Configure RX LED */
3795 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
3796 SK_RXLEDCTL_COUNTER_START);
3798 /* Configure TX LED */
3799 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
3800 SK_TXLEDCTL_COUNTER_START);
3804 * Configure descriptor poll timer
3806 * The SK-NET GENESIS data sheet says that a Start transmit command may
3807 * be lost due to CPU/cache related interim storage problems under
3808 * certain conditions. The document recommends a polling mechanism that
3809 * regularly sends a Start transmit command to initiate the transfer of
3810 * ready descriptors. To cope with this issue sk(4) enables the
3811 * descriptor poll timer to initiate descriptor processing periodically,
3812 * as defined by SK_DPT_TIMER_MAX. However, sk(4) still issues
3813 * SK_TXBMU_TX_START to the Tx BMU to get fast execution of the Tx
3814 * command instead of waiting for the next descriptor polling time.
3815 * The same rule may apply to the Rx side too, but it seems that is not
3816 * needed at the moment.
3817 * Since sk(4) uses descriptor polling only as a last resort, there is
3818 * no need to set a polling time smaller than the maximum allowable one.
3820 SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);
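/*
 * Only the poll interval is programmed here (the maximum, since the
 * timer is a safety net rather than the primary kick); the timer itself
 * is started with SK_DPT_TCTL_START further down, once the rings and
 * BMUs are online.
 */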
3822 /* Configure I2C registers */
3824 /* Configure XMAC(s) */
3825 switch (sc->sk_type) {
3827 sk_init_xmac(sc_if);
3832 sk_init_yukon(sc_if);
3837 if (sc->sk_type == SK_GENESIS) {
3838 /* Configure MAC FIFOs */
3839 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
3840 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
3841 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
3843 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
3844 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
3845 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
3848 /* Configure transmit arbiter(s) */
3849 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
3850 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
3852 /* Configure RAMbuffers */
3853 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
3854 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
3855 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
3856 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
3857 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
3858 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
3860 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
3861 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
3862 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
3863 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
3864 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
3865 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
3866 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
3868 /* Configure BMUs */
3869 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
3870 if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3871 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3872 SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3873 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3874 SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3876 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3877 SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
3878 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3879 SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
3882 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
3883 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
3884 SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
3885 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
3886 SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));
3888 /* Init descriptors */
3889 if (ifp->if_mtu > SK_MAX_FRAMELEN)
3890 error = sk_init_jumbo_rx_ring(sc_if);
3892 error = sk_init_rx_ring(sc_if);
3894 device_printf(sc_if->sk_if_dev,
3895 "initialization failed: no memory for rx buffers\n");
3899 sk_init_tx_ring(sc_if);
3901 /* Set interrupt moderation if changed via sysctl. */
3902 imr = sk_win_read_4(sc, SK_IMTIMERINIT);
3903 if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
3904 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
3907 device_printf(sc_if->sk_if_dev,
3908 "interrupt moderation is %d us.\n",
3912 /* Configure interrupt handling */
3913 CSR_READ_4(sc, SK_ISSR);
3914 if (sc_if->sk_port == SK_PORT_A)
3915 sc->sk_intrmask |= SK_INTRS1;
3917 sc->sk_intrmask |= SK_INTRS2;
3919 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
3921 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3924 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
3926 switch(sc->sk_type) {
3928 /* Enable XMACs TX and RX state machines */
3929 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
3930 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3935 reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
3936 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
3938 /* XXX disable 100Mbps and full duplex mode? */
3939 reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
3941 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
3944 /* Activate descriptor polling timer */
3945 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
3946 /* start transfer of Tx descriptors */
3947 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
3949 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3950 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3952 switch (sc->sk_type) {
3956 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
3960 callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
3967 struct sk_if_softc *sc_if;
3970 struct sk_softc *sc;
3971 struct sk_txdesc *txd;
3972 struct sk_rxdesc *rxd;
3973 struct sk_rxdesc *jrxd;
3977 SK_IF_LOCK_ASSERT(sc_if);
3978 sc = sc_if->sk_softc;
3979 ifp = sc_if->sk_ifp;
3981 callout_stop(&sc_if->sk_tick_ch);
3982 callout_stop(&sc_if->sk_watchdog_ch);
3984 /* stop Tx descriptor polling timer */
3985 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
3986 /* stop transfer of Tx descriptors */
3987 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
3988 for (i = 0; i < SK_TIMEOUT; i++) {
3989 val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
3990 if ((val & SK_TXBMU_TX_STOP) == 0)
3994 if (i == SK_TIMEOUT)
3995 device_printf(sc_if->sk_if_dev,
3996 "can not stop transfer of Tx descriptor\n");
3997 /* stop transfer of Rx descriptors */
3998 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
3999 for (i = 0; i < SK_TIMEOUT; i++) {
4000 val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
4001 if ((val & SK_RXBMU_RX_STOP) == 0)
4005 if (i == SK_TIMEOUT)
4006 device_printf(sc_if->sk_if_dev,
4007 "can not stop transfer of Rx descriptor\n");
4009 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
4010 /* Put PHY back into reset. */
4011 val = sk_win_read_4(sc, SK_GPIO);
4012 if (sc_if->sk_port == SK_PORT_A) {
4013 val |= SK_GPIO_DIR0;
4014 val &= ~SK_GPIO_DAT0;
4016 val |= SK_GPIO_DIR2;
4017 val &= ~SK_GPIO_DAT2;
4019 sk_win_write_4(sc, SK_GPIO, val);
4022 /* Turn off various components of this interface. */
4023 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
4024 switch (sc->sk_type) {
4026 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
4027 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
4032 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
4033 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
4036 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
4037 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
4038 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
4039 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
4040 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
4041 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
4042 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
4043 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
4044 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
4046 /* Disable interrupts */
4047 if (sc_if->sk_port == SK_PORT_A)
4048 sc->sk_intrmask &= ~SK_INTRS1;
4050 sc->sk_intrmask &= ~SK_INTRS2;
4051 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
4053 SK_XM_READ_2(sc_if, XM_ISR);
4054 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
4056 /* Free RX and TX mbufs still in the queues. */
4057 for (i = 0; i < SK_RX_RING_CNT; i++) {
4058 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
4059 if (rxd->rx_m != NULL) {
4060 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
4061 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4062 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
4068 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
4069 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
4070 if (jrxd->rx_m != NULL) {
4071 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
4072 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4073 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
4075 m_freem(jrxd->rx_m);
4079 for (i = 0; i < SK_TX_RING_CNT; i++) {
4080 txd = &sc_if->sk_cdata.sk_txdesc[i];
4081 if (txd->tx_m != NULL) {
4082 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
4083 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
4084 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
4091 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
4097 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4103 value = *(int *)arg1;
4104 error = sysctl_handle_int(oidp, &value, 0, req);
4105 if (error || !req->newptr)
4107 if (value < low || value > high)
4109 *(int *)arg1 = value;
4114 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
4116 return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
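/*
 * A handler built on sysctl_int_range(), such as sysctl_hw_sk_int_mod
 * above, is normally hooked into the device's sysctl tree from the
 * attach path.  A minimal sketch, assuming a node name of "int_mod" and
 * the sk_int_mod field used earlier in sk_init_locked():
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 *	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->sk_int_mod, 0,
 *	    sysctl_hw_sk_int_mod, "I", "SK interrupt moderation");
 */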