/******************************************************************************
 *
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 * LICENSE:
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * - Neither the name of Marvell nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/msk/if_mskreg.h>

MODULE_DEPEND(msk, pci, 1, 1, 1);
MODULE_DEPEND(msk, ether, 1, 1, 1);
MODULE_DEPEND(msk, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Tunables. */
static int msi_disable = 0;
TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
static int legacy_intr = 0;
TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
static int jumbo_disable = 0;
TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
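
/*
 * These are loader tunables; a typical way to set them (an illustrative
 * example, not taken from this file) is via /boot/loader.conf:
 *
 *	hw.msk.msi_disable=1	# force INTx instead of MSI
 *	hw.msk.legacy_intr=1	# use the legacy interrupt handler
 *	hw.msk.jumbo_disable=1	# don't allocate jumbo Rx buffers
 */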

#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

/*
 * Devices supported by this driver.
 */
static struct msk_product {
    uint16_t	msk_vendorid;
    uint16_t	msk_deviceid;
    const char	*msk_name;
} msk_products[] = {
    { VENDORID_SK, DEVICEID_SK_YUKON2,
        "SK-9Sxx Gigabit Ethernet" },
    { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
        "SK-9Exx Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
        "Marvell Yukon 88E8021CU Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
        "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
        "Marvell Yukon 88E8022CU Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
        "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
        "Marvell Yukon 88E8061CU Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
        "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
        "Marvell Yukon 88E8062CU Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
        "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8035,
        "Marvell Yukon 88E8035 Fast Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8036,
        "Marvell Yukon 88E8036 Fast Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8038,
        "Marvell Yukon 88E8038 Fast Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8039,
        "Marvell Yukon 88E8039 Fast Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8040,
        "Marvell Yukon 88E8040 Fast Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8040T,
        "Marvell Yukon 88E8040T Fast Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_8048,
        "Marvell Yukon 88E8048 Fast Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_4361,
        "Marvell Yukon 88E8050 Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_4360,
        "Marvell Yukon 88E8052 Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_4362,
        "Marvell Yukon 88E8053 Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_4363,
        "Marvell Yukon 88E8055 Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_4364,
        "Marvell Yukon 88E8056 Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_4365,
        "Marvell Yukon 88E8070 Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_436A,
        "Marvell Yukon 88E8058 Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_436B,
        "Marvell Yukon 88E8071 Gigabit Ethernet" },
    { VENDORID_MARVELL, DEVICEID_MRVL_436C,
        "Marvell Yukon 88E8072 Gigabit Ethernet" },
    { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
        "D-Link 550SX Gigabit Ethernet" },
    { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
        "D-Link 560T Gigabit Ethernet" }
};

static const char *model_name[] = {
    "Yukon XL",
    "Yukon EC Ultra",
    "Yukon Extreme",
    "Yukon EC",
    "Yukon FE",
    "Yukon FE+"
};

static int mskc_probe(device_t);
static int mskc_attach(device_t);
static int mskc_detach(device_t);
static int mskc_shutdown(device_t);
static int mskc_setup_rambuffer(struct msk_softc *);
static int mskc_suspend(device_t);
static int mskc_resume(device_t);
static void mskc_reset(struct msk_softc *);

static int msk_probe(device_t);
static int msk_attach(device_t);
static int msk_detach(device_t);

static void msk_tick(void *);
static void msk_legacy_intr(void *);
static int msk_intr(void *);
static void msk_int_task(void *, int);
static void msk_intr_phy(struct msk_if_softc *);
static void msk_intr_gmac(struct msk_if_softc *);
static __inline void msk_rxput(struct msk_if_softc *);
static int msk_handle_events(struct msk_softc *);
static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void msk_intr_hwerr(struct msk_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void msk_fixup_rx(struct mbuf *);
#endif
static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_txeof(struct msk_if_softc *, int);
static int msk_encap(struct msk_if_softc *, struct mbuf **);
static void msk_tx_task(void *, int);
static void msk_start(struct ifnet *);
static int msk_ioctl(struct ifnet *, u_long, caddr_t);
static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void msk_set_rambuffer(struct msk_if_softc *);
static void msk_set_tx_stfwd(struct msk_if_softc *);
static void msk_init(void *);
static void msk_init_locked(struct msk_if_softc *);
static void msk_stop(struct msk_if_softc *);
static void msk_watchdog(struct msk_if_softc *);
static int msk_mediachange(struct ifnet *);
static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
static void msk_phy_power(struct msk_softc *, int);
static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int msk_status_dma_alloc(struct msk_softc *);
static void msk_status_dma_free(struct msk_softc *);
static int msk_txrx_dma_alloc(struct msk_if_softc *);
static int msk_rx_dma_jalloc(struct msk_if_softc *);
static void msk_txrx_dma_free(struct msk_if_softc *);
static void msk_rx_dma_jfree(struct msk_if_softc *);
static int msk_init_rx_ring(struct msk_if_softc *);
static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
static void msk_init_tx_ring(struct msk_if_softc *);
static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int msk_newbuf(struct msk_if_softc *, int);
static int msk_jumbo_newbuf(struct msk_if_softc *, int);

static int msk_phy_readreg(struct msk_if_softc *, int, int);
static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
static int msk_miibus_readreg(device_t, int, int);
static int msk_miibus_writereg(device_t, int, int, int);
static void msk_miibus_statchg(device_t);

static void msk_rxfilter(struct msk_if_softc *);
static void msk_setvlan(struct msk_if_softc *, struct ifnet *);

static void msk_stats_clear(struct msk_if_softc *);
static void msk_stats_update(struct msk_if_softc *);
static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
static void msk_sysctl_node(struct msk_if_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);

static device_method_t mskc_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	mskc_probe),
    DEVMETHOD(device_attach,	mskc_attach),
    DEVMETHOD(device_detach,	mskc_detach),
    DEVMETHOD(device_suspend,	mskc_suspend),
    DEVMETHOD(device_resume,	mskc_resume),
    DEVMETHOD(device_shutdown,	mskc_shutdown),

    /* bus interface */
    DEVMETHOD(bus_print_child,	bus_generic_print_child),
    DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

    { NULL, NULL }
};

static driver_t mskc_driver = {
    "mskc",
    mskc_methods,
    sizeof(struct msk_softc)
};

static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	msk_probe),
    DEVMETHOD(device_attach,	msk_attach),
    DEVMETHOD(device_detach,	msk_detach),
    DEVMETHOD(device_shutdown,	bus_generic_shutdown),

    /* bus interface */
    DEVMETHOD(bus_print_child,	bus_generic_print_child),
    DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

    /* MII interface */
    DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
    DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
    DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

    { NULL, NULL }
};

static driver_t msk_driver = {
    "msk",
    msk_methods,
    sizeof(struct msk_if_softc)
};

static devclass_t msk_devclass;

DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec msk_res_spec_io[] = {
    { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
    { -1, 0, 0 }
};

static struct resource_spec msk_res_spec_mem[] = {
    { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
    { -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_legacy[] = {
    { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
    { -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_msi[] = {
    { SYS_RES_IRQ, 1, RF_ACTIVE },
    { -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_msi2[] = {
    { SYS_RES_IRQ, 1, RF_ACTIVE },
    { SYS_RES_IRQ, 2, RF_ACTIVE },
    { -1, 0, 0 }
};

static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
    struct msk_if_softc *sc_if;

    if (phy != PHY_ADDR_MARV)
        return (0);

    sc_if = device_get_softc(dev);

    return (msk_phy_readreg(sc_if, phy, reg));
}

static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
    struct msk_softc *sc;
    int i, val;

    sc = sc_if->msk_softc;

    GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
        GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
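    /*
     * The SMI control word written above encodes the PHY address, the
     * register index and the operation (GM_SMI_CT_OP_RD for a read).
     * The loop below polls GM_SMI_CTRL until the controller sets
     * GM_SMI_CT_RD_VAL, signalling that GM_SMI_DATA holds the latched
     * register value.
     */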
    for (i = 0; i < MSK_TIMEOUT; i++) {
        DELAY(1);
        val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
        if ((val & GM_SMI_CT_RD_VAL) != 0) {
            val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
            break;
        }
    }

    if (i == MSK_TIMEOUT) {
        if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
        val = 0;
    }

    return (val);
}

static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
    struct msk_if_softc *sc_if;

    if (phy != PHY_ADDR_MARV)
        return (0);

    sc_if = device_get_softc(dev);

    return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
    struct msk_softc *sc;
    int i;

    sc = sc_if->msk_softc;

    GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
    GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
        GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
    for (i = 0; i < MSK_TIMEOUT; i++) {
        DELAY(1);
        if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
            GM_SMI_CT_BUSY) == 0)
            break;
    }
    if (i == MSK_TIMEOUT)
        if_printf(sc_if->msk_ifp, "phy write timeout\n");

    return (0);
}

static void
msk_miibus_statchg(device_t dev)
{
    struct msk_softc *sc;
    struct msk_if_softc *sc_if;
    struct mii_data *mii;
    struct ifnet *ifp;
    uint32_t gmac;

    sc_if = device_get_softc(dev);
    sc = sc_if->msk_softc;

    MSK_IF_LOCK_ASSERT(sc_if);

    mii = device_get_softc(sc_if->msk_miibus);
    ifp = sc_if->msk_ifp;
    if (mii == NULL || ifp == NULL ||
        (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
        return;

    sc_if->msk_flags &= ~MSK_FLAG_LINK;
    if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
        (IFM_AVALID | IFM_ACTIVE)) {
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
        case IFM_100_TX:
            sc_if->msk_flags |= MSK_FLAG_LINK;
            break;
        case IFM_1000_T:
        case IFM_1000_SX:
        case IFM_1000_LX:
        case IFM_1000_CX:
            if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
                sc_if->msk_flags |= MSK_FLAG_LINK;
            break;
        }
    }

    if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
        /* Enable Tx FIFO Underrun. */
        CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
            GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
        /*
         * Because mii(4) notifies msk(4) when it detects a link
         * status change, there is no need to enable automatic
         * speed/flow-control/duplex updates.
         */
        gmac = GM_GPCR_AU_ALL_DIS;
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_1000_SX:
        case IFM_1000_T:
            gmac |= GM_GPCR_SPEED_1000;
            break;
        case IFM_100_TX:
            gmac |= GM_GPCR_SPEED_100;
            break;
        case IFM_10_T:
            break;
        }

        if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
            gmac |= GM_GPCR_DUP_FULL;
        /* Disable Rx flow control. */
        if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
            gmac |= GM_GPCR_FC_RX_DIS;
        /* Disable Tx flow control. */
        if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
            gmac |= GM_GPCR_FC_TX_DIS;
        gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
        /* Read again to ensure writing. */
        GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

        gmac = GMC_PAUSE_ON;
        if (((mii->mii_media_active & IFM_GMASK) &
            (IFM_FLAG0 | IFM_FLAG1)) == 0)
            gmac = GMC_PAUSE_OFF;
        /* Disable pause for 10/100 Mbps in half-duplex mode. */
        if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
            (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
            IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
            gmac = GMC_PAUSE_OFF;
        CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

        /* Enable PHY interrupt for FIFO underrun/overflow. */
        msk_phy_writereg(sc_if, PHY_ADDR_MARV,
            PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
    } else {
        /*
         * Link state changed to down.
         * Disable PHY interrupts.
         */
        msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
        /* Disable Rx/Tx MAC. */
        gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
        if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
            gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
            GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
            /* Read again to ensure writing. */
            GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
        }
    }
}

static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
    struct msk_softc *sc;
    struct ifnet *ifp;
    struct ifmultiaddr *ifma;
    uint32_t mchash[2];
    uint32_t crc;
    uint16_t mode;

    sc = sc_if->msk_softc;

    MSK_IF_LOCK_ASSERT(sc_if);

    ifp = sc_if->msk_ifp;

    bzero(mchash, sizeof(mchash));
    mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
    if ((ifp->if_flags & IFF_PROMISC) != 0)
        mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
    else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
        mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
        mchash[0] = 0xffff;
        mchash[1] = 0xffff;
    } else {
        mode |= GM_RXCR_UCF_ENA;
        if_maddr_rlock(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
            if (ifma->ifma_addr->sa_family != AF_LINK)
                continue;
            crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
                ifma->ifma_addr), ETHER_ADDR_LEN);
            /* Just want the 6 least significant bits. */
            crc &= 0x3f;
            /* Set the corresponding bit in the hash table. */
            mchash[crc >> 5] |= 1 << (crc & 0x1f);
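            /*
             * Worked example: a 6-bit hash value of 0x2f (47)
             * sets bit 15 of mchash[1]; the register writes
             * below then land it in GM_MC_ADDR_H3.  The four
             * 16-bit registers H1..H4 together make up the
             * 64-bit multicast hash table.
             */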
        }
        if_maddr_runlock(ifp);
        if (mchash[0] != 0 || mchash[1] != 0)
            mode |= GM_RXCR_MCF_ENA;
    }

    GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
        mchash[0] & 0xffff);
    GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
        (mchash[0] >> 16) & 0xffff);
    GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
        mchash[1] & 0xffff);
    GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
        (mchash[1] >> 16) & 0xffff);
    GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
    struct msk_softc *sc;

    sc = sc_if->msk_softc;
    if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
        CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
            RX_VLAN_STRIP_ON);
        CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
            TX_VLAN_TAG_ON);
    } else {
        CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
            RX_VLAN_STRIP_OFF);
        CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
            TX_VLAN_TAG_OFF);
    }
}

static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
    struct msk_ring_data *rd;
    struct msk_rxdesc *rxd;
    int i, prod;

    MSK_IF_LOCK_ASSERT(sc_if);

    sc_if->msk_cdata.msk_rx_cons = 0;
    sc_if->msk_cdata.msk_rx_prod = 0;
    sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

    rd = &sc_if->msk_rdata;
    bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
    prod = sc_if->msk_cdata.msk_rx_prod;
    for (i = 0; i < MSK_RX_RING_CNT; i++) {
        rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
        rxd->rx_m = NULL;
        rxd->rx_le = &rd->msk_rx_ring[prod];
        if (msk_newbuf(sc_if, prod) != 0)
            return (ENOBUFS);
        MSK_INC(prod, MSK_RX_RING_CNT);
    }

    bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
        sc_if->msk_cdata.msk_rx_ring_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /* Update prefetch unit. */
    sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
    CSR_WRITE_2(sc_if->msk_softc,
        Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
        sc_if->msk_cdata.msk_rx_prod);
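    /*
     * The prefetch unit fetches descriptors on its own; writing the
     * put index tells it how far the driver has initialized the ring,
     * so it is armed here with the last valid ring entry.
     */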

    return (0);
}

static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
    struct msk_ring_data *rd;
    struct msk_rxdesc *rxd;
    int i, prod;

    MSK_IF_LOCK_ASSERT(sc_if);

    sc_if->msk_cdata.msk_rx_cons = 0;
    sc_if->msk_cdata.msk_rx_prod = 0;
    sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

    rd = &sc_if->msk_rdata;
    bzero(rd->msk_jumbo_rx_ring,
        sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
    prod = sc_if->msk_cdata.msk_rx_prod;
    for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
        rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
        rxd->rx_m = NULL;
        rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
        if (msk_jumbo_newbuf(sc_if, prod) != 0)
            return (ENOBUFS);
        MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
    }

    bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
        sc_if->msk_cdata.msk_jumbo_rx_ring_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /* Update prefetch unit. */
    sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
    CSR_WRITE_2(sc_if->msk_softc,
        Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
        sc_if->msk_cdata.msk_rx_prod);

    return (0);
}

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
    struct msk_ring_data *rd;
    struct msk_txdesc *txd;
    int i;

    sc_if->msk_cdata.msk_tso_mtu = 0;
    sc_if->msk_cdata.msk_tx_prod = 0;
    sc_if->msk_cdata.msk_tx_cons = 0;
    sc_if->msk_cdata.msk_tx_cnt = 0;

    rd = &sc_if->msk_rdata;
    bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
    for (i = 0; i < MSK_TX_RING_CNT; i++) {
        txd = &sc_if->msk_cdata.msk_txdesc[i];
        txd->tx_m = NULL;
        txd->tx_le = &rd->msk_tx_ring[i];
    }

    bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
        sc_if->msk_cdata.msk_tx_ring_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
    struct msk_rx_desc *rx_le;
    struct msk_rxdesc *rxd;
    struct mbuf *m;

    rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
    m = rxd->rx_m;
    rx_le = rxd->rx_le;
    rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
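
/*
 * Note that msk_discard_rxbuf() (and its jumbo twin below) recycles the
 * mbuf already attached to the ring slot: handing the descriptor back to
 * the chip with HW_OWNER set lets it reuse the old buffer whenever a
 * replacement mbuf cannot be allocated in the Rx path.
 */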

static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
    struct msk_rx_desc *rx_le;
    struct msk_rxdesc *rxd;
    struct mbuf *m;

    rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
    m = rxd->rx_m;
    rx_le = rxd->rx_le;
    rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
    struct msk_rx_desc *rx_le;
    struct msk_rxdesc *rxd;
    struct mbuf *m;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    int nsegs;

    m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
        return (ENOBUFS);

    m->m_len = m->m_pkthdr.len = MCLBYTES;
    if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
        m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
    else
        m_adj(m, MSK_RX_BUF_ALIGN);
#endif

    if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
        sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
        BUS_DMA_NOWAIT) != 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
    if (rxd->rx_m != NULL) {
        bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
    }
    map = rxd->rx_dmamap;
    rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
    sc_if->msk_cdata.msk_rx_sparemap = map;
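    /*
     * Spare-map trick: the new mbuf was loaded into the spare map
     * first, and only after a successful load are the slot's map and
     * the spare swapped.  A load failure therefore never leaves the
     * ring entry without a valid, loaded DMA map.
     */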
    bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
        BUS_DMASYNC_PREREAD);
    rxd->rx_m = m;
    rx_le = rxd->rx_le;
    rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
    rx_le->msk_control =
        htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

    return (0);
}

static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
    struct msk_rx_desc *rx_le;
    struct msk_rxdesc *rxd;
    struct mbuf *m;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    int nsegs;

    m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
    if (m == NULL)
        return (ENOBUFS);
    if ((m->m_flags & M_EXT) == 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    m->m_len = m->m_pkthdr.len = MJUM9BYTES;
    if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
        m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
    else
        m_adj(m, MSK_RX_BUF_ALIGN);
#endif

    if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
        sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
        BUS_DMA_NOWAIT) != 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
    if (rxd->rx_m != NULL) {
        bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
            rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
            rxd->rx_dmamap);
    }
    map = rxd->rx_dmamap;
    rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
    sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
    bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
        BUS_DMASYNC_PREREAD);
    rxd->rx_m = m;
    rx_le = rxd->rx_le;
    rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
    rx_le->msk_control =
        htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

    return (0);
}

/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
    struct msk_if_softc *sc_if;
    struct mii_data *mii;
    int error;

    sc_if = ifp->if_softc;

    MSK_IF_LOCK(sc_if);
    mii = device_get_softc(sc_if->msk_miibus);
    error = mii_mediachg(mii);
    MSK_IF_UNLOCK(sc_if);

    return (error);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct msk_if_softc *sc_if;
    struct mii_data *mii;

    sc_if = ifp->if_softc;
    MSK_IF_LOCK(sc_if);
    if ((ifp->if_flags & IFF_UP) == 0) {
        MSK_IF_UNLOCK(sc_if);
        return;
    }
    mii = device_get_softc(sc_if->msk_miibus);

    mii_pollstat(mii);
    MSK_IF_UNLOCK(sc_if);
    ifmr->ifm_active = mii->mii_media_active;
    ifmr->ifm_status = mii->mii_media_status;
}

static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
    struct msk_if_softc *sc_if;
    struct ifreq *ifr;
    struct mii_data *mii;
    int error, mask;

    sc_if = ifp->if_softc;
    ifr = (struct ifreq *)data;
    error = 0;

    switch(command) {
    case SIOCSIFMTU:
        MSK_IF_LOCK(sc_if);
        if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
            error = EINVAL;
        else if (ifp->if_mtu != ifr->ifr_mtu) {
            if (ifr->ifr_mtu > ETHERMTU) {
                if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
                    error = EINVAL;
                    MSK_IF_UNLOCK(sc_if);
                    break;
                }
                if ((sc_if->msk_flags &
                    MSK_FLAG_JUMBO_NOCSUM) != 0) {
                    ifp->if_hwassist &=
                        ~(MSK_CSUM_FEATURES | CSUM_TSO);
                    ifp->if_capenable &=
                        ~(IFCAP_TSO4 | IFCAP_TXCSUM);
                    VLAN_CAPABILITIES(ifp);
                }
            }
            ifp->if_mtu = ifr->ifr_mtu;
            msk_init_locked(sc_if);
        }
        MSK_IF_UNLOCK(sc_if);
        break;
    case SIOCSIFFLAGS:
        MSK_IF_LOCK(sc_if);
        if ((ifp->if_flags & IFF_UP) != 0) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
                ((ifp->if_flags ^ sc_if->msk_if_flags) &
                (IFF_PROMISC | IFF_ALLMULTI)) != 0)
                msk_rxfilter(sc_if);
            else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
                msk_init_locked(sc_if);
        } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
            msk_stop(sc_if);
        sc_if->msk_if_flags = ifp->if_flags;
        MSK_IF_UNLOCK(sc_if);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        MSK_IF_LOCK(sc_if);
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
            msk_rxfilter(sc_if);
        MSK_IF_UNLOCK(sc_if);
        break;
    case SIOCGIFMEDIA:
    case SIOCSIFMEDIA:
        mii = device_get_softc(sc_if->msk_miibus);
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
        break;
    case SIOCSIFCAP:
        MSK_IF_LOCK(sc_if);
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        if ((mask & IFCAP_TXCSUM) != 0 &&
            (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
                ifp->if_hwassist |= MSK_CSUM_FEATURES;
            else
                ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
        }
        if ((mask & IFCAP_RXCSUM) != 0 &&
            (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
            ifp->if_capenable ^= IFCAP_RXCSUM;
        if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
            (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
            msk_setvlan(sc_if, ifp);
        }
        if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
            (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
            ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
        if ((mask & IFCAP_TSO4) != 0 &&
            (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
            ifp->if_capenable ^= IFCAP_TSO4;
            if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
                ifp->if_hwassist |= CSUM_TSO;
            else
                ifp->if_hwassist &= ~CSUM_TSO;
        }
        if (ifp->if_mtu > ETHERMTU &&
            (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
            ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
            ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
        }

        VLAN_CAPABILITIES(ifp);
        MSK_IF_UNLOCK(sc_if);
        break;
    default:
        error = ether_ioctl(ifp, command, data);
        break;
    }

    return (error);
}

static int
mskc_probe(device_t dev)
{
    struct msk_product *mp;
    uint16_t vendor, devid;
    int i;

    vendor = pci_get_vendor(dev);
    devid = pci_get_device(dev);
    mp = msk_products;
    for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
        i++, mp++) {
        if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
            device_set_desc(dev, mp->msk_name);
            return (BUS_PROBE_DEFAULT);
        }
    }

    return (ENXIO);
}

static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
    int next;
    int i;

    /* Get adapter SRAM size. */
    sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
    if (bootverbose)
        device_printf(sc->msk_dev,
            "RAM buffer size : %dKB\n", sc->msk_ramsize);
    if (sc->msk_ramsize == 0)
        return (0);

    sc->msk_pflags |= MSK_FLAG_RAMBUF;
    /*
     * Give the receiver 2/3 of the memory and round down to a
     * multiple of 1024.  Tx/Rx RAM buffer sizes of the Yukon II
     * should be multiples of 1024.
     */
    sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
    sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
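    /*
     * Worked example (hypothetical size): a 48KB SRAM yields
     * rounddown(48 * 1024 * 2 / 3, 1024) = 32KB for the receive
     * queue, leaving 16KB for the transmit queue.
     */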
    for (i = 0, next = 0; i < sc->msk_num_port; i++) {
        sc->msk_rxqstart[i] = next;
        sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
        next = sc->msk_rxqend[i] + 1;
        sc->msk_txqstart[i] = next;
        sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
        next = sc->msk_txqend[i] + 1;
        if (bootverbose) {
            device_printf(sc->msk_dev,
                "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
                sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
                sc->msk_rxqend[i]);
            device_printf(sc->msk_dev,
                "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
                sc->msk_txqsize / 1024, sc->msk_txqstart[i],
                sc->msk_txqend[i]);
        }
    }

    return (0);
}

static void
msk_phy_power(struct msk_softc *sc, int mode)
{
    uint32_t our, val;
    int i;

    switch (mode) {
    case MSK_PHY_POWERUP:
        /* Switch power to VCC (WA for VAUX problem). */
        CSR_WRITE_1(sc, B0_POWER_CTRL,
            PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
        /* Disable Core Clock Division, set Clock Select to 0. */
        CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

        val = 0;
        if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
            sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
            /* Enable bits are inverted. */
            val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
                Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
                Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
        }
        /*
         * Enable PCI & Core Clock, enable clock gating for both Links.
         */
        CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

        val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
        val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
        if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
            if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
                /* Deassert Low Power for 1st PHY. */
                val |= PCI_Y2_PHY1_COMA;
                if (sc->msk_num_port > 1)
                    val |= PCI_Y2_PHY2_COMA;
            }
        }
        /* Release PHY from PowerDown/COMA mode. */
        pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
        switch (sc->msk_hw_id) {
        case CHIP_ID_YUKON_EC_U:
        case CHIP_ID_YUKON_EX:
        case CHIP_ID_YUKON_FE_P:
            CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_OFF);

            /* Enable all clocks. */
            pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
            our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
            our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
                PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
            /* Set all bits to 0 except bits 15..12. */
            pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
            our = pci_read_config(sc->msk_dev, PCI_OUR_REG_5, 4);
            our &= PCI_CTL_TIM_VMAIN_AV_MSK;
            pci_write_config(sc->msk_dev, PCI_OUR_REG_5, our, 4);
            pci_write_config(sc->msk_dev, PCI_CFG_REG_1, 0, 4);

            /*
             * Disable status race, workaround for
             * Yukon EC Ultra & Yukon EX.
             */
            val = CSR_READ_4(sc, B2_GP_IO);
            val |= GLB_GPIO_STAT_RACE_DIS;
            CSR_WRITE_4(sc, B2_GP_IO, val);
            CSR_READ_4(sc, B2_GP_IO);
            break;
        }

        for (i = 0; i < sc->msk_num_port; i++) {
            CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
                GMLC_RST_SET);
            CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
                GMLC_RST_CLR);
        }
        break;
    case MSK_PHY_POWERDOWN:
        val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
        val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
        if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
            sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
            val &= ~PCI_Y2_PHY1_COMA;
            if (sc->msk_num_port > 1)
                val &= ~PCI_Y2_PHY2_COMA;
        }
        pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);

        val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
            Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
            Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
        if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
            sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
            /* Enable bits are inverted. */
            val = 0;
        }
        /*
         * Disable PCI & Core Clock, disable clock gating for
         * both Links.
         */
        CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
        CSR_WRITE_1(sc, B0_POWER_CTRL,
            PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
        break;
    default:
        break;
    }
}

static void
mskc_reset(struct msk_softc *sc)
{
    bus_addr_t addr;
    uint16_t status;
    uint32_t val;
    int i;

    CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

    /* Disable ASF. */
    if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
        status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
        /* Clear AHB bridge & microcontroller reset. */
        status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
            Y2_ASF_HCU_CCSR_CPU_RST_MODE);
        /* Clear ASF microcontroller state. */
        status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
        CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
    } else
        CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
    CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);

    /*
     * Since we disabled ASF, S/W reset is required for Power Management.
     */
    CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
    CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

    /* Clear all error bits in the PCI status register. */
    status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
    CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

    pci_write_config(sc->msk_dev, PCIR_STATUS, status |
        PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
        PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
    CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

    switch (sc->msk_bustype) {
    case MSK_PEX_BUS:
        /* Clear all PEX errors. */
        CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
        val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
        if ((val & PEX_RX_OV) != 0) {
            sc->msk_intrmask &= ~Y2_IS_HW_ERR;
            sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
        }
        break;
    case MSK_PCI_BUS:
    case MSK_PCIX_BUS:
        /* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
        val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
        if (val == 0)
            pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
        if (sc->msk_bustype == MSK_PCIX_BUS) {
            /* Set Cache Line Size opt. */
            val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
            val |= PCI_CLS_OPT;
            pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
        }
        break;
    }
    /* Set PHY power state. */
    msk_phy_power(sc, MSK_PHY_POWERUP);

    /* Reset GPHY/GMAC Control. */
    for (i = 0; i < sc->msk_num_port; i++) {
        /* GPHY Control reset. */
        CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
        CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
        /* GMAC Control reset. */
        CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
        CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
        CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
        if (sc->msk_hw_id == CHIP_ID_YUKON_EX)
            CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
                GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
                GMC_BYP_RETR_ON);
    }
    CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

    /* LED On. */
    CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

    /* Clear TWSI IRQ. */
    CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

    /* Turn off hardware timer. */
    CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
    CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

    /* Turn off descriptor polling. */
    CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

    /* Turn off time stamps. */
    CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
    CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

    /* Configure timeout values. */
    for (i = 0; i < sc->msk_num_port; i++) {
        CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
        CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
        CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
            MSK_RI_TO_53);
        CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
            MSK_RI_TO_53);
        CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
            MSK_RI_TO_53);
        CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
            MSK_RI_TO_53);
        CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
            MSK_RI_TO_53);
        CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
            MSK_RI_TO_53);
        CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
            MSK_RI_TO_53);
        CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
            MSK_RI_TO_53);
        CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
            MSK_RI_TO_53);
        CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
            MSK_RI_TO_53);
        CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
            MSK_RI_TO_53);
        CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
            MSK_RI_TO_53);
    }

    /* Disable all interrupts. */
    CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
    CSR_READ_4(sc, B0_HWE_IMSK);
    CSR_WRITE_4(sc, B0_IMSK, 0);
    CSR_READ_4(sc, B0_IMSK);

    /*
     * On dual port PCI-X cards, there is a problem where status
     * can be received out of order due to split transactions.
     */
    if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
        int pcix;
        uint16_t pcix_cmd;

        if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
            pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
            /* Clear Max Outstanding Split Transactions. */
            pcix_cmd &= ~0x70;
            CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
            pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
            CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
        }
    }
    if (sc->msk_bustype == MSK_PEX_BUS) {
        uint16_t v, width;

        v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
        /* Change Max. Read Request Size to 4096 bytes. */
        v &= ~PEX_DC_MAX_RRS_MSK;
        v |= PEX_DC_MAX_RD_RQ_SIZE(5);
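        /*
         * The PCIe max read request size field is a power-of-two
         * encoding of 128 << n bytes; n = 5 gives the 4096-byte
         * value named in the comment above.
         */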
        pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
        width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
        width = (width & PEX_LS_LINK_WI_MSK) >> 4;
        v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
        v = (v & PEX_LS_LINK_WI_MSK) >> 4;
        if (v != width)
            device_printf(sc->msk_dev,
                "negotiated width of link(x%d) != "
                "max. width of link(x%d)\n", width, v);
    }

    /* Clear status list. */
    bzero(sc->msk_stat_ring,
        sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
    sc->msk_stat_cons = 0;
    bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
    CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
    /* Set the status list base address. */
    addr = sc->msk_stat_ring_paddr;
    CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
    CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
    /* Set the status list last index. */
    CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
    if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
        sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
        /* WA for dev. #4.3 */
        CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
        /* WA for dev. #4.18 */
        CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
        CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
    } else {
        CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
        CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
        if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
            sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
            CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
        else
            CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
        CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
    }
    /*
     * Use default values for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
     */
    CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
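    /*
     * MSK_USECS() converts microseconds into core clock ticks using
     * sc->msk_clock (in MHz); e.g. 1000us on a 125MHz Yukon EC would
     * be 125000 ticks (assuming the usual MSK_USECS() definition in
     * if_mskreg.h).
     */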

    /* Enable status unit. */
    CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

    CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
    CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
    CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}

static int
msk_probe(device_t dev)
{
    struct msk_softc *sc;
    char desc[100];

    sc = device_get_softc(device_get_parent(dev));
    /*
     * Not much to do here.  We always know there will be
     * at least one GMAC present, and if there are two,
     * mskc_attach() will create a second device instance
     * for us.
     */
    snprintf(desc, sizeof(desc),
        "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
        model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
        sc->msk_hw_rev);
    device_set_desc_copy(dev, desc);

    return (BUS_PROBE_DEFAULT);
}

static int
msk_attach(device_t dev)
{
    struct msk_softc *sc;
    struct msk_if_softc *sc_if;
    struct ifnet *ifp;
    int i, port, error;
    uint8_t eaddr[6];

    if (dev == NULL)
        return (EINVAL);

    error = 0;
    sc_if = device_get_softc(dev);
    sc = device_get_softc(device_get_parent(dev));
    port = *(int *)device_get_ivars(dev);

    sc_if->msk_if_dev = dev;
    sc_if->msk_port = port;
    sc_if->msk_softc = sc;
    sc_if->msk_flags = sc->msk_pflags;
    sc->msk_if[port] = sc_if;
    /* Setup Tx/Rx queue register offsets. */
    if (port == MSK_PORT_A) {
        sc_if->msk_txq = Q_XA1;
        sc_if->msk_txsq = Q_XS1;
        sc_if->msk_rxq = Q_R1;
    } else {
        sc_if->msk_txq = Q_XA2;
        sc_if->msk_txsq = Q_XS2;
        sc_if->msk_rxq = Q_R2;
    }

    callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
    msk_sysctl_node(sc_if);

    if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
        goto fail;
    msk_rx_dma_jalloc(sc_if);

    ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
        error = ENOSPC;
        goto fail;
    }
    ifp->if_softc = sc_if;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_mtu = ETHERMTU;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    /*
     * The IFCAP_RXCSUM capability is intentionally disabled as the
     * hardware has a serious bug in Rx checksum offload for all Yukon
     * II family hardware.  It seems there is a workaround to make it
     * work sometimes.  However, the workaround also has to check OP
     * code sequences to verify whether the OP code is correct.
     * Sometimes it should compute the IP/TCP/UDP checksum in the
     * driver in order to verify the correctness of the checksum
     * computed by hardware.  If you have to compute the checksum with
     * software to verify the hardware's checksum, why have the
     * hardware compute the checksum at all?  So there is no reason to
     * spend time making Rx checksum offload work on Yukon II hardware.
     */
    ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
    /*
     * Enable Rx checksum offloading if the controller supports the
     * new descriptor format.
     */
    if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
        (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
        ifp->if_capabilities |= IFCAP_RXCSUM;
    ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
    ifp->if_capenable = ifp->if_capabilities;
    ifp->if_ioctl = msk_ioctl;
    ifp->if_start = msk_start;
    ifp->if_timer = 0;
    ifp->if_watchdog = NULL;
    ifp->if_init = msk_init;
    IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
    ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
    IFQ_SET_READY(&ifp->if_snd);

    TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);

    /*
     * Get the station address for this interface.  Note that
     * dual port cards actually come with three station
     * addresses: one for each port, plus an extra.  The
     * extra one is used by the SysKonnect driver software
     * as a 'virtual' station address for when both ports
     * are operating in failover mode.  Currently we don't
     * use this extra address.
     */
    MSK_IF_LOCK(sc_if);
    for (i = 0; i < ETHER_ADDR_LEN; i++)
        eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

    /*
     * Call MI attach routine.  Can't hold locks when calling into ether_*.
     */
    MSK_IF_UNLOCK(sc_if);
    ether_ifattach(ifp, eaddr);
    MSK_IF_LOCK(sc_if);

    /* VLAN capability setup. */
    ifp->if_capabilities |= IFCAP_VLAN_MTU;
    if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
        /*
         * Due to Tx checksum offload hardware bugs, msk(4) manually
         * computes the checksum for short frames.  For VLAN tagged
         * frames this workaround does not work, so disable checksum
         * offloading for VLAN interfaces.
         */
        ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
        /*
         * Enable Rx checksum offloading for VLAN tagged frames
         * if the controller supports the new descriptor format.
         */
        if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
            (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
            ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
    }
    ifp->if_capenable = ifp->if_capabilities;

    /*
     * Tell the upper layer(s) we support long frames.
     * Must appear after the call to ether_ifattach() because
     * ether_ifattach() sets ifi_hdrlen to the default value.
     */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

    /*
     * Do miibus setup.
     */
    MSK_IF_UNLOCK(sc_if);
    error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
        msk_mediastatus);
    if (error != 0) {
        device_printf(sc_if->msk_if_dev, "no PHY found!\n");
        ether_ifdetach(ifp);
        error = ENXIO;
        goto fail;
    }

fail:
    if (error != 0) {
        /* Access should be ok even though lock has been dropped. */
        sc->msk_if[port] = NULL;
        msk_detach(dev);
    }

    return (error);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
    struct msk_softc *sc;
    int error, msic, msir, *port, reg;

    sc = device_get_softc(dev);
    sc->msk_dev = dev;
    mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
        MTX_DEF);

    /*
     * Map control/status registers.
     */
    pci_enable_busmaster(dev);

    /* Allocate I/O resource. */
#ifdef MSK_USEIOSPACE
    sc->msk_res_spec = msk_res_spec_io;
#else
    sc->msk_res_spec = msk_res_spec_mem;
#endif
    sc->msk_irq_spec = msk_irq_spec_legacy;
    error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
    if (error) {
        if (sc->msk_res_spec == msk_res_spec_mem)
            sc->msk_res_spec = msk_res_spec_io;
        else
            sc->msk_res_spec = msk_res_spec_mem;
        error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
        if (error) {
            device_printf(dev, "couldn't allocate %s resources\n",
                sc->msk_res_spec == msk_res_spec_mem ? "memory" :
                "I/O");
            mtx_destroy(&sc->msk_mtx);
            return (error);
        }
    }

    CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
    sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
    sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
    /* Bail out if chip is not recognized. */
    if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
        sc->msk_hw_id > CHIP_ID_YUKON_FE_P) {
        device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
            sc->msk_hw_id, sc->msk_hw_rev);
        mtx_destroy(&sc->msk_mtx);
        return (ENXIO);
    }

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
        &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
        "max number of Rx events to process");

    sc->msk_process_limit = MSK_PROC_DEFAULT;
    error = resource_int_value(device_get_name(dev), device_get_unit(dev),
        "process_limit", &sc->msk_process_limit);
    if (error == 0) {
        if (sc->msk_process_limit < MSK_PROC_MIN ||
            sc->msk_process_limit > MSK_PROC_MAX) {
            device_printf(dev, "process_limit value out of range; "
                "using default: %d\n", MSK_PROC_DEFAULT);
            sc->msk_process_limit = MSK_PROC_DEFAULT;
        }
    }

    /* Soft reset. */
    CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
    CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
    sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
    if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
        sc->msk_coppertype = 0;
    else
        sc->msk_coppertype = 1;
    /* Check number of MACs. */
    sc->msk_num_port = 1;
    if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
        CFG_DUAL_MAC_MSK) {
        if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
            sc->msk_num_port++;
    }

    /* Check bus type. */
    if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
        sc->msk_bustype = MSK_PEX_BUS;
    else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
        sc->msk_bustype = MSK_PCIX_BUS;
    else
        sc->msk_bustype = MSK_PCI_BUS;

    switch (sc->msk_hw_id) {
    case CHIP_ID_YUKON_EC:
        sc->msk_clock = 125;	/* 125 MHz */
        sc->msk_pflags |= MSK_FLAG_JUMBO;
        break;
    case CHIP_ID_YUKON_EC_U:
        sc->msk_clock = 125;	/* 125 MHz */
        sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
        break;
    case CHIP_ID_YUKON_EX:
        sc->msk_clock = 125;	/* 125 MHz */
        sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
            MSK_FLAG_AUTOTX_CSUM;
        /*
         * Yukon Extreme seems to have a silicon bug in its
         * automatic Tx checksum calculation capability.
         */
        if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
            sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
        /*
         * Yukon Extreme A0 cannot use store-and-forward
         * for jumbo frames, so disable Tx checksum
         * offloading for jumbo frames.
         */
        if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
            sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
        break;
    case CHIP_ID_YUKON_FE:
        sc->msk_clock = 100;	/* 100 MHz */
        sc->msk_pflags |= MSK_FLAG_FASTETHER;
        break;
    case CHIP_ID_YUKON_FE_P:
        sc->msk_clock = 50;	/* 50 MHz */
        sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
            MSK_FLAG_AUTOTX_CSUM;
        if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
            /*
             * XXX
             * FE+ A0 has a status LE writeback bug, so msk(4)
             * does not rely on the status word of received
             * frames in msk_rxeof(), which in turn disables
             * all hardware assistance bits reported by the
             * status word as well as validity of the received
             * frame.  Just pass received frames to the upper
             * stack with minimal test and let the upper stack
             * handle them.
             */
            sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
                MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
        }
        break;
    case CHIP_ID_YUKON_XL:
        sc->msk_clock = 156;	/* 156 MHz */
        sc->msk_pflags |= MSK_FLAG_JUMBO;
        break;
    default:
        sc->msk_clock = 156;	/* 156 MHz */
        break;
    }

    /* Allocate IRQ resources. */
    msic = pci_msi_count(dev);
    if (bootverbose)
        device_printf(dev, "MSI count : %d\n", msic);
    /*
     * The Yukon II reports it can handle two messages, one for each
     * possible port.  We go ahead and allocate two messages and only
     * setup a handler for both if we have a dual port card.
     *
     * XXX: I haven't untangled the interrupt handler to handle dual
     * port cards with separate MSI messages, so for now I disable MSI
     * on dual port cards.
     */
    if (legacy_intr != 0)
        msi_disable = 1;
    if (msi_disable == 0) {
        switch (msic) {
        case 2:
        case 1: /* 88E8058 reports 1 MSI message */
            msir = msic;
            if (sc->msk_num_port == 1 &&
                pci_alloc_msi(dev, &msir) == 0) {
                if (msic == msir) {
                    sc->msk_pflags |= MSK_FLAG_MSI;
                    sc->msk_irq_spec = msic == 2 ?
                        msk_irq_spec_msi2 :
                        msk_irq_spec_msi;
                } else
                    pci_release_msi(dev);
            }
            break;
        default:
            device_printf(dev,
                "Unexpected number of MSI messages : %d\n", msic);
            break;
        }
    }

    error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
    if (error) {
        device_printf(dev, "couldn't allocate IRQ resources\n");
        goto fail;
    }

    if ((error = msk_status_dma_alloc(sc)) != 0)
        goto fail;

    /* Set base interrupt mask. */
    sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
    sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
        Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

    /* Reset the adapter. */
    mskc_reset(sc);

    if ((error = mskc_setup_rambuffer(sc)) != 0)
        goto fail;

    sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
    if (sc->msk_devs[MSK_PORT_A] == NULL) {
        device_printf(dev, "failed to add child for PORT_A\n");
        error = ENXIO;
        goto fail;
    }
    port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
    if (port == NULL) {
        device_printf(dev, "failed to allocate memory for "
            "ivars of PORT_A\n");
        error = ENXIO;
        goto fail;
    }
    *port = MSK_PORT_A;
    device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
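    /*
     * The ivars pointer is how the port number reaches the child
     * device: msk_attach() recovers it with
     * "port = *(int *)device_get_ivars(dev)".
     */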

    if (sc->msk_num_port > 1) {
        sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
        if (sc->msk_devs[MSK_PORT_B] == NULL) {
            device_printf(dev, "failed to add child for PORT_B\n");
            error = ENXIO;
            goto fail;
        }
        port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
        if (port == NULL) {
            device_printf(dev, "failed to allocate memory for "
                "ivars of PORT_B\n");
            error = ENXIO;
            goto fail;
        }
        *port = MSK_PORT_B;
        device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
    }

    error = bus_generic_attach(dev);
    if (error) {
        device_printf(dev, "failed to attach port(s)\n");
        goto fail;
    }

    /* Hook interrupt last to avoid having to lock softc. */
    if (legacy_intr != 0)
        error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
            INTR_MPSAFE, NULL, msk_legacy_intr, sc,
            &sc->msk_intrhand[0]);
    else {
        TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
        sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->msk_tq);
        taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
            device_get_nameunit(sc->msk_dev));
        error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
            INTR_MPSAFE, msk_intr, NULL, sc, &sc->msk_intrhand[0]);
    }

    if (error != 0) {
        device_printf(dev, "couldn't set up interrupt handler\n");
        if (legacy_intr == 0)
            taskqueue_free(sc->msk_tq);
        sc->msk_tq = NULL;
        goto fail;
    }
fail:
    if (error != 0)
        mskc_detach(dev);

    return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
msk_detach(device_t dev)
{
    struct msk_softc *sc;
    struct msk_if_softc *sc_if;
    struct ifnet *ifp;

    sc_if = device_get_softc(dev);
    KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
        ("msk mutex not initialized in msk_detach"));
    MSK_IF_LOCK(sc_if);

    ifp = sc_if->msk_ifp;
    if (device_is_attached(dev)) {
        sc_if->msk_flags |= MSK_FLAG_DETACH;
        msk_stop(sc_if);
        /* Can't hold locks while calling detach. */
        MSK_IF_UNLOCK(sc_if);
        callout_drain(&sc_if->msk_tick_ch);
        taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task);
        ether_ifdetach(ifp);
        MSK_IF_LOCK(sc_if);
    }

    /*
     * We're generally called from mskc_detach() which is using
     * device_delete_child() to get to here.  It's already trashed
     * miibus for us, so don't do it here or we'll panic.
     *
     * if (sc_if->msk_miibus != NULL) {
     *	device_delete_child(dev, sc_if->msk_miibus);
     *	sc_if->msk_miibus = NULL;
     * }
     */

    msk_rx_dma_jfree(sc_if);
    msk_txrx_dma_free(sc_if);
    bus_generic_detach(dev);
    if (ifp)
        if_free(ifp);
    sc = sc_if->msk_softc;
    sc->msk_if[sc_if->msk_port] = NULL;
    MSK_IF_UNLOCK(sc_if);

    return (0);
}

static int
mskc_detach(device_t dev)
{
    struct msk_softc *sc;

    sc = device_get_softc(dev);
    KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));

    if (device_is_alive(dev)) {
        if (sc->msk_devs[MSK_PORT_A] != NULL) {
            free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
                M_DEVBUF);
            device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
        }
        if (sc->msk_devs[MSK_PORT_B] != NULL) {
            free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
                M_DEVBUF);
            device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
        }
        bus_generic_detach(dev);
    }

    /* Disable all interrupts. */
    CSR_WRITE_4(sc, B0_IMSK, 0);
    CSR_READ_4(sc, B0_IMSK);
    CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
    CSR_READ_4(sc, B0_HWE_IMSK);

    /* LED Off. */
    CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);

    /* Put hardware into reset. */
    CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

    msk_status_dma_free(sc);

    if (legacy_intr == 0 && sc->msk_tq != NULL) {
        taskqueue_drain(sc->msk_tq, &sc->msk_int_task);
        taskqueue_free(sc->msk_tq);
        sc->msk_tq = NULL;
    }
    if (sc->msk_intrhand[0]) {
        bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]);
        sc->msk_intrhand[0] = NULL;
    }
    if (sc->msk_intrhand[1]) {
        bus_teardown_intr(dev, sc->msk_irq[1], sc->msk_intrhand[1]);
        sc->msk_intrhand[1] = NULL;
    }
    bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
    if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
        pci_release_msi(dev);
    bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
    mtx_destroy(&sc->msk_mtx);

    return (0);
}

struct msk_dmamap_arg {
    bus_addr_t	msk_busaddr;
};

static void
msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct msk_dmamap_arg *ctx;

    if (error != 0)
        return;
    ctx = arg;
    ctx->msk_busaddr = segs[0].ds_addr;
}
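
/*
 * bus_dmamap_load() reports the assigned bus address through this
 * callback.  For the coherent allocations used below the callback runs
 * before the load call returns, so callers can read msk_busaddr from
 * the on-stack msk_dmamap_arg right away.
 */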

/* Create status DMA region. */
static int
msk_status_dma_alloc(struct msk_softc *sc)
{
    struct msk_dmamap_arg ctx;
    int error;

    error = bus_dma_tag_create(
        bus_get_dma_tag(sc->msk_dev),	/* parent */
        MSK_STAT_ALIGN, 0,		/* alignment, boundary */
        BUS_SPACE_MAXADDR,		/* lowaddr */
        BUS_SPACE_MAXADDR,		/* highaddr */
        NULL, NULL,			/* filter, filterarg */
        MSK_STAT_RING_SZ,		/* maxsize */
        1,				/* nsegments */
        MSK_STAT_RING_SZ,		/* maxsegsize */
        0,				/* flags */
        NULL, NULL,			/* lockfunc, lockarg */
        &sc->msk_stat_tag);
    if (error != 0) {
        device_printf(sc->msk_dev,
            "failed to create status DMA tag\n");
        return (error);
    }

    /* Allocate DMA'able memory and load the DMA map for status ring. */
    error = bus_dmamem_alloc(sc->msk_stat_tag,
        (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
        BUS_DMA_ZERO, &sc->msk_stat_map);
    if (error != 0) {
        device_printf(sc->msk_dev,
            "failed to allocate DMA'able memory for status ring\n");
        return (error);
    }

    ctx.msk_busaddr = 0;
    error = bus_dmamap_load(sc->msk_stat_tag,
        sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
        msk_dmamap_cb, &ctx, 0);
    if (error != 0) {
        device_printf(sc->msk_dev,
            "failed to load DMA'able memory for status ring\n");
        return (error);
    }
    sc->msk_stat_ring_paddr = ctx.msk_busaddr;

    return (0);
}

static void
msk_status_dma_free(struct msk_softc *sc)
{

    /* Destroy status block. */
    if (sc->msk_stat_tag) {
        if (sc->msk_stat_map) {
            bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
            if (sc->msk_stat_ring) {
                bus_dmamem_free(sc->msk_stat_tag,
                    sc->msk_stat_ring, sc->msk_stat_map);
                sc->msk_stat_ring = NULL;
            }
            sc->msk_stat_map = NULL;
        }
        bus_dma_tag_destroy(sc->msk_stat_tag);
        sc->msk_stat_tag = NULL;
    }
}
2077 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
2079 struct msk_dmamap_arg ctx;
2080 struct msk_txdesc *txd;
2081 struct msk_rxdesc *rxd;
2085 /* Create parent DMA tag. */
2088 * It seems that the Yukon II supports full 64-bit DMA operations,
2089 * but it needs two descriptors (list elements) for a 64-bit DMA
2090 * transfer. Since we don't know in advance which DMA address
2091 * mapping (32-bit or 64-bit) will be used for each mbuf, we limit
2092 * its DMA space to the 32-bit address range. Otherwise, we would
2093 * have to check which DMA address is used and chain another
2094 * descriptor for the 64-bit DMA operation, which also means the
2095 * descriptor ring size becomes variable. Limiting DMA addresses to
2096 * the 32-bit address space greatly simplifies descriptor handling
2097 * and possibly increases performance a bit due to more efficient
2098 * handling of descriptors. Apart from harassing checksum offloading
2099 * mechanisms, it seems like a really bad idea to use a separate
2100 * descriptor for a 64-bit DMA operation just to save a little
2101 * descriptor memory; I've never seen such an exotic scheme on ethernet interface hardware.
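/*
 * For illustration only (a sketch, not driver code): with a
 * 64-bit-capable tag, the transmit path would have to emit an extra
 * address list element whenever the upper 32 bits of a segment
 * address change, along the lines of:
 *
 *	if (MSK_ADDR_HI(addr) != last_hi) {
 *		tx_le->msk_addr = htole32(MSK_ADDR_HI(addr));
 *		tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
 *		last_hi = MSK_ADDR_HI(addr);
 *	}
 *
 * (OP_ADDR64 and MSK_ADDR_HI are assumed names here.) Pinning
 * lowaddr to BUS_SPACE_MAXADDR_32BIT below sidesteps all of this.
 */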
2103 error = bus_dma_tag_create(
2104 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
2105 1, 0, /* alignment, boundary */
2106 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2107 BUS_SPACE_MAXADDR, /* highaddr */
2108 NULL, NULL, /* filter, filterarg */
2109 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2111 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
2113 NULL, NULL, /* lockfunc, lockarg */
2114 &sc_if->msk_cdata.msk_parent_tag);
2116 device_printf(sc_if->msk_if_dev,
2117 "failed to create parent DMA tag\n");
2120 /* Create tag for Tx ring. */
2121 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2122 MSK_RING_ALIGN, 0, /* alignment, boundary */
2123 BUS_SPACE_MAXADDR, /* lowaddr */
2124 BUS_SPACE_MAXADDR, /* highaddr */
2125 NULL, NULL, /* filter, filterarg */
2126 MSK_TX_RING_SZ, /* maxsize */
2128 MSK_TX_RING_SZ, /* maxsegsize */
2130 NULL, NULL, /* lockfunc, lockarg */
2131 &sc_if->msk_cdata.msk_tx_ring_tag);
2133 device_printf(sc_if->msk_if_dev,
2134 "failed to create Tx ring DMA tag\n");
2138 /* Create tag for Rx ring. */
2139 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2140 MSK_RING_ALIGN, 0, /* alignment, boundary */
2141 BUS_SPACE_MAXADDR, /* lowaddr */
2142 BUS_SPACE_MAXADDR, /* highaddr */
2143 NULL, NULL, /* filter, filterarg */
2144 MSK_RX_RING_SZ, /* maxsize */
2146 MSK_RX_RING_SZ, /* maxsegsize */
2148 NULL, NULL, /* lockfunc, lockarg */
2149 &sc_if->msk_cdata.msk_rx_ring_tag);
2151 device_printf(sc_if->msk_if_dev,
2152 "failed to create Rx ring DMA tag\n");
2156 /* Create tag for Tx buffers. */
2157 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2158 1, 0, /* alignment, boundary */
2159 BUS_SPACE_MAXADDR, /* lowaddr */
2160 BUS_SPACE_MAXADDR, /* highaddr */
2161 NULL, NULL, /* filter, filterarg */
2162 MSK_TSO_MAXSIZE, /* maxsize */
2163 MSK_MAXTXSEGS, /* nsegments */
2164 MSK_TSO_MAXSGSIZE, /* maxsegsize */
2166 NULL, NULL, /* lockfunc, lockarg */
2167 &sc_if->msk_cdata.msk_tx_tag);
2169 device_printf(sc_if->msk_if_dev,
2170 "failed to create Tx DMA tag\n");
2176 * Work around a hardware hang which seems to happen when the Rx
2177 * buffer is not aligned on a multiple of the FIFO word (8 bytes).
2179 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2180 rxalign = MSK_RX_BUF_ALIGN;
2181 /* Create tag for Rx buffers. */
2182 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2183 rxalign, 0, /* alignment, boundary */
2184 BUS_SPACE_MAXADDR, /* lowaddr */
2185 BUS_SPACE_MAXADDR, /* highaddr */
2186 NULL, NULL, /* filter, filterarg */
2187 MCLBYTES, /* maxsize */
2189 MCLBYTES, /* maxsegsize */
2191 NULL, NULL, /* lockfunc, lockarg */
2192 &sc_if->msk_cdata.msk_rx_tag);
2194 device_printf(sc_if->msk_if_dev,
2195 "failed to create Rx DMA tag\n");
2199 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
2200 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2201 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2202 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2204 device_printf(sc_if->msk_if_dev,
2205 "failed to allocate DMA'able memory for Tx ring\n");
2209 ctx.msk_busaddr = 0;
2210 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2211 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2212 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2214 device_printf(sc_if->msk_if_dev,
2215 "failed to load DMA'able memory for Tx ring\n");
2218 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2220 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
2221 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2222 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2223 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2225 device_printf(sc_if->msk_if_dev,
2226 "failed to allocate DMA'able memory for Rx ring\n");
2230 ctx.msk_busaddr = 0;
2231 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2232 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2233 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2235 device_printf(sc_if->msk_if_dev,
2236 "failed to load DMA'able memory for Rx ring\n");
2239 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2241 /* Create DMA maps for Tx buffers. */
2242 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2243 txd = &sc_if->msk_cdata.msk_txdesc[i];
2245 txd->tx_dmamap = NULL;
2246 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2249 device_printf(sc_if->msk_if_dev,
2250 "failed to create Tx dmamap\n");
2254 /* Create DMA maps for Rx buffers. */
2255 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2256 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2257 device_printf(sc_if->msk_if_dev,
2258 "failed to create spare Rx dmamap\n");
2261 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2262 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2264 rxd->rx_dmamap = NULL;
2265 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2268 device_printf(sc_if->msk_if_dev,
2269 "failed to create Rx dmamap\n");
2279 msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
2281 struct msk_dmamap_arg ctx;
2282 struct msk_rxdesc *jrxd;
2286 if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
2287 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2288 device_printf(sc_if->msk_if_dev,
2289 "disabling jumbo frame support\n");
2292 /* Create tag for jumbo Rx ring. */
2293 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2294 MSK_RING_ALIGN, 0, /* alignment, boundary */
2295 BUS_SPACE_MAXADDR, /* lowaddr */
2296 BUS_SPACE_MAXADDR, /* highaddr */
2297 NULL, NULL, /* filter, filterarg */
2298 MSK_JUMBO_RX_RING_SZ, /* maxsize */
2300 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */
2302 NULL, NULL, /* lockfunc, lockarg */
2303 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2305 device_printf(sc_if->msk_if_dev,
2306 "failed to create jumbo Rx ring DMA tag\n");
2312 * Work around a hardware hang which seems to happen when the Rx
2313 * buffer is not aligned on a multiple of the FIFO word (8 bytes).
2315 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2316 rxalign = MSK_RX_BUF_ALIGN;
2317 /* Create tag for jumbo Rx buffers. */
2318 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2319 rxalign, 0, /* alignment, boundary */
2320 BUS_SPACE_MAXADDR, /* lowaddr */
2321 BUS_SPACE_MAXADDR, /* highaddr */
2322 NULL, NULL, /* filter, filterarg */
2323 MJUM9BYTES, /* maxsize */
2325 MJUM9BYTES, /* maxsegsize */
2327 NULL, NULL, /* lockfunc, lockarg */
2328 &sc_if->msk_cdata.msk_jumbo_rx_tag);
2330 device_printf(sc_if->msk_if_dev,
2331 "failed to create jumbo Rx DMA tag\n");
2335 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2336 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2337 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2338 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2339 &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2341 device_printf(sc_if->msk_if_dev,
2342 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2346 ctx.msk_busaddr = 0;
2347 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2348 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2349 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2350 msk_dmamap_cb, &ctx, 0);
2352 device_printf(sc_if->msk_if_dev,
2353 "failed to load DMA'able memory for jumbo Rx ring\n");
2356 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2358 /* Create DMA maps for jumbo Rx buffers. */
2359 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2360 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2361 device_printf(sc_if->msk_if_dev,
2362 "failed to create spare jumbo Rx dmamap\n");
2365 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2366 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2368 jrxd->rx_dmamap = NULL;
2369 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2372 device_printf(sc_if->msk_if_dev,
2373 "failed to create jumbo Rx dmamap\n");
2381 msk_rx_dma_jfree(sc_if);
2382 device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
2383 "due to resource shortage\n");
2384 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2389 msk_txrx_dma_free(struct msk_if_softc *sc_if)
2391 struct msk_txdesc *txd;
2392 struct msk_rxdesc *rxd;
2396 if (sc_if->msk_cdata.msk_tx_ring_tag) {
2397 if (sc_if->msk_cdata.msk_tx_ring_map)
2398 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2399 sc_if->msk_cdata.msk_tx_ring_map);
2400 if (sc_if->msk_cdata.msk_tx_ring_map &&
2401 sc_if->msk_rdata.msk_tx_ring)
2402 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2403 sc_if->msk_rdata.msk_tx_ring,
2404 sc_if->msk_cdata.msk_tx_ring_map);
2405 sc_if->msk_rdata.msk_tx_ring = NULL;
2406 sc_if->msk_cdata.msk_tx_ring_map = NULL;
2407 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2408 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2411 if (sc_if->msk_cdata.msk_rx_ring_tag) {
2412 if (sc_if->msk_cdata.msk_rx_ring_map)
2413 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2414 sc_if->msk_cdata.msk_rx_ring_map);
2415 if (sc_if->msk_cdata.msk_rx_ring_map &&
2416 sc_if->msk_rdata.msk_rx_ring)
2417 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2418 sc_if->msk_rdata.msk_rx_ring,
2419 sc_if->msk_cdata.msk_rx_ring_map);
2420 sc_if->msk_rdata.msk_rx_ring = NULL;
2421 sc_if->msk_cdata.msk_rx_ring_map = NULL;
2422 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2423 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2426 if (sc_if->msk_cdata.msk_tx_tag) {
2427 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2428 txd = &sc_if->msk_cdata.msk_txdesc[i];
2429 if (txd->tx_dmamap) {
2430 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2432 txd->tx_dmamap = NULL;
2435 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2436 sc_if->msk_cdata.msk_tx_tag = NULL;
2439 if (sc_if->msk_cdata.msk_rx_tag) {
2440 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2441 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2442 if (rxd->rx_dmamap) {
2443 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2445 rxd->rx_dmamap = NULL;
2448 if (sc_if->msk_cdata.msk_rx_sparemap) {
2449 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2450 sc_if->msk_cdata.msk_rx_sparemap);
2451 sc_if->msk_cdata.msk_rx_sparemap = 0;
2453 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2454 sc_if->msk_cdata.msk_rx_tag = NULL;
2456 if (sc_if->msk_cdata.msk_parent_tag) {
2457 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2458 sc_if->msk_cdata.msk_parent_tag = NULL;
2463 msk_rx_dma_jfree(struct msk_if_softc *sc_if)
2465 struct msk_rxdesc *jrxd;
2468 /* Jumbo Rx ring. */
2469 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2470 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2471 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2472 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2473 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2474 sc_if->msk_rdata.msk_jumbo_rx_ring)
2475 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2476 sc_if->msk_rdata.msk_jumbo_rx_ring,
2477 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2478 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2479 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2480 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2481 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2483 /* Jumbo Rx buffers. */
2484 if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2485 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2486 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2487 if (jrxd->rx_dmamap) {
2489 sc_if->msk_cdata.msk_jumbo_rx_tag,
2491 jrxd->rx_dmamap = NULL;
2494 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2495 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2496 sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2497 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2499 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2500 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2505 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2507 struct msk_txdesc *txd, *txd_last;
2508 struct msk_tx_desc *tx_le;
2511 bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2512 uint32_t control, prod, si;
2513 uint16_t offset, tcp_offset, tso_mtu;
2514 int error, i, nseg, tso;
2516 MSK_IF_LOCK_ASSERT(sc_if);
2518 tcp_offset = offset = 0;
2520 if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2521 (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
2522 ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
2523 (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
2525 * Since the mbuf has no protocol-specific structure information
2526 * in it, we have to inspect the protocol headers here to set up
2527 * TSO and checksum offload. I don't know why Marvell made such a
2528 * decision in the chip design, because other GigE hardware
2529 * normally takes care of all these chores itself. However, the
2530 * TSO performance of Yukon II is good enough that it's worth
2531 * implementing.
2533 struct ether_header *eh;
2537 if (M_WRITABLE(m) == 0) {
2538 /* Get a writable copy. */
2539 m = m_dup(*m_head, M_DONTWAIT);
2548 offset = sizeof(struct ether_header);
2549 m = m_pullup(m, offset);
2554 eh = mtod(m, struct ether_header *);
2555 /* Check if hardware VLAN insertion is off. */
2556 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2557 offset = sizeof(struct ether_vlan_header);
2558 m = m_pullup(m, offset);
2564 m = m_pullup(m, offset + sizeof(struct ip));
2569 ip = (struct ip *)(mtod(m, char *) + offset);
2570 offset += (ip->ip_hl << 2);
2571 tcp_offset = offset;
2573 * It seems that Yukon II has a Tx checksum offload bug for
2574 * small TCP packets that are less than 60 bytes in size
2575 * (e.g. TCP window probe packets, pure ACK packets).
2576 * Common workarounds like padding the frame with zeros to the
2577 * minimum ethernet frame size didn't work at all. Instead of
2578 * disabling checksum offload completely we resort to a software
2579 * checksum routine when we encounter short TCP frames.
2581 * Short UDP packets appear to be handled correctly by
2582 * Yukon II. Also, I assume this bug does not happen on
2583 * controllers that use the newer descriptor format or
2584 * automatic Tx checksum calculation.
2586 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2587 (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
2588 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2589 m = m_pullup(m, offset + sizeof(struct tcphdr));
2594 *(uint16_t *)(m->m_data + offset +
2595 m->m_pkthdr.csum_data) = in_cksum_skip(m,
2596 m->m_pkthdr.len, offset);
2597 m->m_pkthdr.csum_flags &= ~CSUM_TCP;
2599 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2600 m = m_pullup(m, offset + sizeof(struct tcphdr));
2605 tcp = (struct tcphdr *)(mtod(m, char *) + offset);
2606 offset += (tcp->th_off << 2);
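/*
 * At this point 'offset' spans the Ethernet, IP and TCP headers; for
 * an untagged frame with no options that is 14 + 20 + 20 = 54 bytes.
 * The legacy (non-DESCV2) TSO list element built below expects this
 * header length added to the MSS, which is why tso_mtu is computed
 * as offset + tso_segsz in that case.
 */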
2611 prod = sc_if->msk_cdata.msk_tx_prod;
2612 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2614 map = txd->tx_dmamap;
2615 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2616 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2617 if (error == EFBIG) {
2618 m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
2625 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2626 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2632 } else if (error != 0)
2640 /* Check number of available descriptors. */
2641 if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2642 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2643 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2651 /* Check TSO support. */
2652 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2653 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2654 tso_mtu = m->m_pkthdr.tso_segsz;
2656 tso_mtu = offset + m->m_pkthdr.tso_segsz;
2657 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2658 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2659 tx_le->msk_addr = htole32(tso_mtu);
2660 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2661 tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
2663 tx_le->msk_control =
2664 htole32(OP_LRGLEN | HW_OWNER);
2665 sc_if->msk_cdata.msk_tx_cnt++;
2666 MSK_INC(prod, MSK_TX_RING_CNT);
2667 sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
2671 /* Check if we have a VLAN tag to insert. */
2672 if ((m->m_flags & M_VLANTAG) != 0) {
2674 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2675 tx_le->msk_addr = htole32(0);
2676 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2677 htons(m->m_pkthdr.ether_vtag));
2678 sc_if->msk_cdata.msk_tx_cnt++;
2679 MSK_INC(prod, MSK_TX_RING_CNT);
2681 tx_le->msk_control |= htole32(OP_VLAN |
2682 htons(m->m_pkthdr.ether_vtag));
2684 control |= INS_VLAN;
2686 /* Check if we have to handle checksum offload. */
2687 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
2688 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
2691 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2692 tx_le->msk_addr = htole32(((tcp_offset +
2693 m->m_pkthdr.csum_data) & 0xffff) |
2694 ((uint32_t)tcp_offset << 16));
2695 tx_le->msk_control = htole32(1 << 16 |
2696 (OP_TCPLISW | HW_OWNER));
2697 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2698 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2700 sc_if->msk_cdata.msk_tx_cnt++;
2701 MSK_INC(prod, MSK_TX_RING_CNT);
2706 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2707 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2709 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2712 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2714 sc_if->msk_cdata.msk_tx_cnt++;
2715 MSK_INC(prod, MSK_TX_RING_CNT);
2717 for (i = 1; i < nseg; i++) {
2718 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2719 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2720 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2721 OP_BUFFER | HW_OWNER);
2722 sc_if->msk_cdata.msk_tx_cnt++;
2723 MSK_INC(prod, MSK_TX_RING_CNT);
2725 /* Update producer index. */
2726 sc_if->msk_cdata.msk_tx_prod = prod;
2728 /* Set EOP on the last descriptor. */
2729 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2730 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2731 tx_le->msk_control |= htole32(EOP);
2733 /* Turn the first descriptor ownership to hardware. */
2734 tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2735 tx_le->msk_control |= htole32(HW_OWNER);
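/*
 * Ordering matters here: every other LE of this frame was built with
 * HW_OWNER already set, and only the first LE (at index si) had it
 * withheld. Setting it last hands the whole chain to the chip
 * atomically, so the hardware cannot start fetching a half-built
 * frame.
 */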
2737 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2738 map = txd_last->tx_dmamap;
2739 txd_last->tx_dmamap = txd->tx_dmamap;
2740 txd->tx_dmamap = map;
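/*
 * The dmamap that was actually loaded for this frame belongs to the
 * descriptor at the start index, while the mbuf is recorded at the
 * EOP descriptor. Swapping the two maps keeps the loaded map and the
 * mbuf paired on the same descriptor, so msk_txeof() can unload the
 * right map when the frame completes.
 */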
2743 /* Sync descriptors. */
2744 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2745 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2746 sc_if->msk_cdata.msk_tx_ring_map,
2747 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2753 msk_tx_task(void *arg, int pending)
2762 msk_start(struct ifnet *ifp)
2764 struct msk_if_softc *sc_if;
2765 struct mbuf *m_head;
2768 sc_if = ifp->if_softc;
2772 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2773 IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
2774 MSK_IF_UNLOCK(sc_if);
2778 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2779 sc_if->msk_cdata.msk_tx_cnt <
2780 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2781 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2785 * Pack the data into the transmit ring. If we
2786 * don't have room, set the OACTIVE flag and wait
2787 * for the NIC to drain the ring.
2789 if (msk_encap(sc_if, &m_head) != 0) {
2792 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2793 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2799 * If there's a BPF listener, bounce a copy of this frame
2802 ETHER_BPF_MTAP(ifp, m_head);
2807 CSR_WRITE_2(sc_if->msk_softc,
2808 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2809 sc_if->msk_cdata.msk_tx_prod);
2811 /* Set a timeout in case the chip goes out to lunch. */
2812 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
2815 MSK_IF_UNLOCK(sc_if);
2819 msk_watchdog(struct msk_if_softc *sc_if)
2825 MSK_IF_LOCK_ASSERT(sc_if);
2827 if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
2829 ifp = sc_if->msk_ifp;
2830 if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
2832 if_printf(sc_if->msk_ifp, "watchdog timeout "
2835 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2836 msk_init_locked(sc_if);
2841 * Reclaim first as there is a possibility of losing Tx completion
2844 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2845 idx = CSR_READ_2(sc_if->msk_softc, ridx);
2846 if (sc_if->msk_cdata.msk_tx_cons != idx) {
2847 msk_txeof(sc_if, idx);
2848 if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2849 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2851 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2852 taskqueue_enqueue(taskqueue_fast,
2853 &sc_if->msk_tx_task);
2858 if_printf(ifp, "watchdog timeout\n");
2860 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2861 msk_init_locked(sc_if);
2862 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2863 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task);
2867 mskc_shutdown(device_t dev)
2869 struct msk_softc *sc;
2872 sc = device_get_softc(dev);
2874 for (i = 0; i < sc->msk_num_port; i++) {
2875 if (sc->msk_if[i] != NULL)
2876 msk_stop(sc->msk_if[i]);
2879 /* Disable all interrupts. */
2880 CSR_WRITE_4(sc, B0_IMSK, 0);
2881 CSR_READ_4(sc, B0_IMSK);
2882 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2883 CSR_READ_4(sc, B0_HWE_IMSK);
2885 /* Put the hardware into reset. */
2886 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2893 mskc_suspend(device_t dev)
2895 struct msk_softc *sc;
2898 sc = device_get_softc(dev);
2902 for (i = 0; i < sc->msk_num_port; i++) {
2903 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2904 ((sc->msk_if[i]->msk_ifp->if_drv_flags &
2905 IFF_DRV_RUNNING) != 0))
2906 msk_stop(sc->msk_if[i]);
2909 /* Disable all interrupts. */
2910 CSR_WRITE_4(sc, B0_IMSK, 0);
2911 CSR_READ_4(sc, B0_IMSK);
2912 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2913 CSR_READ_4(sc, B0_HWE_IMSK);
2915 msk_phy_power(sc, MSK_PHY_POWERDOWN);
2917 /* Put the hardware into reset. */
2918 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2919 sc->msk_pflags |= MSK_FLAG_SUSPEND;
2927 mskc_resume(device_t dev)
2929 struct msk_softc *sc;
2932 sc = device_get_softc(dev);
2937 for (i = 0; i < sc->msk_num_port; i++) {
2938 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2939 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
2940 sc->msk_if[i]->msk_ifp->if_drv_flags &=
2942 msk_init_locked(sc->msk_if[i]);
2945 sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
2952 #ifndef __NO_STRICT_ALIGNMENT
2953 static __inline void
2954 msk_fixup_rx(struct mbuf *m)
2957 uint16_t *src, *dst;
2959 src = mtod(m, uint16_t *);
2960 dst = src - ((MSK_RX_BUF_ALIGN - ETHER_ALIGN) / sizeof(uint16_t));
2962 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2963 *dst++ = *src++;
2965 m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
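/*
 * Worked example: with MSK_RX_BUF_ALIGN == 8 and ETHER_ALIGN == 2 the
 * frame was DMA'd to an 8-byte-aligned address and is copied back by
 * 6 bytes, so the 14-byte Ethernet header now starts 2 bytes past a
 * 4-byte boundary and the IP header that follows it ends up 32-bit
 * aligned, as strict-alignment machines require.
 */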
2970 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
2975 struct msk_rxdesc *rxd;
2978 ifp = sc_if->msk_ifp;
2980 MSK_IF_LOCK_ASSERT(sc_if);
2982 cons = sc_if->msk_cdata.msk_rx_cons;
2984 rxlen = status >> 16;
2985 if ((status & GMR_FS_VLAN) != 0 &&
2986 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2987 rxlen -= ETHER_VLAN_ENCAP_LEN;
2988 if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
2990 * For controllers that return a bogus status code,
2991 * just do a minimal check and let the upper stack
2992 * handle this frame.
2994 if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
2996 msk_discard_rxbuf(sc_if, cons);
2999 } else if (len > sc_if->msk_framesize ||
3000 ((status & GMR_FS_ANY_ERR) != 0) ||
3001 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3002 /* Don't count flow-control packet as errors. */
3003 if ((status & GMR_FS_GOOD_FC) == 0)
3005 msk_discard_rxbuf(sc_if, cons);
3008 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
3010 if (msk_newbuf(sc_if, cons) != 0) {
3012 /* Reuse old buffer. */
3013 msk_discard_rxbuf(sc_if, cons);
3016 m->m_pkthdr.rcvif = ifp;
3017 m->m_pkthdr.len = m->m_len = len;
3018 #ifndef __NO_STRICT_ALIGNMENT
3019 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3023 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
3024 (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
3025 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3026 if ((control & CSS_IPV4_CSUM_OK) != 0)
3027 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3028 if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
3029 (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
3030 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3031 CSUM_PSEUDO_HDR;
3032 m->m_pkthdr.csum_data = 0xffff;
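/*
 * Setting csum_data to 0xffff together with CSUM_DATA_VALID and
 * CSUM_PSEUDO_HDR tells the stack the TCP/UDP checksum has been
 * fully verified, so no software pseudo-header check is needed.
 */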
3035 /* Check for VLAN tagged packets. */
3036 if ((status & GMR_FS_VLAN) != 0 &&
3037 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3038 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3039 m->m_flags |= M_VLANTAG;
3041 MSK_IF_UNLOCK(sc_if);
3042 (*ifp->if_input)(ifp, m);
3046 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
3047 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
3051 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
3056 struct msk_rxdesc *jrxd;
3059 ifp = sc_if->msk_ifp;
3061 MSK_IF_LOCK_ASSERT(sc_if);
3063 cons = sc_if->msk_cdata.msk_rx_cons;
3065 rxlen = status >> 16;
3066 if ((status & GMR_FS_VLAN) != 0 &&
3067 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3068 rxlen -= ETHER_VLAN_ENCAP_LEN;
3069 if (len > sc_if->msk_framesize ||
3070 ((status & GMR_FS_ANY_ERR) != 0) ||
3071 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3072 /* Don't count flow-control packet as errors. */
3073 if ((status & GMR_FS_GOOD_FC) == 0)
3075 msk_discard_jumbo_rxbuf(sc_if, cons);
3078 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
3080 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
3082 /* Reuse old buffer. */
3083 msk_discard_jumbo_rxbuf(sc_if, cons);
3086 m->m_pkthdr.rcvif = ifp;
3087 m->m_pkthdr.len = m->m_len = len;
3088 #ifndef __NO_STRICT_ALIGNMENT
3089 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3093 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
3094 (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
3095 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3096 if ((control & CSS_IPV4_CSUM_OK) != 0)
3097 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3098 if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
3099 (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
3100 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3101 CSUM_PSEUDO_HDR;
3102 m->m_pkthdr.csum_data = 0xffff;
3105 /* Check for VLAN tagged packets. */
3106 if ((status & GMR_FS_VLAN) != 0 &&
3107 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3108 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3109 m->m_flags |= M_VLANTAG;
3111 MSK_IF_UNLOCK(sc_if);
3112 (*ifp->if_input)(ifp, m);
3116 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
3117 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
3121 msk_txeof(struct msk_if_softc *sc_if, int idx)
3123 struct msk_txdesc *txd;
3124 struct msk_tx_desc *cur_tx;
3129 MSK_IF_LOCK_ASSERT(sc_if);
3131 ifp = sc_if->msk_ifp;
3133 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
3134 sc_if->msk_cdata.msk_tx_ring_map,
3135 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3137 * Go through our tx ring and free mbufs for those
3138 * frames that have been sent.
3140 cons = sc_if->msk_cdata.msk_tx_cons;
3142 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
3143 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
3146 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
3147 control = le32toh(cur_tx->msk_control);
3148 sc_if->msk_cdata.msk_tx_cnt--;
3149 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3150 if ((control & EOP) == 0)
3152 txd = &sc_if->msk_cdata.msk_txdesc[cons];
3153 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3154 BUS_DMASYNC_POSTWRITE);
3155 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3158 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3165 sc_if->msk_cdata.msk_tx_cons = cons;
3166 if (sc_if->msk_cdata.msk_tx_cnt == 0)
3167 sc_if->msk_watchdog_timer = 0;
3168 /* No need to sync LEs as we didn't update LEs. */
3173 msk_tick(void *xsc_if)
3175 struct msk_if_softc *sc_if;
3176 struct mii_data *mii;
3180 MSK_IF_LOCK_ASSERT(sc_if);
3182 mii = device_get_softc(sc_if->msk_miibus);
3185 msk_watchdog(sc_if);
3186 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3190 msk_intr_phy(struct msk_if_softc *sc_if)
3194 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3195 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
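/*
 * The PHY interrupt status register is read twice, presumably
 * because the latched cause bits are cleared on read and the first
 * read flushes a stale value; only the second read is acted upon.
 */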
3196 /* Handle FIFO Underrun/Overflow? */
3197 if ((status & PHY_M_IS_FIFO_ERROR))
3198 device_printf(sc_if->msk_if_dev,
3199 "PHY FIFO underrun/overflow.\n");
3203 msk_intr_gmac(struct msk_if_softc *sc_if)
3205 struct msk_softc *sc;
3208 sc = sc_if->msk_softc;
3209 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3211 /* GMAC Rx FIFO overrun. */
3212 if ((status & GM_IS_RX_FF_OR) != 0) {
3213 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3215 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
3217 /* GMAC Tx FIFO underrun. */
3218 if ((status & GM_IS_TX_FF_UR) != 0) {
3219 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3221 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3224 * In case of a Tx underrun, we may need to flush/reset the
3225 * Tx MAC, but that would also require resynchronization with
3226 * the status LEs. Reinitializing the status LEs would affect
3227 * the other port in a dual-MAC configuration, so it should be
3228 * avoided as much as possible. Due to the lack of documentation
3229 * it's all vague guesswork, and it needs more
3230 * investigation.
3236 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3238 struct msk_softc *sc;
3240 sc = sc_if->msk_softc;
3241 if ((status & Y2_IS_PAR_RD1) != 0) {
3242 device_printf(sc_if->msk_if_dev,
3243 "RAM buffer read parity error\n");
3245 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3248 if ((status & Y2_IS_PAR_WR1) != 0) {
3249 device_printf(sc_if->msk_if_dev,
3250 "RAM buffer write parity error\n");
3252 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3255 if ((status & Y2_IS_PAR_MAC1) != 0) {
3256 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3258 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3261 if ((status & Y2_IS_PAR_RX1) != 0) {
3262 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3264 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3266 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3267 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3269 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3274 msk_intr_hwerr(struct msk_softc *sc)
3277 uint32_t tlphead[4];
3279 status = CSR_READ_4(sc, B0_HWE_ISRC);
3280 /* Time Stamp timer overflow. */
3281 if ((status & Y2_IS_TIST_OV) != 0)
3282 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3283 if ((status & Y2_IS_PCI_NEXP) != 0) {
3285 * A PCI Express error occurred which is not described in the PEX
3286 * spec.
3287 * This error is also mapped to either the Master Abort
3288 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3289 * can only be cleared there.
3291 device_printf(sc->msk_dev,
3292 "PCI Express protocol violation error\n");
3295 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3298 if ((status & Y2_IS_MST_ERR) != 0)
3299 device_printf(sc->msk_dev,
3300 "unexpected IRQ Master error\n");
3301 else
3302 device_printf(sc->msk_dev,
3303 "unexpected IRQ Status error\n");
3304 /* Reset all bits in the PCI status register. */
3305 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3306 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3307 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3308 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3309 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3310 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3313 /* Check for PCI Express Uncorrectable Error. */
3314 if ((status & Y2_IS_PCI_EXP) != 0) {
3318 * On the PCI Express bus, bridges are called root complexes (RC).
3319 * PCI Express errors are recognized by the root complex too,
3320 * which requests the system to handle the problem. After an
3321 * error occurs it may be that no further access to the
3322 * adapter can be performed.
3325 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3326 if ((v32 & PEX_UNSUP_REQ) != 0) {
3327 /* Ignore unsupported request error. */
3328 device_printf(sc->msk_dev,
3329 "Uncorrectable PCI Express error\n");
3331 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3334 /* Get TLP header from Log Registers. */
3335 for (i = 0; i < 4; i++)
3336 tlphead[i] = CSR_PCI_READ_4(sc,
3337 PEX_HEADER_LOG + i * 4);
3338 /* Check for vendor defined broadcast message. */
3339 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3340 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3341 CSR_WRITE_4(sc, B0_HWE_IMSK,
3342 sc->msk_intrhwemask);
3343 CSR_READ_4(sc, B0_HWE_IMSK);
3346 /* Clear the interrupt. */
3347 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3348 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3349 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3352 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3353 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3354 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3355 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3358 static __inline void
3359 msk_rxput(struct msk_if_softc *sc_if)
3361 struct msk_softc *sc;
3363 sc = sc_if->msk_softc;
3364 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
3366 sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3367 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3368 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3371 sc_if->msk_cdata.msk_rx_ring_tag,
3372 sc_if->msk_cdata.msk_rx_ring_map,
3373 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3374 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3375 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3379 msk_handle_events(struct msk_softc *sc)
3381 struct msk_if_softc *sc_if;
3383 struct msk_stat_desc *sd;
3384 uint32_t control, status;
3385 int cons, idx, len, port, rxprog;
3387 idx = CSR_READ_2(sc, STAT_PUT_IDX);
3388 if (idx == sc->msk_stat_cons)
3391 /* Sync status LEs. */
3392 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3393 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3394 /* XXX Sync Rx LEs here. */
3396 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3399 for (cons = sc->msk_stat_cons; cons != idx;) {
3400 sd = &sc->msk_stat_ring[cons];
3401 control = le32toh(sd->msk_control);
3402 if ((control & HW_OWNER) == 0)
3405 * Marvell's FreeBSD driver updates the status LE after clearing
3406 * HW_OWNER. However, we don't have a way to sync a single LE
3407 * with the bus_dma(9) API; bus_dma(9) only provides a way to sync
3408 * an entire DMA map. So don't sync the LE until we have a better way.
3411 control &= ~HW_OWNER;
3412 sd->msk_control = htole32(control);
3413 status = le32toh(sd->msk_status);
3414 len = control & STLE_LEN_MASK;
3415 port = (control >> 16) & 0x01;
3416 sc_if = sc->msk_if[port];
3417 if (sc_if == NULL) {
3418 device_printf(sc->msk_dev, "invalid port opcode "
3419 "0x%08x\n", control & STLE_OP_MASK);
3423 switch (control & STLE_OP_MASK) {
3425 sc_if->msk_vtag = ntohs(len);
3428 sc_if->msk_vtag = ntohs(len);
3431 if (sc_if->msk_framesize >
3432 (MCLBYTES - MSK_RX_BUF_ALIGN))
3433 msk_jumbo_rxeof(sc_if, status, control, len);
3435 msk_rxeof(sc_if, status, control, len);
3438 * Because there is no way to sync a single Rx LE,
3439 * put the DMA sync operation off until the end of event processing.
3443 /* Update the prefetch unit if we've passed the watermark. */
3444 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3450 if (sc->msk_if[MSK_PORT_A] != NULL)
3451 msk_txeof(sc->msk_if[MSK_PORT_A],
3452 status & STLE_TXA1_MSKL);
3453 if (sc->msk_if[MSK_PORT_B] != NULL)
3454 msk_txeof(sc->msk_if[MSK_PORT_B],
3455 ((status & STLE_TXA2_MSKL) >>
3457 ((len & STLE_TXA2_MSKH) <<
3461 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3462 control & STLE_OP_MASK);
3465 MSK_INC(cons, MSK_STAT_RING_CNT);
3466 if (rxprog > sc->msk_process_limit)
3470 sc->msk_stat_cons = cons;
3471 /* XXX We should sync status LEs here. See above notes. */
3473 if (rxput[MSK_PORT_A] > 0)
3474 msk_rxput(sc->msk_if[MSK_PORT_A]);
3475 if (rxput[MSK_PORT_B] > 0)
3476 msk_rxput(sc->msk_if[MSK_PORT_B]);
3478 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
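/*
 * A non-zero return means the chip queued more status LEs while we
 * were processing. msk_legacy_intr() loops on this, and
 * msk_int_task() uses it (via 'domore') to requeue itself instead
 * of re-enabling interrupts right away.
 */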
3481 /* Legacy interrupt handler for shared interrupt. */
3483 msk_legacy_intr(void *xsc)
3485 struct msk_softc *sc;
3486 struct msk_if_softc *sc_if0, *sc_if1;
3487 struct ifnet *ifp0, *ifp1;
3493 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3494 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3495 if (status == 0 || status == 0xffffffff ||
3496 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3497 (status & sc->msk_intrmask) == 0) {
3498 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3502 sc_if0 = sc->msk_if[MSK_PORT_A];
3503 sc_if1 = sc->msk_if[MSK_PORT_B];
3506 ifp0 = sc_if0->msk_ifp;
3508 ifp1 = sc_if1->msk_ifp;
3510 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3511 msk_intr_phy(sc_if0);
3512 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3513 msk_intr_phy(sc_if1);
3514 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3515 msk_intr_gmac(sc_if0);
3516 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3517 msk_intr_gmac(sc_if1);
3518 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3519 device_printf(sc->msk_dev, "Rx descriptor error\n");
3520 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3521 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3522 CSR_READ_4(sc, B0_IMSK);
3524 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3525 device_printf(sc->msk_dev, "Tx descriptor error\n");
3526 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3527 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3528 CSR_READ_4(sc, B0_IMSK);
3530 if ((status & Y2_IS_HW_ERR) != 0)
3533 while (msk_handle_events(sc) != 0)
3535 if ((status & Y2_IS_STAT_BMU) != 0)
3536 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3538 /* Reenable interrupts. */
3539 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3541 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3542 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3543 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3544 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3545 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3546 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3554 struct msk_softc *sc;
3558 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3559 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3560 if (status == 0 || status == 0xffffffff) {
3561 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3562 return (FILTER_STRAY);
3565 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3566 return (FILTER_HANDLED);
3570 msk_int_task(void *arg, int pending)
3572 struct msk_softc *sc;
3573 struct msk_if_softc *sc_if0, *sc_if1;
3574 struct ifnet *ifp0, *ifp1;
3581 /* Get interrupt source. */
3582 status = CSR_READ_4(sc, B0_ISRC);
3583 if (status == 0 || status == 0xffffffff ||
3584 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3585 (status & sc->msk_intrmask) == 0)
3588 sc_if0 = sc->msk_if[MSK_PORT_A];
3589 sc_if1 = sc->msk_if[MSK_PORT_B];
3592 ifp0 = sc_if0->msk_ifp;
3594 ifp1 = sc_if1->msk_ifp;
3596 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3597 msk_intr_phy(sc_if0);
3598 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3599 msk_intr_phy(sc_if1);
3600 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3601 msk_intr_gmac(sc_if0);
3602 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3603 msk_intr_gmac(sc_if1);
3604 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3605 device_printf(sc->msk_dev, "Rx descriptor error\n");
3606 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3607 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3608 CSR_READ_4(sc, B0_IMSK);
3610 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3611 device_printf(sc->msk_dev, "Tx descriptor error\n");
3612 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3613 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3614 CSR_READ_4(sc, B0_IMSK);
3616 if ((status & Y2_IS_HW_ERR) != 0)
3619 domore = msk_handle_events(sc);
3620 if ((status & Y2_IS_STAT_BMU) != 0)
3621 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3623 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3624 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3625 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3626 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3627 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3628 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3631 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3638 /* Reenable interrupts. */
3639 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3643 msk_set_tx_stfwd(struct msk_if_softc *sc_if)
3645 struct msk_softc *sc;
3648 ifp = sc_if->msk_ifp;
3649 sc = sc_if->msk_softc;
3650 switch (sc->msk_hw_id) {
3651 case CHIP_ID_YUKON_EX:
3652 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
3653 goto yukon_ex_workaround;
3654 if (ifp->if_mtu > ETHERMTU)
3656 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3657 TX_JUMBO_ENA | TX_STFW_ENA);
3660 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3661 TX_JUMBO_DIS | TX_STFW_ENA);
3664 yukon_ex_workaround:
3665 if (ifp->if_mtu > ETHERMTU) {
3666 /* Set Tx GMAC FIFO Almost Empty Threshold. */
3668 MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3669 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3670 /* Disable Store & Forward mode for Tx. */
3672 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3673 TX_JUMBO_ENA | TX_STFW_DIS);
3675 /* Enable Store & Forward mode for Tx. */
3677 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3678 TX_JUMBO_DIS | TX_STFW_ENA);
3687 struct msk_if_softc *sc_if = xsc;
3690 msk_init_locked(sc_if);
3691 MSK_IF_UNLOCK(sc_if);
3695 msk_init_locked(struct msk_if_softc *sc_if)
3697 struct msk_softc *sc;
3699 struct mii_data *mii;
3700 uint16_t eaddr[ETHER_ADDR_LEN / 2];
3705 MSK_IF_LOCK_ASSERT(sc_if);
3707 ifp = sc_if->msk_ifp;
3708 sc = sc_if->msk_softc;
3709 mii = device_get_softc(sc_if->msk_miibus);
3711 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3715 /* Cancel pending I/O and free all Rx/Tx buffers. */
3718 if (ifp->if_mtu < ETHERMTU)
3719 sc_if->msk_framesize = ETHERMTU;
3720 else
3721 sc_if->msk_framesize = ifp->if_mtu;
3722 sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
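/*
 * Example: at the default MTU of 1500 this yields a frame size of
 * 1500 + 14 + 4 = 1518 bytes, which still fits a standard mbuf
 * cluster; only when msk_framesize exceeds MCLBYTES -
 * MSK_RX_BUF_ALIGN does the Rx path switch to jumbo buffers.
 */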
3723 if (ifp->if_mtu > ETHERMTU &&
3724 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
3725 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
3726 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
3729 /* GMAC Control reset. */
3730 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
3731 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
3732 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
3733 if (sc->msk_hw_id == CHIP_ID_YUKON_EX)
3734 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
3735 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
3739 * Initialize GMAC first such that speed/duplex/flow-control
3740 * parameters are renegotiated when interface is brought up.
3742 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
3744 /* Dummy read the Interrupt Source Register. */
3745 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3747 /* Clear MIB stats. */
3748 msk_stats_clear(sc_if);
3751 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3753 /* Setup Transmit Control Register. */
3754 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3756 /* Setup Transmit Flow Control Register. */
3757 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3759 /* Setup Transmit Parameter Register. */
3760 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3761 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3762 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3764 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3765 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3767 if (ifp->if_mtu > ETHERMTU)
3768 gmac |= GM_SMOD_JUMBO_ENA;
3769 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3771 /* Set station address. */
3772 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3773 for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3774 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3776 for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3777 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3780 /* Disable interrupts for counter overflows. */
3781 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3782 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3783 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3785 /* Configure Rx MAC FIFO. */
3786 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3787 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3788 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
3789 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
3790 sc->msk_hw_id == CHIP_ID_YUKON_EX)
3791 reg |= GMF_RX_OVER_ON;
3792 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
3794 /* Set receive filter. */
3795 msk_rxfilter(sc_if);
3797 /* Flush Rx MAC FIFO on any flow control or error. */
3798 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3802 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3803 * due to hardware hang on receipt of pause frames.
3805 reg = RX_GMF_FL_THR_DEF + 1;
3806 /* Another magic value for Yukon FE+, from Linux. */
3807 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3808 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
3810 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
3812 /* Configure Tx MAC FIFO. */
3813 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3814 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3815 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3817 /* Configure hardware VLAN tag insertion/stripping. */
3818 msk_setvlan(sc_if, ifp);
3820 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
3821 /* Set Rx pause threshold. */
3822 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3824 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3826 /* Configure store-and-forward for Tx. */
3827 msk_set_tx_stfwd(sc_if);
3830 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3831 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
3832 /* Disable dynamic watermark - from Linux. */
3833 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
3835 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
3839 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3840 * arbiter as we don't use Sync Tx queue.
3842 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3843 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3844 /* Enable the RAM Interface Arbiter. */
3845 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3847 /* Setup RAM buffer. */
3848 msk_set_rambuffer(sc_if);
3850 /* Disable Tx sync Queue. */
3851 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3853 /* Setup Tx Queue Bus Memory Interface. */
3854 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3855 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3856 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3857 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3858 switch (sc->msk_hw_id) {
3859 case CHIP_ID_YUKON_EC_U:
3860 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3861 /* Fix for Yukon-EC Ultra: set BMU FIFO level */
3862 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
3866 case CHIP_ID_YUKON_EX:
3868 * Yukon Extreme seems to have a silicon bug in its
3869 * automatic Tx checksum calculation capability.
3871 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
3872 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
3877 /* Setup Rx Queue Bus Memory Interface. */
3878 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3879 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3880 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3881 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3882 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3883 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3884 /* MAC Rx RAM Read is controlled by hardware. */
3885 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3888 msk_set_prefetch(sc, sc_if->msk_txq,
3889 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3890 msk_init_tx_ring(sc_if);
3892 /* Disable Rx checksum offload and RSS hash. */
3893 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3894 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3895 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
3896 msk_set_prefetch(sc, sc_if->msk_rxq,
3897 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3898 MSK_JUMBO_RX_RING_CNT - 1);
3899 error = msk_init_jumbo_rx_ring(sc_if);
3901 msk_set_prefetch(sc, sc_if->msk_rxq,
3902 sc_if->msk_rdata.msk_rx_ring_paddr,
3903 MSK_RX_RING_CNT - 1);
3904 error = msk_init_rx_ring(sc_if);
3907 device_printf(sc_if->msk_if_dev,
3908 "initialization failed: no memory for Rx buffers\n");
3913 /* Configure interrupt handling. */
3914 if (sc_if->msk_port == MSK_PORT_A) {
3915 sc->msk_intrmask |= Y2_IS_PORT_A;
3916 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3918 sc->msk_intrmask |= Y2_IS_PORT_B;
3919 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3921 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3922 CSR_READ_4(sc, B0_HWE_IMSK);
3923 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3924 CSR_READ_4(sc, B0_IMSK);
3926 sc_if->msk_flags &= ~MSK_FLAG_LINK;
3929 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3930 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3932 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3936 msk_set_rambuffer(struct msk_if_softc *sc_if)
3938 struct msk_softc *sc;
3941 sc = sc_if->msk_softc;
3942 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
3945 /* Setup Rx Queue. */
3946 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3947 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3948 sc->msk_rxqstart[sc_if->msk_port] / 8);
3949 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3950 sc->msk_rxqend[sc_if->msk_port] / 8);
3951 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3952 sc->msk_rxqstart[sc_if->msk_port] / 8);
3953 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3954 sc->msk_rxqstart[sc_if->msk_port] / 8);
3956 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3957 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3958 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3959 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3960 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3961 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3962 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3963 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
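/*
 * RB_RX_UTPP/RB_RX_LTPP are the upper and lower flow-control
 * thresholds of the Rx RAM buffer, in units of 8 bytes like the
 * start/end addresses above; presumably pause frames are generated
 * once the fill level rises past utpp and stop once it drains
 * below ltpp.
 */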
3964 /* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3966 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3967 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3969 /* Setup Tx Queue. */
3970 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3971 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3972 sc->msk_txqstart[sc_if->msk_port] / 8);
3973 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3974 sc->msk_txqend[sc_if->msk_port] / 8);
3975 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3976 sc->msk_txqstart[sc_if->msk_port] / 8);
3977 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3978 sc->msk_txqstart[sc_if->msk_port] / 8);
3979 /* Enable Store & Forward for Tx side. */
3980 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3981 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3982 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3986 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3990 /* Reset the prefetch unit. */
3991 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3993 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3995 /* Set LE base address. */
3996 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3998 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
4000 /* Set the list last index. */
4001 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
4003 /* Turn on prefetch unit. */
4004 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
4006 /* Dummy read to ensure write. */
4007 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
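/*
 * After this setup the prefetch unit fetches list elements from
 * host memory on its own; in normal operation the driver only
 * writes the put index (PREF_UNIT_PUT_IDX_REG) to tell the unit
 * how far into the ring it may read.
 */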
4011 msk_stop(struct msk_if_softc *sc_if)
4013 struct msk_softc *sc;
4014 struct msk_txdesc *txd;
4015 struct msk_rxdesc *rxd;
4016 struct msk_rxdesc *jrxd;
4021 MSK_IF_LOCK_ASSERT(sc_if);
4022 sc = sc_if->msk_softc;
4023 ifp = sc_if->msk_ifp;
4025 callout_stop(&sc_if->msk_tick_ch);
4026 sc_if->msk_watchdog_timer = 0;
4028 /* Disable interrupts. */
4029 if (sc_if->msk_port == MSK_PORT_A) {
4030 sc->msk_intrmask &= ~Y2_IS_PORT_A;
4031 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
4033 sc->msk_intrmask &= ~Y2_IS_PORT_B;
4034 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
4036 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
4037 CSR_READ_4(sc, B0_HWE_IMSK);
4038 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
4039 CSR_READ_4(sc, B0_IMSK);
4041 /* Disable Tx/Rx MAC. */
4042 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
4043 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
4044 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
4045 /* Read back to ensure the write completed. */
4046 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
4047 /* Update stats and clear counters. */
4048 msk_stats_update(sc_if);
4051 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
4052 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
4053 for (i = 0; i < MSK_TIMEOUT; i++) {
4054 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
4055 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
4057 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
4062 if (i == MSK_TIMEOUT)
4063 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
4064 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
4065 RB_RST_SET | RB_DIS_OP_MD);
4067 /* Disable all GMAC interrupt. */
4068 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
4069 /* Disable PHY interrupt. */
4070 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
4072 /* Disable the RAM Interface Arbiter. */
4073 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
4075 /* Reset the PCI FIFO of the async Tx queue */
4076 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
4077 BMU_RST_SET | BMU_FIFO_RST);
4079 /* Reset the Tx prefetch units. */
4080 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
4083 /* Reset the RAM Buffer async Tx queue. */
4084 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
4086 /* Reset Tx MAC FIFO. */
4087 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
4088 /* Set Pause Off. */
4089 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
4092 * The Rx Stop command will not work for Yukon-2 if the BMU does not
4093 * reach the end of a packet, and since we can't make sure whether we
4094 * have incoming data, we must reset the BMU while it is not in the
4095 * middle of a DMA transfer. Since it is possible that the Rx path is
4096 * still active, the Rx RAM buffer is stopped first, so any possible
4097 * incoming data will not trigger a DMA. After the RAM buffer is
4098 * stopped, the BMU is polled until any DMA in progress has ended, and only then is it reset.
4102 /* Disable the RAM Buffer receive queue. */
4103 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
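
	/*
	 * In the teardown loops below, bus_dmamap_sync() with the POSTREAD
	 * or POSTWRITE flag completes the CPU's view of each buffer before
	 * bus_dmamap_unload() destroys the mapping; on machines with bounce
	 * buffers or non-coherent caches, unloading without the sync could
	 * leave the mbuf contents stale at the moment it is freed.
	 */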
	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc_if->msk_flags &= ~MSK_FLAG_LINK;
}

/*
 * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the lower
 * 16 bits of a counter clears its upper 16 bits, so the lower half must
 * be accessed last.
 */
#define	MSK_READ_MIB32(x, y)					\
	((((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +	\
	(uint32_t)GMAC_READ_2(sc, x, y))
#define	MSK_READ_MIB64(x, y)					\
	((((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +	\
	(uint64_t)MSK_READ_MIB32(x, y))
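
/*
 * Two caveats worth noting.  First, C does not define the evaluation
 * order of the '+' operands above, so nothing in the macros themselves
 * forces the high half to be read before the low half; splitting the
 * reads into separate statements would make the intended order explicit.
 * Second, msk_stats_clear() below relies on the clear-on-read behaviour:
 * sweeping one read across every counter from GM_RXF_UC_OK through
 * GM_TXE_FIFO_UR, with GM_PAR_MIB_CLR set, zeroes the whole MIB block.
 */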

static void
msk_stats_clear(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint32_t reg;
	uint16_t gmac;
	int i;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->msk_softc;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
	/* Read all MIB counters with Clear Mode set. */
	for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t))
		reg = MSK_READ_MIB32(sc_if->msk_port, i);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}

static void
msk_stats_update(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct msk_hw_stats *stats;
	uint16_t gmac;
	uint32_t reg;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;
	sc = sc_if->msk_softc;
	stats = &sc_if->msk_stats;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
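
	/*
	 * Every counter in the MIB block is read below while clear mode is
	 * in effect, including the GM_RXF_SPAREx/GM_TXF_SPAREx slots whose
	 * values are discarded; reading the spare slots appears to exist
	 * only so that the clear-on-read sweep covers the entire block.
	 */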
	/* Rx stats. */
	stats->rx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
	stats->rx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
	stats->rx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
	stats->rx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
	stats->rx_crc_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
	stats->rx_good_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
	stats->rx_bad_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
	stats->rx_runt +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
	stats->rx_runt_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
	stats->rx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
	stats->rx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
	stats->rx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
	stats->rx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
	stats->rx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
	stats->rx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
	stats->rx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
	stats->rx_pkts_too_long +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
	stats->rx_pkts_jabbers +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
	stats->rx_fifo_oflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);

	/* Tx stats. */
	stats->tx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
	stats->tx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
	stats->tx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
	stats->tx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
	stats->tx_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
	stats->tx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
	stats->tx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
	stats->tx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
	stats->tx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
	stats->tx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
	stats->tx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
	stats->tx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
	stats->tx_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
	stats->tx_late_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
	stats->tx_excess_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
	stats->tx_multi_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
	stats->tx_single_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
	stats->tx_underflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}

static int
msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint32_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);

	MSK_IF_LOCK(sc_if);
	result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);

	return (sysctl_handle_int(oidp, &result, 0, req));
}
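
/*
 * The handlers above and below map a field of struct msk_hw_stats back
 * to its hardware counter: the struct mirrors the MIB block's layout,
 * and because each 32-bit counter occupies twice as much register space
 * as struct space (two 16-bit registers spaced 4 bytes apart), the byte
 * offset into the struct is doubled to obtain the register offset from
 * GM_MIB_CNT_BASE.  The live hardware count is then added to the total
 * accumulated so far by msk_stats_update().
 */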

static int
msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint64_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);

	MSK_IF_LOCK(sc_if);
	result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);

	return (sysctl_handle_quad(oidp, &result, 0, req));
}

#undef MSK_READ_MIB32
#undef MSK_READ_MIB64

#define	MSK_SYSCTL_STAT32(sc, c, o, p, n, d)				\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD,	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,	\
	    "IU", d)
#define	MSK_SYSCTL_STAT64(sc, c, o, p, n, d)				\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_QUAD | CTLFLAG_RD,	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,	\
	    "QU", d)

static void
msk_sysctl_node(struct msk_if_softc *sc_if)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *schild;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "MSK Statistics");
	schild = child = SYSCTL_CHILDREN(tree);
	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "MSK RX Statistics");
	child = SYSCTL_CHILDREN(tree);
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, rx_ucast_frames, "Good unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, rx_bcast_frames, "Good broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, rx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, rx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
	    child, rx_crc_errs, "CRC errors");
	MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
	    child, rx_good_octets, "Good octets");
	MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
	    child, rx_bad_octets, "Bad octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, rx_pkts_64, "64 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, rx_pkts_65_127, "65 to 127 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, rx_pkts_128_255, "128 to 255 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, rx_pkts_256_511, "256 to 511 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, rx_pkts_512_1023, "512 to 1023 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, rx_pkts_1024_1518, "1024 to 1518 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, rx_pkts_1519_max, "1519 byte to maximum size frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
	    child, rx_pkts_too_long, "Frames too long");
	MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
	    child, rx_pkts_jabbers, "Jabber errors");
	MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
	    child, rx_fifo_oflows, "FIFO overflows");

	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "MSK TX Statistics");
	child = SYSCTL_CHILDREN(tree);
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, tx_ucast_frames, "Unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, tx_bcast_frames, "Broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, tx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, tx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
	    child, tx_octets, "Octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, tx_pkts_64, "64 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, tx_pkts_65_127, "65 to 127 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, tx_pkts_128_255, "128 to 255 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, tx_pkts_256_511, "256 to 511 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, tx_pkts_512_1023, "512 to 1023 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, tx_pkts_1024_1518, "1024 to 1518 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, tx_pkts_1519_max, "1519 byte to maximum size frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
	    child, tx_colls, "Collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
	    child, tx_late_colls, "Late collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
	    child, tx_excess_colls, "Excessive collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
	    child, tx_multi_colls, "Multiple collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
	    child, tx_single_colls, "Single collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
	    child, tx_underflows, "FIFO underflows");
}
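
/*
 * The nodes registered above surface the per-port MAC statistics under
 * the device's sysctl tree.  Assuming a first adapter attached as msk0,
 * something like the following would read the good unicast Rx frame
 * count from userland:
 *
 *	sysctl dev.msk.0.stats.rx.ucast_frames
 */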

#undef MSK_SYSCTL_STAT32
#undef MSK_SYSCTL_STAT64

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}
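
/*
 * sysctl_int_range() is a generic clamped-integer handler: reads are
 * passed straight through, while writes outside [low, high] are rejected
 * with EINVAL before the backing variable is updated.  Per-variable
 * handlers such as the one below just bind their own bounds.
 */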
static int
sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
	    MSK_PROC_MAX));
}