1 /******************************************************************************
4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5 * Version: $Revision: 1.23 $
6 * Date : $Date: 2005/12/22 09:04:11 $
7 * Purpose: Main driver source file
9 *****************************************************************************/
11 /******************************************************************************
14 * Copyright (C) Marvell International Ltd. and/or its affiliates
16 * The computer program files contained in this folder ("Files")
17 * are provided to you under the BSD-type license terms provided
18 * below, and any use of such Files and any derivative works
19 * thereof created by you shall be governed by the following terms
22 * - Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials provided
27 * with the distribution.
28 * - Neither the name of Marvell nor the names of its contributors
29 * may be used to endorse or promote products derived from this
30 * software without specific prior written permission.
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 * OF THE POSSIBILITY OF SUCH DAMAGE.
46 *****************************************************************************/
/*-
 * Copyright (c) 1997, 1998, 1999, 2000
50 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 * must display the following acknowledgement:
62 * This product includes software developed by Bill Paul.
63 * 4. Neither the name of the author nor the names of any co-contributors
64 * may be used to endorse or promote products derived from this software
65 * without specific prior written permission.
67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
77 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
82 * Permission to use, copy, modify, and distribute this software for any
83 * purpose with or without fee is hereby granted, provided that the above
84 * copyright notice and this permission notice appear in all copies.
86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */
101 #include <sys/cdefs.h>
102 __FBSDID("$FreeBSD$");
104 #include <sys/param.h>
105 #include <sys/systm.h>
107 #include <sys/endian.h>
108 #include <sys/mbuf.h>
109 #include <sys/malloc.h>
110 #include <sys/kernel.h>
111 #include <sys/module.h>
112 #include <sys/socket.h>
113 #include <sys/sockio.h>
114 #include <sys/queue.h>
115 #include <sys/sysctl.h>
118 #include <net/ethernet.h>
120 #include <net/if_arp.h>
121 #include <net/if_dl.h>
122 #include <net/if_media.h>
123 #include <net/if_types.h>
124 #include <net/if_vlan_var.h>
126 #include <netinet/in.h>
127 #include <netinet/in_systm.h>
128 #include <netinet/ip.h>
129 #include <netinet/tcp.h>
130 #include <netinet/udp.h>
132 #include <machine/bus.h>
133 #include <machine/in_cksum.h>
134 #include <machine/resource.h>
135 #include <sys/rman.h>
137 #include <dev/mii/mii.h>
138 #include <dev/mii/miivar.h>
140 #include <dev/pci/pcireg.h>
141 #include <dev/pci/pcivar.h>
143 #include <dev/msk/if_mskreg.h>
145 MODULE_DEPEND(msk, pci, 1, 1, 1);
146 MODULE_DEPEND(msk, ether, 1, 1, 1);
147 MODULE_DEPEND(msk, miibus, 1, 1, 1);
149 /* "device miibus" required. See GENERIC if you get errors here. */
150 #include "miibus_if.h"
153 static int msi_disable = 0;
154 TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
155 static int legacy_intr = 0;
156 TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
157 static int jumbo_disable = 0;
158 TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
160 #define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
/*
 * Devices supported by this driver.
 */
165 static struct msk_product {
166 uint16_t msk_vendorid;
167 uint16_t msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
170 { VENDORID_SK, DEVICEID_SK_YUKON2,
171 "SK-9Sxx Gigabit Ethernet" },
172 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
173 "SK-9Exx Gigabit Ethernet"},
174 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
175 "Marvell Yukon 88E8021CU Gigabit Ethernet" },
176 { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
177 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
178 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
179 "Marvell Yukon 88E8022CU Gigabit Ethernet" },
180 { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
181 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
182 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
183 "Marvell Yukon 88E8061CU Gigabit Ethernet" },
184 { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
185 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
186 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
187 "Marvell Yukon 88E8062CU Gigabit Ethernet" },
188 { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
189 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
190 { VENDORID_MARVELL, DEVICEID_MRVL_8035,
191 "Marvell Yukon 88E8035 Fast Ethernet" },
192 { VENDORID_MARVELL, DEVICEID_MRVL_8036,
193 "Marvell Yukon 88E8036 Fast Ethernet" },
194 { VENDORID_MARVELL, DEVICEID_MRVL_8038,
195 "Marvell Yukon 88E8038 Fast Ethernet" },
196 { VENDORID_MARVELL, DEVICEID_MRVL_8039,
197 "Marvell Yukon 88E8039 Fast Ethernet" },
198 { VENDORID_MARVELL, DEVICEID_MRVL_8040,
199 "Marvell Yukon 88E8040 Fast Ethernet" },
200 { VENDORID_MARVELL, DEVICEID_MRVL_8040T,
201 "Marvell Yukon 88E8040T Fast Ethernet" },
202 { VENDORID_MARVELL, DEVICEID_MRVL_8042,
203 "Marvell Yukon 88E8042 Fast Ethernet" },
204 { VENDORID_MARVELL, DEVICEID_MRVL_8048,
205 "Marvell Yukon 88E8048 Fast Ethernet" },
206 { VENDORID_MARVELL, DEVICEID_MRVL_4361,
207 "Marvell Yukon 88E8050 Gigabit Ethernet" },
208 { VENDORID_MARVELL, DEVICEID_MRVL_4360,
209 "Marvell Yukon 88E8052 Gigabit Ethernet" },
210 { VENDORID_MARVELL, DEVICEID_MRVL_4362,
211 "Marvell Yukon 88E8053 Gigabit Ethernet" },
212 { VENDORID_MARVELL, DEVICEID_MRVL_4363,
213 "Marvell Yukon 88E8055 Gigabit Ethernet" },
214 { VENDORID_MARVELL, DEVICEID_MRVL_4364,
215 "Marvell Yukon 88E8056 Gigabit Ethernet" },
216 { VENDORID_MARVELL, DEVICEID_MRVL_4365,
217 "Marvell Yukon 88E8070 Gigabit Ethernet" },
218 { VENDORID_MARVELL, DEVICEID_MRVL_436A,
219 "Marvell Yukon 88E8058 Gigabit Ethernet" },
220 { VENDORID_MARVELL, DEVICEID_MRVL_436B,
221 "Marvell Yukon 88E8071 Gigabit Ethernet" },
222 { VENDORID_MARVELL, DEVICEID_MRVL_436C,
223 "Marvell Yukon 88E8072 Gigabit Ethernet" },
224 { VENDORID_MARVELL, DEVICEID_MRVL_4380,
225 "Marvell Yukon 88E8057 Gigabit Ethernet" },
226 { VENDORID_MARVELL, DEVICEID_MRVL_4381,
227 "Marvell Yukon 88E8059 Gigabit Ethernet" },
228 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
229 "D-Link 550SX Gigabit Ethernet" },
230 { VENDORID_DLINK, DEVICEID_DLINK_DGE560SX,
231 "D-Link 560SX Gigabit Ethernet" },
232 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
233 "D-Link 560T Gigabit Ethernet" }
};

static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon EX",
	"Yukon EC",
	"Yukon FE",
	"Yukon FE+",
	"Yukon Supreme",
	"Yukon Ultra 2",
	"Yukon Unknown",
	"Yukon Optima",
};
249 static int mskc_probe(device_t);
250 static int mskc_attach(device_t);
251 static int mskc_detach(device_t);
252 static int mskc_shutdown(device_t);
253 static int mskc_setup_rambuffer(struct msk_softc *);
254 static int mskc_suspend(device_t);
255 static int mskc_resume(device_t);
256 static void mskc_reset(struct msk_softc *);
258 static int msk_probe(device_t);
259 static int msk_attach(device_t);
260 static int msk_detach(device_t);
262 static void msk_tick(void *);
263 static void msk_intr(void *);
264 static void msk_intr_phy(struct msk_if_softc *);
265 static void msk_intr_gmac(struct msk_if_softc *);
266 static __inline void msk_rxput(struct msk_if_softc *);
267 static int msk_handle_events(struct msk_softc *);
268 static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
269 static void msk_intr_hwerr(struct msk_softc *);
270 #ifndef __NO_STRICT_ALIGNMENT
static __inline void msk_fixup_rx(struct mbuf *);
#endif
273 static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
274 static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
275 static void msk_txeof(struct msk_if_softc *, int);
276 static int msk_encap(struct msk_if_softc *, struct mbuf **);
277 static void msk_start(struct ifnet *);
278 static void msk_start_locked(struct ifnet *);
279 static int msk_ioctl(struct ifnet *, u_long, caddr_t);
280 static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
281 static void msk_set_rambuffer(struct msk_if_softc *);
282 static void msk_set_tx_stfwd(struct msk_if_softc *);
283 static void msk_init(void *);
284 static void msk_init_locked(struct msk_if_softc *);
285 static void msk_stop(struct msk_if_softc *);
286 static void msk_watchdog(struct msk_if_softc *);
287 static int msk_mediachange(struct ifnet *);
288 static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
289 static void msk_phy_power(struct msk_softc *, int);
290 static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
291 static int msk_status_dma_alloc(struct msk_softc *);
292 static void msk_status_dma_free(struct msk_softc *);
293 static int msk_txrx_dma_alloc(struct msk_if_softc *);
294 static int msk_rx_dma_jalloc(struct msk_if_softc *);
295 static void msk_txrx_dma_free(struct msk_if_softc *);
296 static void msk_rx_dma_jfree(struct msk_if_softc *);
297 static int msk_init_rx_ring(struct msk_if_softc *);
298 static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
299 static void msk_init_tx_ring(struct msk_if_softc *);
300 static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
301 static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
302 static int msk_newbuf(struct msk_if_softc *, int);
303 static int msk_jumbo_newbuf(struct msk_if_softc *, int);
305 static int msk_phy_readreg(struct msk_if_softc *, int, int);
306 static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
307 static int msk_miibus_readreg(device_t, int, int);
308 static int msk_miibus_writereg(device_t, int, int, int);
309 static void msk_miibus_statchg(device_t);
311 static void msk_rxfilter(struct msk_if_softc *);
312 static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
314 static void msk_stats_clear(struct msk_if_softc *);
315 static void msk_stats_update(struct msk_if_softc *);
316 static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
317 static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
318 static void msk_sysctl_node(struct msk_if_softc *);
319 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
320 static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
322 static device_method_t mskc_methods[] = {
323 /* Device interface */
324 DEVMETHOD(device_probe, mskc_probe),
325 DEVMETHOD(device_attach, mskc_attach),
326 DEVMETHOD(device_detach, mskc_detach),
327 DEVMETHOD(device_suspend, mskc_suspend),
328 DEVMETHOD(device_resume, mskc_resume),
329 DEVMETHOD(device_shutdown, mskc_shutdown),
332 DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ NULL, NULL }
};
338 static driver_t mskc_driver = {
	"mskc",
	mskc_methods,
	sizeof(struct msk_softc)
};
344 static devclass_t mskc_devclass;
346 static device_method_t msk_methods[] = {
347 /* Device interface */
348 DEVMETHOD(device_probe, msk_probe),
349 DEVMETHOD(device_attach, msk_attach),
350 DEVMETHOD(device_detach, msk_detach),
351 DEVMETHOD(device_shutdown, bus_generic_shutdown),
354 DEVMETHOD(bus_print_child, bus_generic_print_child),
355 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
358 DEVMETHOD(miibus_readreg, msk_miibus_readreg),
359 DEVMETHOD(miibus_writereg, msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	{ NULL, NULL }
};
365 static driver_t msk_driver = {
	"msk",
	msk_methods,
	sizeof(struct msk_if_softc)
};
371 static devclass_t msk_devclass;
373 DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
374 DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
375 DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
377 static struct resource_spec msk_res_spec_io[] = {
	{ SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
	{ -1, 0, 0 }
};
382 static struct resource_spec msk_res_spec_mem[] = {
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
	{ -1, 0, 0 }
};
387 static struct resource_spec msk_irq_spec_legacy[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};
392 static struct resource_spec msk_irq_spec_msi[] = {
	{ SYS_RES_IRQ, 1, RF_ACTIVE },
	{ -1, 0, 0 }
};
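/*
 * Note on the rid values above: legacy INTx resources use IRQ rid 0 and
 * must be shareable, while MSI messages are numbered starting at rid 1,
 * which is why the MSI spec requests rid 1.
 */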
static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
400 struct msk_if_softc *sc_if;
402 sc_if = device_get_softc(dev);
	return (msk_phy_readreg(sc_if, phy, reg));
}

static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;
415 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
416 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}
	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}

static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
438 struct msk_if_softc *sc_if;
440 sc_if = device_get_softc(dev);
	return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;
453 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
454 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
455 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}

static void
msk_miibus_statchg(device_t dev)
{
471 struct msk_softc *sc;
472 struct msk_if_softc *sc_if;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t gmac;
477 sc_if = device_get_softc(dev);
478 sc = sc_if->msk_softc;
480 MSK_IF_LOCK_ASSERT(sc_if);
482 mii = device_get_softc(sc_if->msk_miibus);
483 ifp = sc_if->msk_ifp;
484 if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;
488 sc_if->msk_flags &= ~MSK_FLAG_LINK;
489 if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
490 (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc_if->msk_flags |= MSK_FLAG_LINK;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
				sc_if->msk_flags |= MSK_FLAG_LINK;
			break;
		}
	}
508 if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
509 /* Enable Tx FIFO Underrun. */
510 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
511 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) when it detects a
		 * link-state change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
517 gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}
530 if ((IFM_OPTIONS(mii->mii_media_active) &
531 IFM_ETH_RXPAUSE) == 0)
532 gmac |= GM_GPCR_FC_RX_DIS;
533 if ((IFM_OPTIONS(mii->mii_media_active) &
534 IFM_ETH_TXPAUSE) == 0)
535 gmac |= GM_GPCR_FC_TX_DIS;
536 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
537 gmac |= GM_GPCR_DUP_FULL;
		else
			gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
540 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
541 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read back to ensure the write completed. */
543 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
544 gmac = GMC_PAUSE_OFF;
545 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
546 if ((IFM_OPTIONS(mii->mii_media_active) &
547 IFM_ETH_RXPAUSE) != 0)
				gmac = GMC_PAUSE_ON;
		}
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
552 /* Enable PHY interrupt for FIFO underrun/overflow. */
553 msk_phy_writereg(sc_if, PHY_ADDR_MARV,
554 PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * The link state changed to down.
		 * Disable PHY interrupts.
		 */
560 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
561 /* Disable Rx/Tx MAC. */
562 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
564 gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
565 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
			/* Read back to ensure the write completed. */
			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		}
	}
}

static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;
582 sc = sc_if->msk_softc;
584 MSK_IF_LOCK_ASSERT(sc_if);
586 ifp = sc_if->msk_ifp;
588 bzero(mchash, sizeof(mchash));
589 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
590 if ((ifp->if_flags & IFF_PROMISC) != 0)
591 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
592 else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
593 mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
		mchash[0] = 0xffff;
		mchash[1] = 0xffff;
	} else {
		mode |= GM_RXCR_UCF_ENA;
		if_maddr_rlock(ifp);
599 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
602 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
603 ifma->ifma_addr), ETHER_ADDR_LEN);
604 /* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
607 mchash[crc >> 5] |= 1 << (crc & 0x1f);
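			/*
			 * Worked example (hypothetical address): if
			 * ether_crc32_be() returned 0x2f for some group
			 * address, the six low bits select word 1, bit 15
			 * (0x2f >> 5 == 1, 0x2f & 0x1f == 0x0f), i.e.
			 * mchash[1] |= 0x8000.
			 */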
		}
		if_maddr_runlock(ifp);
610 if (mchash[0] != 0 || mchash[1] != 0)
611 mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
616 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
617 (mchash[0] >> 16) & 0xffff);
618 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
621 (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}

static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;
651 MSK_IF_LOCK_ASSERT(sc_if);
653 sc_if->msk_cdata.msk_rx_cons = 0;
654 sc_if->msk_cdata.msk_rx_prod = 0;
655 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
657 rd = &sc_if->msk_rdata;
658 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
659 prod = sc_if->msk_cdata.msk_rx_prod;
660 for (i = 0; i < MSK_RX_RING_CNT; i++) {
661 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
666 MSK_INC(prod, MSK_RX_RING_CNT);
669 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
670 sc_if->msk_cdata.msk_rx_ring_map,
671 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
673 /* Update prefetch unit. */
674 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
675 CSR_WRITE_2(sc_if->msk_softc,
676 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}

static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;
689 MSK_IF_LOCK_ASSERT(sc_if);
691 sc_if->msk_cdata.msk_rx_cons = 0;
692 sc_if->msk_cdata.msk_rx_prod = 0;
693 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
695 rd = &sc_if->msk_rdata;
696 bzero(rd->msk_jumbo_rx_ring,
697 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
698 prod = sc_if->msk_cdata.msk_rx_prod;
699 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
700 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
705 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
708 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
709 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
710 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
712 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
713 CSR_WRITE_2(sc_if->msk_softc,
714 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;
727 sc_if->msk_cdata.msk_tso_mtu = 0;
728 sc_if->msk_cdata.msk_last_csum = 0;
729 sc_if->msk_cdata.msk_tx_prod = 0;
730 sc_if->msk_cdata.msk_tx_cons = 0;
731 sc_if->msk_cdata.msk_tx_cnt = 0;
733 rd = &sc_if->msk_rdata;
734 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
735 for (i = 0; i < MSK_TX_RING_CNT; i++) {
736 txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}
741 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
742 sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;
	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
786 m->m_len = m->m_pkthdr.len = MCLBYTES;
787 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
788 m_adj(m, ETHER_ALIGN);
789 #ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif
794 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
795 sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
800 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
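	/*
	 * A single segment is expected here: the Rx buffer tag (created
	 * later in msk_txrx_dma_alloc()) limits loads to one segment, so
	 * the KASSERT above only documents that invariant.
	 */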
802 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
803 if (rxd->rx_m != NULL) {
804 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
805 BUS_DMASYNC_POSTREAD);
806 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
808 map = rxd->rx_dmamap;
809 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
810 sc_if->msk_cdata.msk_rx_sparemap = map;
811 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
812 BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control = htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}

static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;
	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
839 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
840 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
841 m_adj(m, ETHER_ALIGN);
842 #ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif
847 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
848 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
853 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
855 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
856 if (rxd->rx_m != NULL) {
857 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
858 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
862 map = rxd->rx_dmamap;
863 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
864 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
865 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
866 BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control = htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}

/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
882 struct msk_if_softc *sc_if;
	struct mii_data *mii;
	int error;
886 sc_if = ifp->if_softc;
	MSK_IF_LOCK(sc_if);
	mii = device_get_softc(sc_if->msk_miibus);
890 error = mii_mediachg(mii);
	MSK_IF_UNLOCK(sc_if);

	return (error);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
902 struct msk_if_softc *sc_if;
903 struct mii_data *mii;
	sc_if = ifp->if_softc;
	MSK_IF_LOCK(sc_if);
	if ((ifp->if_flags & IFF_UP) == 0) {
		MSK_IF_UNLOCK(sc_if);
		return;
	}
	mii = device_get_softc(sc_if->msk_miibus);
	mii_pollstat(mii);
	MSK_IF_UNLOCK(sc_if);
915 ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFMTU:
		MSK_IF_LOCK(sc_if);
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if (ifr->ifr_mtu > ETHERMTU) {
				if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
					error = EINVAL;
					MSK_IF_UNLOCK(sc_if);
					break;
				}
				if ((sc_if->msk_flags &
				    MSK_FLAG_JUMBO_NOCSUM) != 0) {
					ifp->if_hwassist &=
					    ~(MSK_CSUM_FEATURES | CSUM_TSO);
					ifp->if_capenable &=
					    ~(IFCAP_TSO4 | IFCAP_TXCSUM);
					VLAN_CAPABILITIES(ifp);
				}
			}
			ifp->if_mtu = ifr->ifr_mtu;
			msk_init_locked(sc_if);
		}
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCSIFFLAGS:
		MSK_IF_LOCK(sc_if);
959 if ((ifp->if_flags & IFF_UP) != 0) {
960 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
961 ((ifp->if_flags ^ sc_if->msk_if_flags) &
962 (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				msk_rxfilter(sc_if);
			else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
965 msk_init_locked(sc_if);
966 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			msk_stop(sc_if);
		sc_if->msk_if_flags = ifp->if_flags;
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MSK_IF_LOCK(sc_if);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			msk_rxfilter(sc_if);
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
980 mii = device_get_softc(sc_if->msk_miibus);
981 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		MSK_IF_LOCK(sc_if);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
986 if ((mask & IFCAP_TXCSUM) != 0 &&
987 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
988 ifp->if_capenable ^= IFCAP_TXCSUM;
989 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
990 ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
994 if ((mask & IFCAP_RXCSUM) != 0 &&
995 (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
996 ifp->if_capenable ^= IFCAP_RXCSUM;
997 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
998 (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
999 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1000 if ((mask & IFCAP_TSO4) != 0 &&
1001 (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
1002 ifp->if_capenable ^= IFCAP_TSO4;
1003 if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
1004 ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
1008 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
1009 (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0)
1010 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1011 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1012 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
1013 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1014 if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0)
1015 ifp->if_capenable &=
1016 ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
1017 msk_setvlan(sc_if, ifp);
1019 if (ifp->if_mtu > ETHERMTU &&
1020 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
1021 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
			ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
		}
		VLAN_CAPABILITIES(ifp);
		MSK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
mskc_probe(device_t dev)
{
1039 struct msk_product *mp;
1040 uint16_t vendor, devid;
1043 vendor = pci_get_vendor(dev);
1044 devid = pci_get_device(dev);
	mp = msk_products;
	for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
	    i++, mp++) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
1049 device_set_desc(dev, mp->msk_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;
1063 /* Get adapter SRAM size. */
1064 sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
	if (bootverbose)
		device_printf(sc->msk_dev,
1067 "RAM buffer size : %dKB\n", sc->msk_ramsize);
	if (sc->msk_ramsize == 0)
		return (0);
1071 sc->msk_pflags |= MSK_FLAG_RAMBUF;
	/*
	 * Give the receiver 2/3 of the memory and round down to a multiple
	 * of 1024. Tx/Rx RAM buffer sizes of the Yukon II must be multiples
	 * of 1024.
	 */
1077 sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
1078 sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
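	/*
	 * Worked example: with a 48KB SRAM (msk_ramsize == 48), the Rx
	 * queue gets rounddown(48 * 1024 * 2 / 3, 1024) == 32768 bytes
	 * and the Tx queue gets the remaining 49152 - 32768 == 16384
	 * bytes per port.
	 */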
1079 for (i = 0, next = 0; i < sc->msk_num_port; i++) {
1080 sc->msk_rxqstart[i] = next;
1081 sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
1082 next = sc->msk_rxqend[i] + 1;
1083 sc->msk_txqstart[i] = next;
1084 sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
1085 next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
1088 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1089 sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
1092 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}

static void
msk_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t our, val;
	int i;

	switch (mode) {
1108 case MSK_PHY_POWERUP:
1109 /* Switch power to VCC (WA for VAUX problem). */
1110 CSR_WRITE_1(sc, B0_POWER_CTRL,
1111 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1112 /* Disable Core Clock Division, set Clock Select to 0. */
1113 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1117 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1118 /* Enable bits are inverted. */
1119 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1120 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1121 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
1126 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1128 val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
1129 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1130 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
1131 if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1132 /* Deassert Low Power for 1st PHY. */
1133 val |= PCI_Y2_PHY1_COMA;
1134 if (sc->msk_num_port > 1)
1135 val |= PCI_Y2_PHY2_COMA;
			}
		}
		/* Release PHY from PowerDown/COMA mode. */
1139 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
1140 switch (sc->msk_hw_id) {
1141 case CHIP_ID_YUKON_EC_U:
1142 case CHIP_ID_YUKON_EX:
1143 case CHIP_ID_YUKON_FE_P:
1144 case CHIP_ID_YUKON_UL_2:
1145 case CHIP_ID_YUKON_OPT:
1146 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_OFF);
1148 /* Enable all clocks. */
1149 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
1150 our = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
1151 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
1152 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
1153 /* Set all bits to 0 except bits 15..12. */
1154 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, our);
1155 our = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
1156 our &= PCI_CTL_TIM_VMAIN_AV_MSK;
1157 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, our);
1158 CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
			/*
			 * Disable status race, workaround for
			 * Yukon EC Ultra & Yukon EX.
			 */
1163 val = CSR_READ_4(sc, B2_GP_IO);
1164 val |= GLB_GPIO_STAT_RACE_DIS;
1165 CSR_WRITE_4(sc, B2_GP_IO, val);
1166 CSR_READ_4(sc, B2_GP_IO);
1171 for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
1178 case MSK_PHY_POWERDOWN:
1179 val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
1180 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1181 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1182 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1183 val &= ~PCI_Y2_PHY1_COMA;
1184 if (sc->msk_num_port > 1)
1185 val &= ~PCI_Y2_PHY2_COMA;
		}
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
1189 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1190 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1191 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1192 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1193 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1202 CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}

static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i;

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Disable ASF. */
1221 if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
1222 status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
1223 /* Clear AHB bridge & microcontroller reset. */
1224 status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
1225 Y2_ASF_HCU_CCSR_CPU_RST_MODE);
1226 /* Clear ASF microcontroller state. */
		status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
1228 CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
	} else
		CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1231 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
	/*
	 * Since we disabled ASF, S/W reset is required for Power Management.
	 */
1236 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1237 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1239 /* Clear all error bits in the PCI status register. */
1240 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1241 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1243 pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1244 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1245 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
1246 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1248 switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
1251 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1252 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1253 if ((val & PEX_RX_OV) != 0) {
1254 sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1255 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured as 0. */
1261 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1264 if (sc->msk_bustype == MSK_PCIX_BUS) {
1265 /* Set Cache Line Size opt. */
1266 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
			val |= PCI_CLS_OPT;
			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		}
		break;
	}
1272 /* Set PHY power state. */
1273 msk_phy_power(sc, MSK_PHY_POWERUP);
1275 /* Reset GPHY/GMAC Control */
1276 for (i = 0; i < sc->msk_num_port; i++) {
1277 /* GPHY Control reset. */
1278 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1279 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1280 /* GMAC Control reset. */
1281 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1282 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1283 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1284 if (sc->msk_hw_id == CHIP_ID_YUKON_EX)
1285 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
1286 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
			    GMC_BYP_RETR_ON);
	}

	if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
1290 /* Disable PCIe PHY powerdown(reg 0x80, bit7). */
1291 CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1298 /* Clear TWSI IRQ. */
1299 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1301 /* Turn off hardware timer. */
1302 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1303 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1305 /* Turn off descriptor polling. */
1306 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1308 /* Turn off time stamps. */
1309 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1310 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1312 /* Configure timeout values. */
1313 for (i = 0; i < sc->msk_num_port; i++) {
1314 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
1315 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
		    MSK_RI_TO_53);
	}
1342 /* Disable all interrupts. */
1343 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1344 CSR_READ_4(sc, B0_HWE_IMSK);
1345 CSR_WRITE_4(sc, B0_IMSK, 0);
1346 CSR_READ_4(sc, B0_IMSK);
	/*
	 * On a dual-port PCI-X card, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
1352 if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;

		pcix_cmd = pci_read_config(sc->msk_dev,
1356 sc->msk_pcixcap + PCIXR_COMMAND, 2);
1357 /* Clear Max Outstanding Split Transactions. */
1358 pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
1359 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1360 pci_write_config(sc->msk_dev,
1361 sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
1362 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_expcap != 0) {
1365 /* Change Max. Read Request Size to 2048 bytes. */
1366 if (pci_get_max_read_req(sc->msk_dev) == 512)
1367 pci_set_max_read_req(sc->msk_dev, 2048);
	}

	/* Clear status list. */
1371 bzero(sc->msk_stat_ring,
1372 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
1373 sc->msk_stat_cons = 0;
1374 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
1375 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1376 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1377 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1378 /* Set the status list base address. */
1379 addr = sc->msk_stat_ring_paddr;
1380 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1381 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1382 /* Set the status list last index. */
1383 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
1384 if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
1385 sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1386 /* WA for dev. #4.3 */
1387 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1388 /* WA for dev. #4.18 */
1389 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1390 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1393 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1394 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1395 sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
1396 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
	/*
	 * Use default values for STAT_ISR_TIMER_INI and STAT_LEV_TIMER_INI.
	 */
1404 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
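	/*
	 * Assuming MSK_USECS(sc, us) scales by the core clock in MHz (as
	 * defined in if_mskreg.h), MSK_USECS(sc, 1000) programs 125000
	 * ticks on a 125 MHz Yukon EC, i.e. a 1ms Tx timer interval.
	 */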
1406 /* Enable status unit. */
1407 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1409 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1410 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}

static int
msk_probe(device_t dev)
{
	struct msk_softc *sc;
	char desc[100];
1420 sc = device_get_softc(device_get_parent(dev));
	/*
	 * Not much to do here. We always know there will be
1423 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
1427 snprintf(desc, sizeof(desc),
1428 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);
	return (BUS_PROBE_DEFAULT);
}

static int
msk_attach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct ifnet *ifp;
	struct msk_mii_data *mmd;
	int i, port, error;
	uint8_t eaddr[6];
1450 sc_if = device_get_softc(dev);
1451 sc = device_get_softc(device_get_parent(dev));
1452 mmd = device_get_ivars(dev);
	port = mmd->port;

	sc_if->msk_if_dev = dev;
1456 sc_if->msk_port = port;
1457 sc_if->msk_softc = sc;
1458 sc_if->msk_flags = sc->msk_pflags;
1459 sc->msk_if[port] = sc_if;
1460 /* Setup Tx/Rx queue register offsets. */
1461 if (port == MSK_PORT_A) {
1462 sc_if->msk_txq = Q_XA1;
1463 sc_if->msk_txsq = Q_XS1;
1464 sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
1467 sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}
1471 callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
1472 msk_sysctl_node(sc_if);
	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
		goto fail;
	msk_rx_dma_jalloc(sc_if);
1478 ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
1480 device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
1484 ifp->if_softc = sc_if;
1485 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1486 ifp->if_mtu = ETHERMTU;
1487 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * The IFCAP_RXCSUM capability is intentionally disabled because the
	 * hardware has a serious bug in Rx checksum offload on all Yukon II
	 * family hardware. There seems to be a workaround that makes it work
	 * sometimes, but the workaround has to check OP code sequences to
	 * verify whether the OP code is correct, and sometimes it must
	 * compute the IP/TCP/UDP checksum in the driver to verify the
	 * checksum computed by the hardware. If the checksum has to be
	 * computed in software anyway, there is no reason to spend time
	 * making Rx checksum offload work on Yukon II hardware.
	 */
1500 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
	/*
	 * Enable Rx checksum offloading if the controller supports
	 * the new descriptor format.
	 */
1505 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
1506 (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
1507 ifp->if_capabilities |= IFCAP_RXCSUM;
1508 ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
1509 ifp->if_capenable = ifp->if_capabilities;
1510 ifp->if_ioctl = msk_ioctl;
1511 ifp->if_start = msk_start;
1513 ifp->if_watchdog = NULL;
1514 ifp->if_init = msk_init;
1515 IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1516 ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
1517 IFQ_SET_READY(&ifp->if_snd);
	/*
	 * Get station address for this interface. Note that
1520 * dual port cards actually come with three station
1521 * addresses: one for each port, plus an extra. The
1522 * extra one is used by the SysKonnect driver software
1523 * as a 'virtual' station address for when both ports
1524 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
1528 for (i = 0; i < ETHER_ADDR_LEN; i++)
1529 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
	/*
	 * Call MI attach routine. Can't hold locks when calling into ether_*.
	 */
1534 MSK_IF_UNLOCK(sc_if);
1535 ether_ifattach(ifp, eaddr);
1538 /* VLAN capability setup */
1539 ifp->if_capabilities |= IFCAP_VLAN_MTU;
1540 if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
		/*
		 * Due to Tx checksum offload hardware bugs, msk(4) manually
		 * computes checksums for short frames. For VLAN tagged
		 * frames this workaround does not work, so checksum offload
		 * is disabled for VLAN interfaces.
		 */
1547 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
		/*
		 * Enable Rx checksum offloading for VLAN tagged frames
		 * if the controller supports the new descriptor format.
		 */
1552 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
1553 (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
1554 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1556 ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
1563 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1568 MSK_IF_UNLOCK(sc_if);
1569 error = mii_attach(dev, &sc_if->msk_miibus, ifp, msk_mediachange,
	    msk_mediastatus, BMSR_DEFCAPMASK, PHY_ADDR_MARV, MII_OFFSET_ANY,
	    mmd->mii_flags);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev, "attaching PHYs failed\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

fail:
	if (error != 0) {
1581 /* Access should be ok even though lock has been dropped */
		sc->msk_if[port] = NULL;
		msk_detach(dev);
	}

	return (error);
}
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
1596 struct msk_softc *sc;
1597 struct msk_mii_data *mmd;
1598 int error, msic, msir, reg;
1600 sc = device_get_softc(dev);
	sc->msk_dev = dev;
	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
1608 pci_enable_busmaster(dev);
1610 /* Allocate I/O resource */
1611 #ifdef MSK_USEIOSPACE
1612 sc->msk_res_spec = msk_res_spec_io;
#else
	sc->msk_res_spec = msk_res_spec_mem;
#endif
1616 sc->msk_irq_spec = msk_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
	if (error) {
		if (sc->msk_res_spec == msk_res_spec_mem)
			sc->msk_res_spec = msk_res_spec_io;
		else
			sc->msk_res_spec = msk_res_spec_mem;
		error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
		if (error) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_spec == msk_res_spec_mem ? "memory" :
			    "I/O");
			mtx_destroy(&sc->msk_mtx);
			return (ENXIO);
		}
	}
1633 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1634 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1635 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1636 /* Bail out if chip is not recognized. */
1637 if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1638 sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
1639 sc->msk_hw_id == CHIP_ID_YUKON_SUPR ||
1640 sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
1641 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1642 sc->msk_hw_id, sc->msk_hw_rev);
		mtx_destroy(&sc->msk_mtx);
		return (ENXIO);
	}
1647 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1648 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1649 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1650 &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
1651 "max number of Rx events to process");
1653 sc->msk_process_limit = MSK_PROC_DEFAULT;
1654 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1655 "process_limit", &sc->msk_process_limit);
	if (error == 0) {
		if (sc->msk_process_limit < MSK_PROC_MIN ||
1658 sc->msk_process_limit > MSK_PROC_MAX) {
1659 device_printf(dev, "process_limit value out of range; "
1660 "using default: %d\n", MSK_PROC_DEFAULT);
			sc->msk_process_limit = MSK_PROC_DEFAULT;
		}
	}
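	/*
	 * Example: the resulting node can be tuned at runtime with e.g.
	 * "sysctl dev.mskc.0.process_limit=200"; sysctl_hw_msk_proc_limit()
	 * bounds the value to the MSK_PROC_MIN/MSK_PROC_MAX range.
	 */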
1665 sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT;
1666 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
1667 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
1668 "int_holdoff", CTLFLAG_RW, &sc->msk_int_holdoff, 0,
1669 "Maximum number of time to delay interrupts");
1670 resource_int_value(device_get_name(dev), device_get_unit(dev),
1671 "int_holdoff", &sc->msk_int_holdoff);
1674 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1675 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1676 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1677 /* Check number of MACs. */
1678 sc->msk_num_port = 1;
1679 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}
1685 /* Check bus type. */
	if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0) {
1687 sc->msk_bustype = MSK_PEX_BUS;
1688 sc->msk_expcap = reg;
	} else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0) {
1690 sc->msk_bustype = MSK_PCIX_BUS;
1691 sc->msk_pcixcap = reg;
1693 sc->msk_bustype = MSK_PCI_BUS;
1695 switch (sc->msk_hw_id) {
1696 case CHIP_ID_YUKON_EC:
1697 sc->msk_clock = 125; /* 125 MHz */
1698 sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_EC_U:
1701 sc->msk_clock = 125; /* 125 MHz */
1702 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
		break;
	case CHIP_ID_YUKON_EX:
1705 sc->msk_clock = 125; /* 125 MHz */
1706 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
1707 MSK_FLAG_AUTOTX_CSUM;
		/*
		 * Yukon Extreme seems to have a silicon bug in its
		 * automatic Tx checksum calculation capability.
		 */
1712 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
1713 sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
		/*
		 * Yukon Extreme A0 cannot use store-and-forward for
		 * jumbo frames, so disable Tx checksum offloading
		 * for jumbo frames.
		 */
1719 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
1720 sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
		break;
	case CHIP_ID_YUKON_FE:
1723 sc->msk_clock = 100; /* 100 MHz */
1724 sc->msk_pflags |= MSK_FLAG_FASTETHER;
		break;
	case CHIP_ID_YUKON_FE_P:
1727 sc->msk_clock = 50; /* 50 MHz */
1728 sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
1729 MSK_FLAG_AUTOTX_CSUM;
1730 if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
			/*
			 * FE+ A0 has a status LE writeback bug, so msk(4)
			 * does not rely on the status word of received
			 * frames in msk_rxeof(), which in turn disables
			 * all hardware assistance bits reported by the
			 * status word as well as validity checks of the
			 * received frame. Just pass received frames to
			 * the upper stack with minimal tests and let the
			 * upper stack handle them.
			 */
1741 sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
1742 MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
		}
		break;
	case CHIP_ID_YUKON_XL:
1746 sc->msk_clock = 156; /* 156 MHz */
1747 sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_UL_2:
1750 sc->msk_clock = 125; /* 125 MHz */
1751 sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_OPT:
1754 sc->msk_clock = 125; /* 125 MHz */
1755 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2;
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}
1762 /* Allocate IRQ resources. */
1763 msic = pci_msi_count(dev);
1765 device_printf(dev, "MSI count : %d\n", msic);
1766 if (legacy_intr != 0)
1768 if (msi_disable == 0 && msic > 0) {
1770 if (pci_alloc_msi(dev, &msir) == 0) {
1772 sc->msk_pflags |= MSK_FLAG_MSI;
1773 sc->msk_irq_spec = msk_irq_spec_msi;
1775 pci_release_msi(dev);
1779 error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1781 device_printf(dev, "couldn't allocate IRQ resources\n");
	if ((error = msk_status_dma_alloc(sc)) != 0)
		goto fail;
1788 /* Set base interrupt mask. */
1789 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1790 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1791 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
	/* Reset the adapter. */
	mskc_reset(sc);

	if ((error = mskc_setup_rambuffer(sc)) != 0)
		goto fail;
1799 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1800 if (sc->msk_devs[MSK_PORT_A] == NULL) {
1801 device_printf(dev, "failed to add child for PORT_A\n");
1805 mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
1807 device_printf(dev, "failed to allocate memory for "
1808 "ivars of PORT_A\n");
1812 mmd->port = MSK_PORT_A;
1813 mmd->pmd = sc->msk_pmd;
1814 mmd->mii_flags |= MIIF_DOPAUSE | MIIF_FORCEPAUSE;
1815 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1816 mmd->mii_flags |= MIIF_HAVEFIBER;
1817 if (sc->msk_pmd == 'P')
1818 mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
1819 device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd);
1821 if (sc->msk_num_port > 1) {
1822 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1823 if (sc->msk_devs[MSK_PORT_B] == NULL) {
1824 device_printf(dev, "failed to add child for PORT_B\n");
1828 mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
1830 device_printf(dev, "failed to allocate memory for "
1831 "ivars of PORT_B\n");
1835 mmd->port = MSK_PORT_B;
1836 mmd->pmd = sc->msk_pmd;
1837 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1838 mmd->mii_flags |= MIIF_HAVEFIBER;
1839 if (sc->msk_pmd == 'P')
1840 mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
1841 device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
1844 error = bus_generic_attach(dev);
1846 device_printf(dev, "failed to attach port(s)\n");
1850 /* Hook interrupt last to avoid having to lock softc. */
1851 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
1852 INTR_MPSAFE, NULL, msk_intr, sc, &sc->msk_intrhand);
1854 device_printf(dev, "couldn't set up interrupt handler\n");
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
msk_detach(device_t dev)
{
1874 struct msk_softc *sc;
1875 struct msk_if_softc *sc_if;
	struct ifnet *ifp;

	sc_if = device_get_softc(dev);
1879 KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
1880 ("msk mutex not initialized in msk_detach"));
	MSK_IF_LOCK(sc_if);
	ifp = sc_if->msk_ifp;
1884 if (device_is_attached(dev)) {
		sc_if->msk_flags |= MSK_FLAG_DETACH;
		msk_stop(sc_if);
1888 /* Can't hold locks while calling detach. */
1889 MSK_IF_UNLOCK(sc_if);
1890 callout_drain(&sc_if->msk_tick_ch);
		ether_ifdetach(ifp);
		MSK_IF_LOCK(sc_if);
	}

	/*
1896 * We're generally called from mskc_detach() which is using
1897 * device_delete_child() to get to here. It's already trashed
1898 * miibus for us, so don't do it here or we'll panic.
1900 * if (sc_if->msk_miibus != NULL) {
1901 * device_delete_child(dev, sc_if->msk_miibus);
	 *	sc_if->msk_miibus = NULL;
	 * }
	 */
1906 msk_rx_dma_jfree(sc_if);
1907 msk_txrx_dma_free(sc_if);
	bus_generic_detach(dev);
	if (ifp)
		if_free(ifp);
1912 sc = sc_if->msk_softc;
1913 sc->msk_if[sc_if->msk_port] = NULL;
	MSK_IF_UNLOCK(sc_if);

	return (0);
}

static int
mskc_detach(device_t dev)
{
1922 struct msk_softc *sc;
1924 sc = device_get_softc(dev);
1925 KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
1927 if (device_is_alive(dev)) {
1928 if (sc->msk_devs[MSK_PORT_A] != NULL) {
1929 free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
			    M_DEVBUF);
			device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
		}
1933 if (sc->msk_devs[MSK_PORT_B] != NULL) {
1934 free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
			    M_DEVBUF);
			device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
		}
		bus_generic_detach(dev);
	}
1941 /* Disable all interrupts. */
1942 CSR_WRITE_4(sc, B0_IMSK, 0);
1943 CSR_READ_4(sc, B0_IMSK);
1944 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1945 CSR_READ_4(sc, B0_HWE_IMSK);
	/* LED Off. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
1950 /* Put hardware reset. */
1951 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1953 msk_status_dma_free(sc);
1955 if (sc->msk_intrhand) {
1956 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand);
1957 sc->msk_intrhand = NULL;
1959 bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1960 if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
1961 pci_release_msi(dev);
1962 bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
	mtx_destroy(&sc->msk_mtx);

	return (0);
}
1968 struct msk_dmamap_arg {
	bus_addr_t msk_busaddr;
};

static void
msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct msk_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->msk_busaddr = segs[0].ds_addr;
}
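/*
 * Note: for memory allocated with bus_dmamem_alloc(), bus_dmamap_load()
 * never defers the mapping, so the callback above has filled in
 * ctx.msk_busaddr by the time bus_dmamap_load() returns; see bus_dma(9).
 */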
1983 /* Create status DMA region. */
static int
msk_status_dma_alloc(struct msk_softc *sc)
{
	struct msk_dmamap_arg ctx;
	int error;
1990 error = bus_dma_tag_create(
1991 bus_get_dma_tag(sc->msk_dev), /* parent */
1992 MSK_STAT_ALIGN, 0, /* alignment, boundary */
1993 BUS_SPACE_MAXADDR, /* lowaddr */
1994 BUS_SPACE_MAXADDR, /* highaddr */
1995 NULL, NULL, /* filter, filterarg */
1996 MSK_STAT_RING_SZ, /* maxsize */
	    1,			/* nsegments */
	    MSK_STAT_RING_SZ,	/* maxsegsize */
	    0,			/* flags */
	    NULL, NULL,		/* lockfunc, lockarg */
	    &sc->msk_stat_tag);
	if (error != 0) {
2003 device_printf(sc->msk_dev,
2004 "failed to create status DMA tag\n");
2008 /* Allocate DMA'able memory and load the DMA map for status ring. */
2009 error = bus_dmamem_alloc(sc->msk_stat_tag,
2010 (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
	    BUS_DMA_ZERO, &sc->msk_stat_map);
	if (error != 0) {
2013 device_printf(sc->msk_dev,
2014 "failed to allocate DMA'able memory for status ring\n");
2018 ctx.msk_busaddr = 0;
2019 error = bus_dmamap_load(sc->msk_stat_tag,
2020 sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
2021 msk_dmamap_cb, &ctx, 0);
2023 device_printf(sc->msk_dev,
2024 "failed to load DMA'able memory for status ring\n");
2027 sc->msk_stat_ring_paddr = ctx.msk_busaddr;
2033 msk_status_dma_free(struct msk_softc *sc)
2036 /* Destroy status block. */
2037 if (sc->msk_stat_tag) {
2038 if (sc->msk_stat_map) {
2039 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
2040 if (sc->msk_stat_ring) {
2041 bus_dmamem_free(sc->msk_stat_tag,
2042 sc->msk_stat_ring, sc->msk_stat_map);
2043 sc->msk_stat_ring = NULL;
2045 sc->msk_stat_map = NULL;
2047 bus_dma_tag_destroy(sc->msk_stat_tag);
2048 sc->msk_stat_tag = NULL;
2053 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
2055 struct msk_dmamap_arg ctx;
2056 struct msk_txdesc *txd;
2057 struct msk_rxdesc *rxd;
2061 /* Create parent DMA tag. */
2064 * It seems that the Yukon II supports full 64-bit DMA operations, but
2065 * it needs two descriptors (list elements) per 64-bit DMA operation.
2066 * Since we don't know in advance which DMA address mapping (32-bit or
2067 * 64-bit) will be used for each mbuf, we limit its DMA space to the
2068 * 32-bit address range. Otherwise, we would have to check which
2069 * DMA address is in use and chain another descriptor for the
2070 * 64-bit DMA operation, which also means the descriptor ring size
2071 * becomes variable. Limiting DMA addresses to the 32-bit address space
2072 * greatly simplifies descriptor handling and may even increase
2073 * performance a bit due to more efficient handling of descriptors.
2074 * Apart from complicating the checksum offload mechanism, it seems
2075 * like a really bad idea to use a separate descriptor for each 64-bit
2076 * DMA operation just to save a little descriptor memory. Anyway, I've
2077 * never seen this exotic scheme on any other Ethernet interface hardware.
2079 error = bus_dma_tag_create(
2080 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
2081 1, 0, /* alignment, boundary */
2082 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2083 BUS_SPACE_MAXADDR, /* highaddr */
2084 NULL, NULL, /* filter, filterarg */
2085 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2087 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
2089 NULL, NULL, /* lockfunc, lockarg */
2090 &sc_if->msk_cdata.msk_parent_tag);
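/*
 * Every ring and buffer tag created below is a child of this parent
 * tag; busdma combines a child's restrictions with its parent's, so
 * they all effectively inherit the BUS_SPACE_MAXADDR_32BIT lowaddr
 * limit even though they pass BUS_SPACE_MAXADDR themselves.
 */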
2092 device_printf(sc_if->msk_if_dev,
2093 "failed to create parent DMA tag\n");
2096 /* Create tag for Tx ring. */
2097 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2098 MSK_RING_ALIGN, 0, /* alignment, boundary */
2099 BUS_SPACE_MAXADDR, /* lowaddr */
2100 BUS_SPACE_MAXADDR, /* highaddr */
2101 NULL, NULL, /* filter, filterarg */
2102 MSK_TX_RING_SZ, /* maxsize */
2104 MSK_TX_RING_SZ, /* maxsegsize */
2106 NULL, NULL, /* lockfunc, lockarg */
2107 &sc_if->msk_cdata.msk_tx_ring_tag);
2109 device_printf(sc_if->msk_if_dev,
2110 "failed to create Tx ring DMA tag\n");
2114 /* Create tag for Rx ring. */
2115 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2116 MSK_RING_ALIGN, 0, /* alignment, boundary */
2117 BUS_SPACE_MAXADDR, /* lowaddr */
2118 BUS_SPACE_MAXADDR, /* highaddr */
2119 NULL, NULL, /* filter, filterarg */
2120 MSK_RX_RING_SZ, /* maxsize */
2122 MSK_RX_RING_SZ, /* maxsegsize */
2124 NULL, NULL, /* lockfunc, lockarg */
2125 &sc_if->msk_cdata.msk_rx_ring_tag);
2127 device_printf(sc_if->msk_if_dev,
2128 "failed to create Rx ring DMA tag\n");
2132 /* Create tag for Tx buffers. */
2133 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2134 1, 0, /* alignment, boundary */
2135 BUS_SPACE_MAXADDR, /* lowaddr */
2136 BUS_SPACE_MAXADDR, /* highaddr */
2137 NULL, NULL, /* filter, filterarg */
2138 MSK_TSO_MAXSIZE, /* maxsize */
2139 MSK_MAXTXSEGS, /* nsegments */
2140 MSK_TSO_MAXSGSIZE, /* maxsegsize */
2142 NULL, NULL, /* lockfunc, lockarg */
2143 &sc_if->msk_cdata.msk_tx_tag);
2145 device_printf(sc_if->msk_if_dev,
2146 "failed to create Tx DMA tag\n");
2152 * Work around a hardware hang which seems to happen when the Rx
2153 * buffer is not aligned on a multiple of the FIFO word size (8 bytes).
2155 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2156 rxalign = MSK_RX_BUF_ALIGN;
2157 /* Create tag for Rx buffers. */
2158 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2159 rxalign, 0, /* alignment, boundary */
2160 BUS_SPACE_MAXADDR, /* lowaddr */
2161 BUS_SPACE_MAXADDR, /* highaddr */
2162 NULL, NULL, /* filter, filterarg */
2163 MCLBYTES, /* maxsize */
2165 MCLBYTES, /* maxsegsize */
2167 NULL, NULL, /* lockfunc, lockarg */
2168 &sc_if->msk_cdata.msk_rx_tag);
2170 device_printf(sc_if->msk_if_dev,
2171 "failed to create Rx DMA tag\n");
2175 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
2176 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2177 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2178 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2180 device_printf(sc_if->msk_if_dev,
2181 "failed to allocate DMA'able memory for Tx ring\n");
2185 ctx.msk_busaddr = 0;
2186 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2187 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2188 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2190 device_printf(sc_if->msk_if_dev,
2191 "failed to load DMA'able memory for Tx ring\n");
2194 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2196 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
2197 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2198 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2199 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2201 device_printf(sc_if->msk_if_dev,
2202 "failed to allocate DMA'able memory for Rx ring\n");
2206 ctx.msk_busaddr = 0;
2207 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2208 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2209 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2211 device_printf(sc_if->msk_if_dev,
2212 "failed to load DMA'able memory for Rx ring\n");
2215 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2217 /* Create DMA maps for Tx buffers. */
2218 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2219 txd = &sc_if->msk_cdata.msk_txdesc[i];
2221 txd->tx_dmamap = NULL;
2222 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2225 device_printf(sc_if->msk_if_dev,
2226 "failed to create Tx dmamap\n");
2230 /* Create DMA maps for Rx buffers. */
2231 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2232 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2233 device_printf(sc_if->msk_if_dev,
2234 "failed to create spare Rx dmamap\n");
2237 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2238 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2240 rxd->rx_dmamap = NULL;
2241 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2244 device_printf(sc_if->msk_if_dev,
2245 "failed to create Rx dmamap\n");
2255 msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
2257 struct msk_dmamap_arg ctx;
2258 struct msk_rxdesc *jrxd;
2262 if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
2263 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2264 device_printf(sc_if->msk_if_dev,
2265 "disabling jumbo frame support\n");
2268 /* Create tag for jumbo Rx ring. */
2269 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2270 MSK_RING_ALIGN, 0, /* alignment, boundary */
2271 BUS_SPACE_MAXADDR, /* lowaddr */
2272 BUS_SPACE_MAXADDR, /* highaddr */
2273 NULL, NULL, /* filter, filterarg */
2274 MSK_JUMBO_RX_RING_SZ, /* maxsize */
2276 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */
2278 NULL, NULL, /* lockfunc, lockarg */
2279 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2281 device_printf(sc_if->msk_if_dev,
2282 "failed to create jumbo Rx ring DMA tag\n");
2288 * Work around a hardware hang which seems to happen when the Rx
2289 * buffer is not aligned on a multiple of the FIFO word size (8 bytes).
2291 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2292 rxalign = MSK_RX_BUF_ALIGN;
2293 /* Create tag for jumbo Rx buffers. */
2294 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2295 rxalign, 0, /* alignment, boundary */
2296 BUS_SPACE_MAXADDR, /* lowaddr */
2297 BUS_SPACE_MAXADDR, /* highaddr */
2298 NULL, NULL, /* filter, filterarg */
2299 MJUM9BYTES, /* maxsize */
2301 MJUM9BYTES, /* maxsegsize */
2303 NULL, NULL, /* lockfunc, lockarg */
2304 &sc_if->msk_cdata.msk_jumbo_rx_tag);
2306 device_printf(sc_if->msk_if_dev,
2307 "failed to create jumbo Rx DMA tag\n");
2311 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2312 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2313 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2314 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2315 &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2317 device_printf(sc_if->msk_if_dev,
2318 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2322 ctx.msk_busaddr = 0;
2323 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2324 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2325 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2326 msk_dmamap_cb, &ctx, 0);
2328 device_printf(sc_if->msk_if_dev,
2329 "failed to load DMA'able memory for jumbo Rx ring\n");
2332 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2334 /* Create DMA maps for jumbo Rx buffers. */
2335 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2336 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2337 device_printf(sc_if->msk_if_dev,
2338 "failed to create spare jumbo Rx dmamap\n");
2341 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2342 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2344 jrxd->rx_dmamap = NULL;
2345 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2348 device_printf(sc_if->msk_if_dev,
2349 "failed to create jumbo Rx dmamap\n");
2357 msk_rx_dma_jfree(sc_if);
2358 device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
2359 "due to resource shortage\n");
2360 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2365 msk_txrx_dma_free(struct msk_if_softc *sc_if)
2367 struct msk_txdesc *txd;
2368 struct msk_rxdesc *rxd;
2372 if (sc_if->msk_cdata.msk_tx_ring_tag) {
2373 if (sc_if->msk_cdata.msk_tx_ring_map)
2374 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2375 sc_if->msk_cdata.msk_tx_ring_map);
2376 if (sc_if->msk_cdata.msk_tx_ring_map &&
2377 sc_if->msk_rdata.msk_tx_ring)
2378 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2379 sc_if->msk_rdata.msk_tx_ring,
2380 sc_if->msk_cdata.msk_tx_ring_map);
2381 sc_if->msk_rdata.msk_tx_ring = NULL;
2382 sc_if->msk_cdata.msk_tx_ring_map = NULL;
2383 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2384 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2387 if (sc_if->msk_cdata.msk_rx_ring_tag) {
2388 if (sc_if->msk_cdata.msk_rx_ring_map)
2389 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2390 sc_if->msk_cdata.msk_rx_ring_map);
2391 if (sc_if->msk_cdata.msk_rx_ring_map &&
2392 sc_if->msk_rdata.msk_rx_ring)
2393 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2394 sc_if->msk_rdata.msk_rx_ring,
2395 sc_if->msk_cdata.msk_rx_ring_map);
2396 sc_if->msk_rdata.msk_rx_ring = NULL;
2397 sc_if->msk_cdata.msk_rx_ring_map = NULL;
2398 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2399 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2402 if (sc_if->msk_cdata.msk_tx_tag) {
2403 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2404 txd = &sc_if->msk_cdata.msk_txdesc[i];
2405 if (txd->tx_dmamap) {
2406 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2408 txd->tx_dmamap = NULL;
2411 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2412 sc_if->msk_cdata.msk_tx_tag = NULL;
2415 if (sc_if->msk_cdata.msk_rx_tag) {
2416 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2417 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2418 if (rxd->rx_dmamap) {
2419 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2421 rxd->rx_dmamap = NULL;
2424 if (sc_if->msk_cdata.msk_rx_sparemap) {
2425 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2426 sc_if->msk_cdata.msk_rx_sparemap);
2427 sc_if->msk_cdata.msk_rx_sparemap = 0;
2429 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2430 sc_if->msk_cdata.msk_rx_tag = NULL;
2432 if (sc_if->msk_cdata.msk_parent_tag) {
2433 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2434 sc_if->msk_cdata.msk_parent_tag = NULL;
2439 msk_rx_dma_jfree(struct msk_if_softc *sc_if)
2441 struct msk_rxdesc *jrxd;
2444 /* Jumbo Rx ring. */
2445 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2446 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2447 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2448 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2449 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2450 sc_if->msk_rdata.msk_jumbo_rx_ring)
2451 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2452 sc_if->msk_rdata.msk_jumbo_rx_ring,
2453 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2454 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2455 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2456 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2457 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2459 /* Jumbo Rx buffers. */
2460 if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2461 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2462 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2463 if (jrxd->rx_dmamap) {
2465 sc_if->msk_cdata.msk_jumbo_rx_tag,
2467 jrxd->rx_dmamap = NULL;
2470 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2471 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2472 sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2473 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2475 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2476 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2481 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2483 struct msk_txdesc *txd, *txd_last;
2484 struct msk_tx_desc *tx_le;
2487 bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2488 uint32_t control, csum, prod, si;
2489 uint16_t offset, tcp_offset, tso_mtu;
2490 int error, i, nseg, tso;
2492 MSK_IF_LOCK_ASSERT(sc_if);
2494 tcp_offset = offset = 0;
2496 if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2497 (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
2498 ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
2499 (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
2501 * Since the mbuf has no protocol-specific structure information
2502 * in it, we have to inspect the protocol headers here to
2503 * set up TSO and checksum offload. I don't know why Marvell
2504 * made such a decision in the chip design, because other GigE
2505 * hardware normally takes care of all these chores in
2506 * hardware. However, the TSO performance of the Yukon II is good
2507 * enough that it's worth implementing.
2509 struct ether_header *eh;
2513 if (M_WRITABLE(m) == 0) {
2514 /* Get a writable copy. */
2515 m = m_dup(*m_head, M_DONTWAIT);
2524 offset = sizeof(struct ether_header);
2525 m = m_pullup(m, offset);
2530 eh = mtod(m, struct ether_header *);
2531 /* Check if hardware VLAN insertion is off. */
2532 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2533 offset = sizeof(struct ether_vlan_header);
2534 m = m_pullup(m, offset);
2540 m = m_pullup(m, offset + sizeof(struct ip));
2545 ip = (struct ip *)(mtod(m, char *) + offset);
2546 offset += (ip->ip_hl << 2);
2547 tcp_offset = offset;
2548 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2549 m = m_pullup(m, offset + sizeof(struct tcphdr));
2554 tcp = (struct tcphdr *)(mtod(m, char *) + offset);
2555 offset += (tcp->th_off << 2);
2556 } else if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2557 (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
2558 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2560 * The Yukon II seems to have a Tx checksum offload bug for
2561 * small TCP packets of less than 60 bytes in size
2562 * (e.g. TCP window probe packets, pure ACK packets).
2563 * Common workarounds such as zero-padding the frame up to
2564 * the minimum Ethernet frame size didn't work at all.
2566 * Instead of disabling checksum offload completely, we
2567 * resort to a software checksum routine when we encounter
2569 * short TCP frames. Short UDP packets appear to be handled
2570 * correctly by the Yukon II. I also assume this bug does not
2571 * occur on controllers that use the newer descriptor format
2572 * or automatic Tx checksum calculation.
2574 m = m_pullup(m, offset + sizeof(struct tcphdr));
2579 *(uint16_t *)(m->m_data + offset +
2580 m->m_pkthdr.csum_data) = in_cksum_skip(m,
2581 m->m_pkthdr.len, offset);
2582 m->m_pkthdr.csum_flags &= ~CSUM_TCP;
2587 prod = sc_if->msk_cdata.msk_tx_prod;
2588 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2590 map = txd->tx_dmamap;
2591 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2592 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2593 if (error == EFBIG) {
2594 m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
2601 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2602 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2608 } else if (error != 0)
2616 /* Check number of available descriptors. */
2617 if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2618 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2619 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2627 /* Check TSO support. */
2628 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2629 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2630 tso_mtu = m->m_pkthdr.tso_segsz;
2632 tso_mtu = offset + m->m_pkthdr.tso_segsz;
2633 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2634 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2635 tx_le->msk_addr = htole32(tso_mtu);
2636 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2637 tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
2639 tx_le->msk_control =
2640 htole32(OP_LRGLEN | HW_OWNER);
2641 sc_if->msk_cdata.msk_tx_cnt++;
2642 MSK_INC(prod, MSK_TX_RING_CNT);
2643 sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
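/*
 * The MSS/LRGLEN list element above is emitted only when the TSO
 * MTU changes; msk_tso_mtu caches the last value programmed so that
 * back-to-back TSO packets with the same segment size don't each
 * consume an extra list element.
 */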
2647 /* Check if we have a VLAN tag to insert. */
2648 if ((m->m_flags & M_VLANTAG) != 0) {
2649 if (tx_le == NULL) {
2650 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2651 tx_le->msk_addr = htole32(0);
2652 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2653 htons(m->m_pkthdr.ether_vtag));
2654 sc_if->msk_cdata.msk_tx_cnt++;
2655 MSK_INC(prod, MSK_TX_RING_CNT);
2657 tx_le->msk_control |= htole32(OP_VLAN |
2658 htons(m->m_pkthdr.ether_vtag));
2660 control |= INS_VLAN;
2662 /* Check if we have to handle checksum offload. */
2663 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
2664 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
2667 control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2668 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2670 /* Checksum write position. */
2671 csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff;
2672 /* Checksum start position. */
2673 csum |= (uint32_t)tcp_offset << 16;
2674 if (csum != sc_if->msk_cdata.msk_last_csum) {
2675 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2676 tx_le->msk_addr = htole32(csum);
2677 tx_le->msk_control = htole32(1 << 16 |
2678 (OP_TCPLISW | HW_OWNER));
2679 sc_if->msk_cdata.msk_tx_cnt++;
2680 MSK_INC(prod, MSK_TX_RING_CNT);
2681 sc_if->msk_cdata.msk_last_csum = csum;
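/*
 * Layout of the checksum control word built above, as implied by the
 * code (not taken from vendor documentation):
 *
 *	bits 31..16: checksum start offset (tcp_offset)
 *	bits 15..0 : checksum write offset (tcp_offset + csum_data)
 *
 * As with the TSO MTU element, the OP_TCPLISW list element is only
 * emitted when the offsets change; msk_last_csum caches the previous
 * value.
 */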
2687 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2688 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2690 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2693 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2695 sc_if->msk_cdata.msk_tx_cnt++;
2696 MSK_INC(prod, MSK_TX_RING_CNT);
2698 for (i = 1; i < nseg; i++) {
2699 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2700 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2701 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2702 OP_BUFFER | HW_OWNER);
2703 sc_if->msk_cdata.msk_tx_cnt++;
2704 MSK_INC(prod, MSK_TX_RING_CNT);
2706 /* Update producer index. */
2707 sc_if->msk_cdata.msk_tx_prod = prod;
2709 /* Set EOP on the last descriptor. */
2710 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2711 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2712 tx_le->msk_control |= htole32(EOP);
2714 /* Hand ownership of the first descriptor to the hardware. */
2715 tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2716 tx_le->msk_control |= htole32(HW_OWNER);
2718 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2719 map = txd_last->tx_dmamap;
2720 txd_last->tx_dmamap = txd->tx_dmamap;
2721 txd->tx_dmamap = map;
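/*
 * The dmamap swap above moves the map that the mbuf was actually
 * loaded with from the first descriptor slot to the last one, which
 * is also where the mbuf pointer is stored; msk_txeof() can then
 * unload the correct map when it processes the EOP descriptor.
 */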
2724 /* Sync descriptors. */
2725 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2726 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2727 sc_if->msk_cdata.msk_tx_ring_map,
2728 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2734 msk_start(struct ifnet *ifp)
2736 struct msk_if_softc *sc_if;
2738 sc_if = ifp->if_softc;
2740 msk_start_locked(ifp);
2741 MSK_IF_UNLOCK(sc_if);
2745 msk_start_locked(struct ifnet *ifp)
2747 struct msk_if_softc *sc_if;
2748 struct mbuf *m_head;
2751 sc_if = ifp->if_softc;
2752 MSK_IF_LOCK_ASSERT(sc_if);
2754 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2755 IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0)
2758 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2759 sc_if->msk_cdata.msk_tx_cnt <
2760 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2761 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2765 * Pack the data into the transmit ring. If we
2766 * don't have room, set the OACTIVE flag and wait
2767 * for the NIC to drain the ring.
2769 if (msk_encap(sc_if, &m_head) != 0) {
2772 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2773 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2779 * If there's a BPF listener, bounce a copy of this frame to it.
2782 ETHER_BPF_MTAP(ifp, m_head);
2787 CSR_WRITE_2(sc_if->msk_softc,
2788 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2789 sc_if->msk_cdata.msk_tx_prod);
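/*
 * Writing the new producer index to the prefetch unit's put-index
 * register acts as the doorbell: the hardware fetches the freshly
 * built list elements from host memory and starts transmission.
 */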
2791 /* Set a timeout in case the chip goes out to lunch. */
2792 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
2797 msk_watchdog(struct msk_if_softc *sc_if)
2801 MSK_IF_LOCK_ASSERT(sc_if);
2803 if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
2805 ifp = sc_if->msk_ifp;
2806 if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
2808 if_printf(sc_if->msk_ifp, "watchdog timeout "
2811 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2812 msk_init_locked(sc_if);
2816 if_printf(ifp, "watchdog timeout\n");
2818 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2819 msk_init_locked(sc_if);
2820 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2821 msk_start_locked(ifp);
2825 mskc_shutdown(device_t dev)
2827 struct msk_softc *sc;
2830 sc = device_get_softc(dev);
2832 for (i = 0; i < sc->msk_num_port; i++) {
2833 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2834 ((sc->msk_if[i]->msk_ifp->if_drv_flags &
2835 IFF_DRV_RUNNING) != 0))
2836 msk_stop(sc->msk_if[i]);
2840 /* Put the hardware into reset. */
2841 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2846 mskc_suspend(device_t dev)
2848 struct msk_softc *sc;
2851 sc = device_get_softc(dev);
2855 for (i = 0; i < sc->msk_num_port; i++) {
2856 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2857 ((sc->msk_if[i]->msk_ifp->if_drv_flags &
2858 IFF_DRV_RUNNING) != 0))
2859 msk_stop(sc->msk_if[i]);
2862 /* Disable all interrupts. */
2863 CSR_WRITE_4(sc, B0_IMSK, 0);
2864 CSR_READ_4(sc, B0_IMSK);
2865 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2866 CSR_READ_4(sc, B0_HWE_IMSK);
2868 msk_phy_power(sc, MSK_PHY_POWERDOWN);
2870 /* Put the hardware into reset. */
2871 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2872 sc->msk_pflags |= MSK_FLAG_SUSPEND;
2880 mskc_resume(device_t dev)
2882 struct msk_softc *sc;
2885 sc = device_get_softc(dev);
2890 for (i = 0; i < sc->msk_num_port; i++) {
2891 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2892 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
2893 sc->msk_if[i]->msk_ifp->if_drv_flags &=
2895 msk_init_locked(sc->msk_if[i]);
2898 sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
2905 #ifndef __NO_STRICT_ALIGNMENT
2906 static __inline void
2907 msk_fixup_rx(struct mbuf *m)
2910 uint16_t *src, *dst;
2912 src = mtod(m, uint16_t *);
2915 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2918 m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
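/*
 * The hardware requires the Rx buffer to stay MSK_RX_BUF_ALIGN (8)
 * byte aligned, which leaves the IP header misaligned behind the
 * 14-byte Ethernet header. The copy loop above shifts the frame
 * backward by 16-bit words, and the m_data adjustment leaves the
 * payload ETHER_ALIGN (2) bytes off the original alignment so that
 * the IP header becomes 32-bit aligned.
 */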
2923 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
2928 struct msk_rxdesc *rxd;
2931 ifp = sc_if->msk_ifp;
2933 MSK_IF_LOCK_ASSERT(sc_if);
2935 cons = sc_if->msk_cdata.msk_rx_cons;
2937 rxlen = status >> 16;
2938 if ((status & GMR_FS_VLAN) != 0 &&
2939 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2940 rxlen -= ETHER_VLAN_ENCAP_LEN;
2941 if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
2943 * For controllers that return a bogus status code,
2944 * just do a minimal check and let the upper stack
2945 * handle this frame.
2947 if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
2949 msk_discard_rxbuf(sc_if, cons);
2952 } else if (len > sc_if->msk_framesize ||
2953 ((status & GMR_FS_ANY_ERR) != 0) ||
2954 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
2955 /* Don't count flow-control packets as errors. */
2956 if ((status & GMR_FS_GOOD_FC) == 0)
2958 msk_discard_rxbuf(sc_if, cons);
2961 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
2963 if (msk_newbuf(sc_if, cons) != 0) {
2965 /* Reuse old buffer. */
2966 msk_discard_rxbuf(sc_if, cons);
2969 m->m_pkthdr.rcvif = ifp;
2970 m->m_pkthdr.len = m->m_len = len;
2971 #ifndef __NO_STRICT_ALIGNMENT
2972 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2976 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
2977 (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
2978 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2979 if ((control & CSS_IPV4_CSUM_OK) != 0)
2980 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2981 if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
2982 (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
2983 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2985 m->m_pkthdr.csum_data = 0xffff;
2988 /* Check for VLAN tagged packets. */
2989 if ((status & GMR_FS_VLAN) != 0 &&
2990 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2991 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
2992 m->m_flags |= M_VLANTAG;
2994 MSK_IF_UNLOCK(sc_if);
2995 (*ifp->if_input)(ifp, m);
2999 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
3000 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
3004 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
3009 struct msk_rxdesc *jrxd;
3012 ifp = sc_if->msk_ifp;
3014 MSK_IF_LOCK_ASSERT(sc_if);
3016 cons = sc_if->msk_cdata.msk_rx_cons;
3018 rxlen = status >> 16;
3019 if ((status & GMR_FS_VLAN) != 0 &&
3020 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3021 rxlen -= ETHER_VLAN_ENCAP_LEN;
3022 if (len > sc_if->msk_framesize ||
3023 ((status & GMR_FS_ANY_ERR) != 0) ||
3024 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3025 /* Don't count flow-control packets as errors. */
3026 if ((status & GMR_FS_GOOD_FC) == 0)
3028 msk_discard_jumbo_rxbuf(sc_if, cons);
3031 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
3033 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
3035 /* Reuse old buffer. */
3036 msk_discard_jumbo_rxbuf(sc_if, cons);
3039 m->m_pkthdr.rcvif = ifp;
3040 m->m_pkthdr.len = m->m_len = len;
3041 #ifndef __NO_STRICT_ALIGNMENT
3042 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3046 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
3047 (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
3048 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3049 if ((control & CSS_IPV4_CSUM_OK) != 0)
3050 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3051 if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
3052 (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
3053 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3055 m->m_pkthdr.csum_data = 0xffff;
3058 /* Check for VLAN tagged packets. */
3059 if ((status & GMR_FS_VLAN) != 0 &&
3060 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3061 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3062 m->m_flags |= M_VLANTAG;
3064 MSK_IF_UNLOCK(sc_if);
3065 (*ifp->if_input)(ifp, m);
3069 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
3070 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
3074 msk_txeof(struct msk_if_softc *sc_if, int idx)
3076 struct msk_txdesc *txd;
3077 struct msk_tx_desc *cur_tx;
3082 MSK_IF_LOCK_ASSERT(sc_if);
3084 ifp = sc_if->msk_ifp;
3086 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
3087 sc_if->msk_cdata.msk_tx_ring_map,
3088 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3090 * Go through our tx ring and free mbufs for those
3091 * frames that have been sent.
3093 cons = sc_if->msk_cdata.msk_tx_cons;
3095 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
3096 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
3099 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
3100 control = le32toh(cur_tx->msk_control);
3101 sc_if->msk_cdata.msk_tx_cnt--;
3102 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3103 if ((control & EOP) == 0)
3105 txd = &sc_if->msk_cdata.msk_txdesc[cons];
3106 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3107 BUS_DMASYNC_POSTWRITE);
3108 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3111 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3118 sc_if->msk_cdata.msk_tx_cons = cons;
3119 if (sc_if->msk_cdata.msk_tx_cnt == 0)
3120 sc_if->msk_watchdog_timer = 0;
3121 /* No need to sync LEs as we didn't update them. */
3126 msk_tick(void *xsc_if)
3128 struct msk_if_softc *sc_if;
3129 struct mii_data *mii;
3133 MSK_IF_LOCK_ASSERT(sc_if);
3135 mii = device_get_softc(sc_if->msk_miibus);
3138 if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0)
3139 msk_miibus_statchg(sc_if->msk_if_dev);
3140 msk_handle_events(sc_if->msk_softc);
3141 msk_watchdog(sc_if);
3142 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3146 msk_intr_phy(struct msk_if_softc *sc_if)
3150 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3151 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3152 /* Handle FIFO Underrun/Overflow? */
3153 if ((status & PHY_M_IS_FIFO_ERROR))
3154 device_printf(sc_if->msk_if_dev,
3155 "PHY FIFO underrun/overflow.\n");
3159 msk_intr_gmac(struct msk_if_softc *sc_if)
3161 struct msk_softc *sc;
3164 sc = sc_if->msk_softc;
3165 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3167 /* GMAC Rx FIFO overrun. */
3168 if ((status & GM_IS_RX_FF_OR) != 0)
3169 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3171 /* GMAC Tx FIFO underrun. */
3172 if ((status & GM_IS_TX_FF_UR) != 0) {
3173 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3175 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3178 * In case of a Tx underrun, we may need to flush/reset the
3179 * Tx MAC, but that would also require resynchronization
3180 * with the status LEs. Reinitializing the status LEs would
3181 * affect the other port in a dual-MAC configuration, so it
3182 * should be avoided as much as possible.
3183 * Due to the lack of documentation it's all vague guesswork,
3184 * but it needs more investigation.
3190 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3192 struct msk_softc *sc;
3194 sc = sc_if->msk_softc;
3195 if ((status & Y2_IS_PAR_RD1) != 0) {
3196 device_printf(sc_if->msk_if_dev,
3197 "RAM buffer read parity error\n");
3199 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3202 if ((status & Y2_IS_PAR_WR1) != 0) {
3203 device_printf(sc_if->msk_if_dev,
3204 "RAM buffer write parity error\n");
3206 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3209 if ((status & Y2_IS_PAR_MAC1) != 0) {
3210 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3212 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3215 if ((status & Y2_IS_PAR_RX1) != 0) {
3216 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3218 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3220 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3221 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3223 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3228 msk_intr_hwerr(struct msk_softc *sc)
3231 uint32_t tlphead[4];
3233 status = CSR_READ_4(sc, B0_HWE_ISRC);
3234 /* Time Stamp timer overflow. */
3235 if ((status & Y2_IS_TIST_OV) != 0)
3236 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3237 if ((status & Y2_IS_PCI_NEXP) != 0) {
3239 * A PCI Express error occurred which is not described in the
3241 * PEX spec. This error is also mapped to either the Master Abort
3242 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3243 * can only be cleared there.
3245 device_printf(sc->msk_dev,
3246 "PCI Express protocol violation error\n");
3249 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3252 if ((status & Y2_IS_MST_ERR) != 0)
3253 device_printf(sc->msk_dev,
3254 "unexpected IRQ Master error\n");
3256 device_printf(sc->msk_dev,
3257 "unexpected IRQ Status error\n");
3258 /* Reset all bits in the PCI status register. */
3259 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3260 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3261 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3262 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3263 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3264 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3267 /* Check for PCI Express Uncorrectable Error. */
3268 if ((status & Y2_IS_PCI_EXP) != 0) {
3272 * On PCI Express, bus bridges are called root complexes (RC).
3273 * PCI Express errors are recognized by the root complex too,
3274 * which requests the system to handle the problem. After an
3275 * error occurs, it may be that no access to the adapter
3276 * can be performed any longer.
3279 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3280 if ((v32 & PEX_UNSUP_REQ) != 0) {
3281 /* Ignore unsupported request error. */
3282 device_printf(sc->msk_dev,
3283 "Uncorrectable PCI Express error\n");
3285 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3288 /* Get the TLP header from the Log Registers. */
3289 for (i = 0; i < 4; i++)
3290 tlphead[i] = CSR_PCI_READ_4(sc,
3291 PEX_HEADER_LOG + i * 4);
3292 /* Check for a vendor-defined broadcast message. */
3293 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3294 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3295 CSR_WRITE_4(sc, B0_HWE_IMSK,
3296 sc->msk_intrhwemask);
3297 CSR_READ_4(sc, B0_HWE_IMSK);
3300 /* Clear the interrupt. */
3301 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3302 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3303 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3306 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3307 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3308 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3309 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3312 static __inline void
3313 msk_rxput(struct msk_if_softc *sc_if)
3315 struct msk_softc *sc;
3317 sc = sc_if->msk_softc;
3318 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
3320 sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3321 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3322 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3325 sc_if->msk_cdata.msk_rx_ring_tag,
3326 sc_if->msk_cdata.msk_rx_ring_map,
3327 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3328 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3329 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3333 msk_handle_events(struct msk_softc *sc)
3335 struct msk_if_softc *sc_if;
3337 struct msk_stat_desc *sd;
3338 uint32_t control, status;
3339 int cons, len, port, rxprog;
3341 if (sc->msk_stat_cons == CSR_READ_2(sc, STAT_PUT_IDX))
3344 /* Sync status LEs. */
3345 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3346 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3348 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3350 cons = sc->msk_stat_cons;
3352 sd = &sc->msk_stat_ring[cons];
3353 control = le32toh(sd->msk_control);
3354 if ((control & HW_OWNER) == 0)
3356 control &= ~HW_OWNER;
3357 sd->msk_control = htole32(control);
3358 status = le32toh(sd->msk_status);
3359 len = control & STLE_LEN_MASK;
3360 port = (control >> 16) & 0x01;
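/*
 * Fields extracted from the status LE control word, as used here:
 * HW_OWNER marks a valid element, STLE_OP_MASK selects the opcode,
 * bit 16 identifies the originating port and STLE_LEN_MASK carries
 * the length or opcode-specific data.
 */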
3361 sc_if = sc->msk_if[port];
3362 if (sc_if == NULL) {
3363 device_printf(sc->msk_dev, "invalid port opcode "
3364 "0x%08x\n", control & STLE_OP_MASK);
3368 switch (control & STLE_OP_MASK) {
3370 sc_if->msk_vtag = ntohs(len);
3373 sc_if->msk_vtag = ntohs(len);
3376 if (!(sc_if->msk_ifp->if_drv_flags & IFF_DRV_RUNNING))
3378 if (sc_if->msk_framesize >
3379 (MCLBYTES - MSK_RX_BUF_ALIGN))
3380 msk_jumbo_rxeof(sc_if, status, control, len);
3382 msk_rxeof(sc_if, status, control, len);
3385 * Because there is no way to sync a single Rx LE, put the
3386 * DMA sync operation off until the end of event processing.
3390 /* Update the prefetch unit if we've passed the watermark. */
3391 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3397 if (sc->msk_if[MSK_PORT_A] != NULL)
3398 msk_txeof(sc->msk_if[MSK_PORT_A],
3399 status & STLE_TXA1_MSKL);
3400 if (sc->msk_if[MSK_PORT_B] != NULL)
3401 msk_txeof(sc->msk_if[MSK_PORT_B],
3402 ((status & STLE_TXA2_MSKL) >>
3404 ((len & STLE_TXA2_MSKH) <<
3408 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3409 control & STLE_OP_MASK);
3412 MSK_INC(cons, MSK_STAT_RING_CNT);
3413 if (rxprog > sc->msk_process_limit)
3417 sc->msk_stat_cons = cons;
3418 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3419 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3421 if (rxput[MSK_PORT_A] > 0)
3422 msk_rxput(sc->msk_if[MSK_PORT_A]);
3423 if (rxput[MSK_PORT_B] > 0)
3424 msk_rxput(sc->msk_if[MSK_PORT_B]);
3426 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
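/*
 * The return value tells msk_intr() whether the hardware queued more
 * status LEs while we were processing; if it did, the caller skips
 * clearing the status BMU interrupt so that the new events trigger
 * another pass.
 */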
3432 struct msk_softc *sc;
3433 struct msk_if_softc *sc_if0, *sc_if1;
3434 struct ifnet *ifp0, *ifp1;
3441 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3442 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3443 if (status == 0 || status == 0xffffffff ||
3444 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3445 (status & sc->msk_intrmask) == 0) {
3446 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3451 sc_if0 = sc->msk_if[MSK_PORT_A];
3452 sc_if1 = sc->msk_if[MSK_PORT_B];
3455 ifp0 = sc_if0->msk_ifp;
3457 ifp1 = sc_if1->msk_ifp;
3459 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3460 msk_intr_phy(sc_if0);
3461 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3462 msk_intr_phy(sc_if1);
3463 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3464 msk_intr_gmac(sc_if0);
3465 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3466 msk_intr_gmac(sc_if1);
3467 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3468 device_printf(sc->msk_dev, "Rx descriptor error\n");
3469 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3470 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3471 CSR_READ_4(sc, B0_IMSK);
3473 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3474 device_printf(sc->msk_dev, "Tx descriptor error\n");
3475 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3476 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3477 CSR_READ_4(sc, B0_IMSK);
3479 if ((status & Y2_IS_HW_ERR) != 0)
3482 domore = msk_handle_events(sc);
3483 if ((status & Y2_IS_STAT_BMU) != 0 && domore == 0)
3484 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3486 /* Reenable interrupts. */
3487 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3489 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3490 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3491 msk_start_locked(ifp0);
3492 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3493 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3494 msk_start_locked(ifp1);
3500 msk_set_tx_stfwd(struct msk_if_softc *sc_if)
3502 struct msk_softc *sc;
3505 ifp = sc_if->msk_ifp;
3506 sc = sc_if->msk_softc;
3507 switch (sc->msk_hw_id) {
3508 case CHIP_ID_YUKON_EX:
3509 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
3510 goto yukon_ex_workaround;
3511 if (ifp->if_mtu > ETHERMTU)
3513 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3514 TX_JUMBO_ENA | TX_STFW_ENA);
3517 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3518 TX_JUMBO_DIS | TX_STFW_ENA);
3521 yukon_ex_workaround:
3522 if (ifp->if_mtu > ETHERMTU) {
3523 /* Set Tx GMAC FIFO Almost Empty Threshold. */
3525 MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3526 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3527 /* Disable Store & Forward mode for Tx. */
3529 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3530 TX_JUMBO_ENA | TX_STFW_DIS);
3532 /* Enable Store & Forward mode for Tx. */
3534 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3535 TX_JUMBO_DIS | TX_STFW_ENA);
3544 struct msk_if_softc *sc_if = xsc;
3547 msk_init_locked(sc_if);
3548 MSK_IF_UNLOCK(sc_if);
3552 msk_init_locked(struct msk_if_softc *sc_if)
3554 struct msk_softc *sc;
3556 struct mii_data *mii;
3562 MSK_IF_LOCK_ASSERT(sc_if);
3564 ifp = sc_if->msk_ifp;
3565 sc = sc_if->msk_softc;
3566 mii = device_get_softc(sc_if->msk_miibus);
3568 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3572 /* Cancel pending I/O and free all Rx/Tx buffers. */
3575 if (ifp->if_mtu < ETHERMTU)
3576 sc_if->msk_framesize = ETHERMTU;
3578 sc_if->msk_framesize = ifp->if_mtu;
3579 sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3580 if (ifp->if_mtu > ETHERMTU &&
3581 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
3582 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
3583 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
3586 /* GMAC Control reset. */
3587 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
3588 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
3589 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
3590 if (sc->msk_hw_id == CHIP_ID_YUKON_EX)
3591 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
3592 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
3596 * Initialize the GMAC first so that speed/duplex/flow-control
3597 * parameters are renegotiated when the interface is brought up.
3599 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
3601 /* Dummy read of the Interrupt Source Register. */
3602 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3604 /* Clear MIB stats. */
3605 msk_stats_clear(sc_if);
3608 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3610 /* Setup Transmit Control Register. */
3611 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3613 /* Setup Transmit Flow Control Register. */
3614 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3616 /* Setup Transmit Parameter Register. */
3617 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3618 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3619 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3621 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3622 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3624 if (ifp->if_mtu > ETHERMTU)
3625 gmac |= GM_SMOD_JUMBO_ENA;
3626 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3628 /* Set station address. */
3629 eaddr = IF_LLADDR(ifp);
3630 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L,
3631 eaddr[0] | (eaddr[1] << 8));
3632 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M,
3633 eaddr[2] | (eaddr[3] << 8));
3634 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H,
3635 eaddr[4] | (eaddr[5] << 8));
3636 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L,
3637 eaddr[0] | (eaddr[1] << 8));
3638 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M,
3639 eaddr[2] | (eaddr[3] << 8));
3640 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H,
3641 eaddr[4] | (eaddr[5] << 8));
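/*
 * Worked example: for the station address 00:11:22:33:44:55 the
 * writes above load GM_SRC_ADDR_1L = 0x1100, GM_SRC_ADDR_1M = 0x3322
 * and GM_SRC_ADDR_1H = 0x5544, i.e. two octets per 16-bit register
 * with the lower-numbered octet in the low byte; the second register
 * set receives the same address.
 */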
3643 /* Disable interrupts for counter overflows. */
3644 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3645 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3646 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3648 /* Configure Rx MAC FIFO. */
3649 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3650 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3651 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
3652 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
3653 sc->msk_hw_id == CHIP_ID_YUKON_EX)
3654 reg |= GMF_RX_OVER_ON;
3655 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
3657 /* Set receive filter. */
3658 msk_rxfilter(sc_if);
3660 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
3661 /* Clear flush mask - HW bug. */
3662 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
3664 /* Flush Rx MAC FIFO on any flow control or error. */
3665 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3670 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3671 * due to hardware hang on receipt of pause frames.
3673 reg = RX_GMF_FL_THR_DEF + 1;
3674 /* Another magic value for the Yukon FE+, from Linux. */
3675 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3676 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
3678 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
3680 /* Configure Tx MAC FIFO. */
3681 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3682 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3683 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3685 /* Configure hardware VLAN tag insertion/stripping. */
3686 msk_setvlan(sc_if, ifp);
3688 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
3689 /* Set Rx Pause threshold. */
3690 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3692 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3694 /* Configure store-and-forward for Tx. */
3695 msk_set_tx_stfwd(sc_if);
3698 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3699 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
3700 /* Disable dynamic watermark - from Linux. */
3701 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
3703 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
3707 * Disable the Force Sync bit and Alloc bit in the Tx RAM interface
3708 * arbiter, as we don't use the Sync Tx queue.
3710 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3711 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3712 /* Enable the RAM Interface Arbiter. */
3713 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3715 /* Setup RAM buffer. */
3716 msk_set_rambuffer(sc_if);
3718 /* Disable Tx sync Queue. */
3719 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3721 /* Setup Tx Queue Bus Memory Interface. */
3722 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3723 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3724 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3725 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3726 switch (sc->msk_hw_id) {
3727 case CHIP_ID_YUKON_EC_U:
3728 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3729 /* Fix for Yukon-EC Ultra: set BMU FIFO level */
3730 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
3734 case CHIP_ID_YUKON_EX:
3736 * The Yukon Extreme seems to have a silicon bug in its
3737 * automatic Tx checksum calculation capability.
3739 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
3740 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
3745 /* Setup Rx Queue Bus Memory Interface. */
3746 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3747 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3748 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3749 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3750 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3751 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3752 /* MAC Rx RAM Read is controlled by hardware. */
3753 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3756 msk_set_prefetch(sc, sc_if->msk_txq,
3757 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3758 msk_init_tx_ring(sc_if);
3760 /* Disable Rx checksum offload and RSS hash. */
3761 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3762 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3763 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
3764 msk_set_prefetch(sc, sc_if->msk_rxq,
3765 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3766 MSK_JUMBO_RX_RING_CNT - 1);
3767 error = msk_init_jumbo_rx_ring(sc_if);
3769 msk_set_prefetch(sc, sc_if->msk_rxq,
3770 sc_if->msk_rdata.msk_rx_ring_paddr,
3771 MSK_RX_RING_CNT - 1);
3772 error = msk_init_rx_ring(sc_if);
3775 device_printf(sc_if->msk_if_dev,
3776 "initialization failed: no memory for Rx buffers\n");
3780 if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
3781 /* Disable flushing of non-ASF packets. */
3782 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3783 GMF_RX_MACSEC_FLUSH_OFF);
3786 /* Configure interrupt handling. */
3787 if (sc_if->msk_port == MSK_PORT_A) {
3788 sc->msk_intrmask |= Y2_IS_PORT_A;
3789 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3791 sc->msk_intrmask |= Y2_IS_PORT_B;
3792 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3794 /* Configure IRQ moderation mask. */
3795 CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
3796 if (sc->msk_int_holdoff > 0) {
3797 /* Configure initial IRQ moderation timer value. */
3798 CSR_WRITE_4(sc, B2_IRQM_INI,
3799 MSK_USECS(sc, sc->msk_int_holdoff));
3800 CSR_WRITE_4(sc, B2_IRQM_VAL,
3801 MSK_USECS(sc, sc->msk_int_holdoff));
3802 /* Start IRQ moderation. */
3803 CSR_WRITE_1(sc, B2_IRQM_CTRL, TIM_START);
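/*
 * With moderation started, the B2_IRQM_INI/B2_IRQM_VAL timers hold
 * off interrupt delivery for msk_int_holdoff microseconds (converted
 * to core clock ticks by MSK_USECS()), batching completions under
 * load at the cost of a little latency.
 */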
3805 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3806 CSR_READ_4(sc, B0_HWE_IMSK);
3807 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3808 CSR_READ_4(sc, B0_IMSK);
3810 sc_if->msk_flags &= ~MSK_FLAG_LINK;
3813 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3814 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3816 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3820 msk_set_rambuffer(struct msk_if_softc *sc_if)
3822 struct msk_softc *sc;
3825 sc = sc_if->msk_softc;
3826 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
3829 /* Setup Rx Queue. */
3830 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3831 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3832 sc->msk_rxqstart[sc_if->msk_port] / 8);
3833 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3834 sc->msk_rxqend[sc_if->msk_port] / 8);
3835 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3836 sc->msk_rxqstart[sc_if->msk_port] / 8);
3837 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3838 sc->msk_rxqstart[sc_if->msk_port] / 8);
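/*
 * The RAM buffer start/end and read/write pointer registers are
 * programmed in units of 8-byte words, hence the division of the
 * byte offsets by 8 above; the pause threshold values computed below
 * use the same units.
 */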
3840 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3841 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3842 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3843 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3844 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3845 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3846 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3847 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
3848 /* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3850 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3851 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3853 /* Setup Tx Queue. */
3854 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3855 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3856 sc->msk_txqstart[sc_if->msk_port] / 8);
3857 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3858 sc->msk_txqend[sc_if->msk_port] / 8);
3859 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3860 sc->msk_txqstart[sc_if->msk_port] / 8);
3861 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3862 sc->msk_txqstart[sc_if->msk_port] / 8);
3863 /* Enable Store & Forward for Tx side. */
3864 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3865 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3866 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3870 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3874 /* Reset the prefetch unit. */
3875 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3877 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3879 /* Set LE base address. */
3880 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3882 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3884 /* Set the list last index. */
3885 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3887 /* Turn on prefetch unit. */
3888 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3890 /* Dummy read to ensure write. */
3891 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3895 msk_stop(struct msk_if_softc *sc_if)
3897 struct msk_softc *sc;
3898 struct msk_txdesc *txd;
3899 struct msk_rxdesc *rxd;
3900 struct msk_rxdesc *jrxd;
3905 MSK_IF_LOCK_ASSERT(sc_if);
3906 sc = sc_if->msk_softc;
3907 ifp = sc_if->msk_ifp;
3909 callout_stop(&sc_if->msk_tick_ch);
3910 sc_if->msk_watchdog_timer = 0;
3912 /* Disable interrupts. */
3913 if (sc_if->msk_port == MSK_PORT_A) {
3914 sc->msk_intrmask &= ~Y2_IS_PORT_A;
3915 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3917 sc->msk_intrmask &= ~Y2_IS_PORT_B;
3918 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3920 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3921 CSR_READ_4(sc, B0_HWE_IMSK);
3922 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3923 CSR_READ_4(sc, B0_IMSK);
3925 /* Disable Tx/Rx MAC. */
3926 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3927 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3928 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
3929 /* Read back to ensure the write completed. */
3930 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3931 /* Update stats and clear counters. */
3932 msk_stats_update(sc_if);
3935 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3936 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3937 for (i = 0; i < MSK_TIMEOUT; i++) {
3938 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3939 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3941 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3946 if (i == MSK_TIMEOUT)
3947 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3948 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3949 RB_RST_SET | RB_DIS_OP_MD);
3951 /* Disable all GMAC interrupts. */
3952 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3953 /* Disable PHY interrupt. */
3954 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3956 /* Disable the RAM Interface Arbiter. */
3957 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3959 /* Reset the PCI FIFO of the async Tx queue. */
3960 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3961 BMU_RST_SET | BMU_FIFO_RST);
3963 /* Reset the Tx prefetch units. */
3964 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3967 /* Reset the RAM Buffer async Tx queue. */
3968 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3970 /* Reset Tx MAC FIFO. */
3971 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3972 /* Set Pause Off. */
3973 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
3976 * The Rx Stop command will not work on the Yukon-2 if the BMU does
3977 * not reach the end of a packet, and since we can't make sure that we
3978 * have incoming data, we must reset the BMU while it is not in the
3979 * middle of a DMA transfer. Since it is possible that the Rx path is
3980 * still active, the Rx RAM buffer is stopped first so that any
3981 * incoming data will not trigger a DMA. After the RAM buffer is
3982 * stopped, the BMU is polled until any DMA in progress has ended, and only then is it reset.
3986 /* Disable the RAM Buffer receive queue. */
3987 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
3988 for (i = 0; i < MSK_TIMEOUT; i++) {
3989 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
3990 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
3994 if (i == MSK_TIMEOUT)
3995 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
3996 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3997 BMU_RST_SET | BMU_FIFO_RST);
3998 /* Reset the Rx prefetch unit. */
3999 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
4001 /* Reset the RAM Buffer receive queue. */
4002 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
4003 /* Reset Rx MAC FIFO. */
4004 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
4006 /* Free Rx and Tx mbufs still in the queues. */
4007 for (i = 0; i < MSK_RX_RING_CNT; i++) {
4008 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
4009 if (rxd->rx_m != NULL) {
4010 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
4011 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4012 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
4018 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
4019 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
4020 if (jrxd->rx_m != NULL) {
4021 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
4022 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4023 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
4025 m_freem(jrxd->rx_m);
4029 for (i = 0; i < MSK_TX_RING_CNT; i++) {
4030 txd = &sc_if->msk_cdata.msk_txdesc[i];
4031 if (txd->tx_m != NULL) {
4032 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
4033 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
4034 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
4042 * Mark the interface down.
4044 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4045 sc_if->msk_flags &= ~MSK_FLAG_LINK;
4049 * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the lower
4050 * counter word clears the high 16 bits of the counter, so accessing
4051 * the lower 16 bits should be the last operation.
#define MSK_READ_MIB32(x, y)					\
	((((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +	\
	(uint32_t)GMAC_READ_2(sc, x, y))
#define MSK_READ_MIB64(x, y)					\
	((((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +	\
	(uint64_t)MSK_READ_MIB32(x, y))
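
/*
 * Illustrative use of the macros above (this is the pattern the calls
 * in msk_stats_update() below follow; note that the macros expand a
 * reference to a local "sc").  The high word at offset (y) + 4 is
 * fetched first so that, with GM_PAR_MIB_CLR set, the clearing read
 * of the low word is the last access:
 *
 *	uint32_t crc_errs;
 *
 *	crc_errs = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
 */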
static void
msk_stats_clear(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint32_t reg;
	uint16_t gmac;
	int i;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->msk_softc;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
	/* Read all MIB Counters with Clear Mode set. */
	for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t))
		reg = MSK_READ_MIB32(sc_if->msk_port, i);	/* value discarded; the read clears the counter */
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}
static void
msk_stats_update(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct msk_hw_stats *stats;
	uint16_t gmac;
	uint32_t reg;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;
	sc = sc_if->msk_softc;
	stats = &sc_if->msk_stats;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);

	/* Rx stats. */
	stats->rx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
	stats->rx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
	stats->rx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
	stats->rx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
	stats->rx_crc_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
	stats->rx_good_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
	stats->rx_bad_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
	stats->rx_runt +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
	stats->rx_runt_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
	stats->rx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
	stats->rx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
	stats->rx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
	stats->rx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
	stats->rx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
	stats->rx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
	stats->rx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
	stats->rx_pkts_too_long +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
	stats->rx_pkts_jabbers +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
	stats->rx_fifo_oflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);

	/* Tx stats. */
	stats->tx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
	stats->tx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
	stats->tx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
	stats->tx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
	stats->tx_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
	stats->tx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
	stats->tx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
	stats->tx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
	stats->tx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
	stats->tx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
	stats->tx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
	stats->tx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
	stats->tx_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
	stats->tx_late_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
	stats->tx_excess_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
	stats->tx_multi_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
	stats->tx_single_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
	stats->tx_underflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}
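
/*
 * msk_stats_update() accumulates rather than overwrites: the MIB
 * counters are read in clear-on-read mode, so each pass folds only the
 * delta since the previous call into the running totals kept in
 * msk_hw_stats.  It must run with the interface lock held, as the
 * MSK_IF_LOCK_ASSERT() at its top enforces.
 */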
static int
msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint32_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);
	MSK_IF_LOCK(sc_if);
	/* Report the live hardware counter plus the accumulated total. */
	result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);
	return (sysctl_handle_int(oidp, &result, 0, req));
}
static int
msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint64_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);
	MSK_IF_LOCK(sc_if);
	result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);
	return (sysctl_handle_quad(oidp, &result, 0, req));
}
#undef MSK_READ_MIB32
#undef MSK_READ_MIB64
#define MSK_SYSCTL_STAT32(sc, c, o, p, n, d)				\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD,	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,	\
	    "IU", d)
#define MSK_SYSCTL_STAT64(sc, c, o, p, n, d)				\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_QUAD | CTLFLAG_RD,	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,	\
	    "Q", d)
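
/*
 * The two helper macros register one read-only sysctl per counter:
 * arg1 carries the per-interface softc and arg2 the offsetof() of the
 * counter within struct msk_hw_stats.  The handlers reuse that offset
 * both to locate the software total and to derive the matching
 * hardware register as GM_MIB_CNT_BASE + off * 2.
 */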
static void
msk_sysctl_node(struct msk_if_softc *sc_if)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *schild;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "MSK Statistics");
	schild = child = SYSCTL_CHILDREN(tree);
	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "MSK RX Statistics");
	child = SYSCTL_CHILDREN(tree);
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, rx_ucast_frames, "Good unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, rx_bcast_frames, "Good broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, rx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, rx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
	    child, rx_crc_errs, "CRC errors");
	MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
	    child, rx_good_octets, "Good octets");
	MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
	    child, rx_bad_octets, "Bad octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, rx_pkts_64, "64 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, rx_pkts_65_127, "65 to 127 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, rx_pkts_128_255, "128 to 255 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, rx_pkts_256_511, "256 to 511 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, rx_pkts_512_1023, "512 to 1023 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, rx_pkts_1024_1518, "1024 to 1518 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, rx_pkts_1519_max, "1519 to max frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
	    child, rx_pkts_too_long, "frames too long");
	MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
	    child, rx_pkts_jabbers, "Jabber errors");
	MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
	    child, rx_fifo_oflows, "FIFO overflows");

	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "MSK TX Statistics");
	child = SYSCTL_CHILDREN(tree);
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, tx_ucast_frames, "Unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, tx_bcast_frames, "Broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, tx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, tx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
	    child, tx_octets, "Octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, tx_pkts_64, "64 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, tx_pkts_65_127, "65 to 127 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, tx_pkts_128_255, "128 to 255 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, tx_pkts_256_511, "256 to 511 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, tx_pkts_512_1023, "512 to 1023 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, tx_pkts_1024_1518, "1024 to 1518 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, tx_pkts_1519_max, "1519 to max frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
	    child, tx_colls, "Collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
	    child, tx_late_colls, "Late collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
	    child, tx_excess_colls, "Excessive collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
	    child, tx_multi_colls, "Multiple collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
	    child, tx_single_colls, "Single collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
	    child, tx_underflows, "FIFO underflows");
}
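
/*
 * The registrations above surface the counters under the device's
 * sysctl tree; for the first port this would look like (illustrative,
 * assuming unit 0):
 *
 *	sysctl dev.msk.0.stats.rx.good_octets
 *	sysctl dev.msk.0.stats.tx.colls
 */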
#undef MSK_SYSCTL_STAT32
#undef MSK_SYSCTL_STAT64
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}
static int
sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
	    MSK_PROC_MAX));
}
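
/*
 * A sketch of how sysctl_hw_msk_proc_limit() would be hooked up at
 * attach time.  The softc field name msk_process_limit and the
 * description string are assumptions for illustration, not taken
 * from this file:
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->msk_dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->msk_dev)),
 *	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 *	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
 *	    "max number of Rx events to process");
 */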