/******************************************************************************
 *
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/
/******************************************************************************
 *
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * - Neither the name of Marvell nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/msk/if_mskreg.h>
MODULE_DEPEND(msk, pci, 1, 1, 1);
MODULE_DEPEND(msk, ether, 1, 1, 1);
MODULE_DEPEND(msk, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Tunables. */
static int msi_disable = 0;
TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
static int legacy_intr = 0;
TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
static int jumbo_disable = 0;
TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

/*
 * Devices supported by this driver.
 */
static struct msk_product {
	uint16_t	msk_vendorid;
	uint16_t	msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	    "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	    "SK-9Exx Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	    "Marvell Yukon 88E8035 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	    "Marvell Yukon 88E8036 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	    "Marvell Yukon 88E8038 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	    "Marvell Yukon 88E8039 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040,
	    "Marvell Yukon 88E8040 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040T,
	    "Marvell Yukon 88E8040T Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8042,
	    "Marvell Yukon 88E8042 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8048,
	    "Marvell Yukon 88E8048 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4365,
	    "Marvell Yukon 88E8070 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436B,
	    "Marvell Yukon 88E8071 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436C,
	    "Marvell Yukon 88E8072 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436D,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4370,
	    "Marvell Yukon 88E8075 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4380,
	    "Marvell Yukon 88E8057 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4381,
	    "Marvell Yukon 88E8059 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	    "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560SX,
	    "D-Link 560SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	    "D-Link 560T Gigabit Ethernet" }
};
static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon EX",
	"Yukon EC",
	"Yukon FE",
	"Yukon FE+",
	"Yukon Supreme",
	"Yukon Ultra 2",
	"Yukon Unknown",
	"Yukon Optima",
};
static int mskc_probe(device_t);
static int mskc_attach(device_t);
static int mskc_detach(device_t);
static int mskc_shutdown(device_t);
static int mskc_setup_rambuffer(struct msk_softc *);
static int mskc_suspend(device_t);
static int mskc_resume(device_t);
static void mskc_reset(struct msk_softc *);

static int msk_probe(device_t);
static int msk_attach(device_t);
static int msk_detach(device_t);

static void msk_tick(void *);
static void msk_intr(void *);
static void msk_intr_phy(struct msk_if_softc *);
static void msk_intr_gmac(struct msk_if_softc *);
static __inline void msk_rxput(struct msk_if_softc *);
static int msk_handle_events(struct msk_softc *);
static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void msk_intr_hwerr(struct msk_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void msk_fixup_rx(struct mbuf *);
#endif
static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *);
static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_txeof(struct msk_if_softc *, int);
static int msk_encap(struct msk_if_softc *, struct mbuf **);
static void msk_start(struct ifnet *);
static void msk_start_locked(struct ifnet *);
static int msk_ioctl(struct ifnet *, u_long, caddr_t);
static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void msk_set_rambuffer(struct msk_if_softc *);
static void msk_set_tx_stfwd(struct msk_if_softc *);
static void msk_init(void *);
static void msk_init_locked(struct msk_if_softc *);
static void msk_stop(struct msk_if_softc *);
static void msk_watchdog(struct msk_if_softc *);
static int msk_mediachange(struct ifnet *);
static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
static void msk_phy_power(struct msk_softc *, int);
static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int msk_status_dma_alloc(struct msk_softc *);
static void msk_status_dma_free(struct msk_softc *);
static int msk_txrx_dma_alloc(struct msk_if_softc *);
static int msk_rx_dma_jalloc(struct msk_if_softc *);
static void msk_txrx_dma_free(struct msk_if_softc *);
static void msk_rx_dma_jfree(struct msk_if_softc *);
static int msk_rx_fill(struct msk_if_softc *, int);
static int msk_init_rx_ring(struct msk_if_softc *);
static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
static void msk_init_tx_ring(struct msk_if_softc *);
static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int msk_newbuf(struct msk_if_softc *, int);
static int msk_jumbo_newbuf(struct msk_if_softc *, int);

static int msk_phy_readreg(struct msk_if_softc *, int, int);
static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
static int msk_miibus_readreg(device_t, int, int);
static int msk_miibus_writereg(device_t, int, int, int);
static void msk_miibus_statchg(device_t);

static void msk_rxfilter(struct msk_if_softc *);
static void msk_setvlan(struct msk_if_softc *, struct ifnet *);

static void msk_stats_clear(struct msk_if_softc *);
static void msk_stats_update(struct msk_if_softc *);
static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
static void msk_sysctl_node(struct msk_if_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ NULL, NULL }
};

static driver_t mskc_driver = {
	"mskc",
	mskc_methods,
	sizeof(struct msk_softc)
};

static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	{ NULL, NULL }
};

static driver_t msk_driver = {
	"msk",
	msk_methods,
	sizeof(struct msk_if_softc)
};

static devclass_t msk_devclass;

DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
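/*
 * Note on the newbus layout above: mskc attaches to the PCI device and
 * owns the shared resources (registers, IRQ, status ring), while one msk
 * child is added per MAC port and carries the ifnet; miibus in turn hangs
 * off each msk child for PHY access.
 */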
static struct resource_spec msk_res_spec_io[] = {
	{ SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_res_spec_mem[] = {
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_legacy[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_msi[] = {
	{ SYS_RES_IRQ, 1, RF_ACTIVE },
	{ -1, 0, 0 }
};
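/*
 * PHY registers are not memory mapped; the helpers below go through the
 * GMAC's SMI (MDIO) interface: a command is written to GM_SMI_CTRL and
 * then polled until the read data becomes valid (or the write completes).
 */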
static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct msk_if_softc *sc_if;

	sc_if = device_get_softc(dev);

	return (msk_phy_readreg(sc_if, phy, reg));
}
static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}
static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct msk_if_softc *sc_if;

	sc_if = device_get_softc(dev);

	return (msk_phy_writereg(sc_if, phy, reg, val));
}
static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}
static void
msk_miibus_statchg(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t gmac;

	sc_if = device_get_softc(dev);
	sc = sc_if->msk_softc;

	MSK_IF_LOCK_ASSERT(sc_if);

	mii = device_get_softc(sc_if->msk_miibus);
	ifp = sc_if->msk_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc_if->msk_flags &= ~MSK_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc_if->msk_flags |= MSK_FLAG_LINK;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
				sc_if->msk_flags |= MSK_FLAG_LINK;
			break;
		}
	}

	if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) when it detects a link
		 * state change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_TXPAUSE) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
			gmac |= GM_GPCR_DUP_FULL;
		else
			gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		gmac = GMC_PAUSE_OFF;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				gmac = GMC_PAUSE_ON;
		}
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
			/* Read again to ensure writing. */
			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		}
	}
}
static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
		mchash[0] = 0xffff;
		mchash[1] = 0xffff;
	} else {
		mode |= GM_RXCR_UCF_ENA;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
		}
		if_maddr_runlock(ifp);
		if (mchash[0] != 0 || mchash[1] != 0)
			mode |= GM_RXCR_MCF_ENA;
	}
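	/*
	 * The 64-bit multicast hash computed above is programmed into the
	 * GMAC as four 16-bit halves: H1/H2 take the low and high words of
	 * mchash[0], H3/H4 those of mchash[1].
	 */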
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}
static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}
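/*
 * msk_rx_fill() runs after the Rx ring has been handed to the prefetch
 * unit: when a checksum-start (OP_TCPSTART) list element was queued, it
 * waits for the unit to consume it and then backfills ring slot 0 with a
 * real buffer, so the ring is fully populated before traffic starts.
 */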
static int
msk_rx_fill(struct msk_if_softc *sc_if, int jumbo)
{
	uint16_t idx;
	int i;

	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
		/* Wait until controller executes OP_TCPSTART command. */
		for (i = 10; i > 0; i--) {
			DELAY(10);
			idx = CSR_READ_2(sc_if->msk_softc,
			    Y2_PREF_Q_ADDR(sc_if->msk_rxq,
			    PREF_UNIT_GET_IDX_REG));
			if (idx != 0)
				break;
		}
		if (i == 0) {
			device_printf(sc_if->msk_if_dev,
			    "prefetch unit stuck?\n");
			return (ETIMEDOUT);
		}
		/*
		 * Fill consumed LE with free buffer. This can be done
		 * in Rx handler but we don't want to add special code
		 * in Rx handler.
		 */
		if (jumbo > 0) {
			if (msk_jumbo_newbuf(sc_if, 0) != 0)
				return (ENOBUFS);
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			if (msk_newbuf(sc_if, 0) != 0)
				return (ENOBUFS);
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
			    sc_if->msk_cdata.msk_rx_ring_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
		sc_if->msk_cdata.msk_rx_prod = 0;
		CSR_WRITE_2(sc_if->msk_softc,
		    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
		    sc_if->msk_cdata.msk_rx_prod);
	}

	return (0);
}
static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	i = 0;
	/* Have controller know how to compute Rx checksum. */
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
		    ETHER_HDR_LEN);
		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
		MSK_INC(prod, MSK_RX_RING_CNT);
		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
		i++;
	}
	for (; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_cdata.msk_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);
	if (msk_rx_fill(sc_if, 0) != 0)
		return (ENOBUFS);

	return (0);
}
static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	i = 0;
	/* Have controller know how to compute Rx checksum. */
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
		    ETHER_HDR_LEN);
		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
		i++;
	}
	for (; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);
	if (msk_rx_fill(sc_if, 1) != 0)
		return (ENOBUFS);

	return (0);
}
static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tso_mtu = 0;
	sc_if->msk_cdata.msk_last_csum = 0;
	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
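/*
 * The two discard helpers below recycle an Rx buffer in place: when a
 * received frame is dropped, the existing mbuf and its DMA mapping are
 * reused and only the descriptor's control word is rewritten to hand the
 * slot back to the hardware, avoiding a fresh allocation.
 */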
static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
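/*
 * msk_newbuf()/msk_jumbo_newbuf() below rely on a spare DMA map: the new
 * mbuf is loaded into the spare map first, and only on success is that map
 * swapped with the ring slot's map. A failed allocation or load therefore
 * never leaves a ring entry unmapped.
 */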
static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;
	int error;

	sc_if = ifp->if_softc;

	MSK_IF_LOCK(sc_if);
	mii = device_get_softc(sc_if->msk_miibus);
	error = mii_mediachg(mii);
	MSK_IF_UNLOCK(sc_if);

	return (error);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = ifp->if_softc;
	MSK_IF_LOCK(sc_if);
	if ((ifp->if_flags & IFF_UP) == 0) {
		MSK_IF_UNLOCK(sc_if);
		return;
	}
	mii = device_get_softc(sc_if->msk_miibus);

	mii_pollstat(mii);
	MSK_IF_UNLOCK(sc_if);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask, reinit;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch(command) {
	case SIOCSIFMTU:
		MSK_IF_LOCK(sc_if);
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if (ifr->ifr_mtu > ETHERMTU) {
				if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
					error = EINVAL;
					MSK_IF_UNLOCK(sc_if);
					break;
				}
				if ((sc_if->msk_flags &
				    MSK_FLAG_JUMBO_NOCSUM) != 0) {
					ifp->if_hwassist &=
					    ~(MSK_CSUM_FEATURES | CSUM_TSO);
					ifp->if_capenable &=
					    ~(IFCAP_TSO4 | IFCAP_TXCSUM);
					VLAN_CAPABILITIES(ifp);
				}
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				msk_init_locked(sc_if);
			}
		}
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCSIFFLAGS:
		MSK_IF_LOCK(sc_if);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc_if->msk_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				msk_rxfilter(sc_if);
			else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
				msk_init_locked(sc_if);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			msk_stop(sc_if);
		sc_if->msk_if_flags = ifp->if_flags;
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MSK_IF_LOCK(sc_if);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			msk_rxfilter(sc_if);
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		reinit = 0;
		MSK_IF_LOCK(sc_if);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
				reinit = 1;
		}
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_TSO4) != 0 &&
		    (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0)
				ifp->if_capenable &=
				    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
			msk_setvlan(sc_if, ifp);
		}
		if (ifp->if_mtu > ETHERMTU &&
		    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
			ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
			ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
		}
		VLAN_CAPABILITIES(ifp);
		if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			msk_init_locked(sc_if);
		}
		MSK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
static int
mskc_probe(device_t dev)
{
	struct msk_product *mp;
	uint16_t vendor, devid;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	mp = msk_products;
	for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
	    i++, mp++) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;

	/* Get adapter SRAM size. */
	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
	if (bootverbose)
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	if (sc->msk_ramsize == 0)
		return (0);

	sc->msk_pflags |= MSK_FLAG_RAMBUF;
	/*
	 * Give receiver 2/3 of memory and round down to the multiple
	 * of 1024. Tx/Rx RAM buffer size of Yukon II should be multiple
	 * of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}
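/*
 * Worked example of the 2/3 split in mskc_setup_rambuffer() (illustrative
 * numbers): a 48KB SRAM gives msk_rxqsize = rounddown(48 * 1024 * 2 / 3,
 * 1024) = 32768 bytes and msk_txqsize = 49152 - 32768 = 16384 bytes, both
 * multiples of 1024 as the hardware requires.
 */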
static void
msk_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t our, val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
			if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
				/* Deassert Low Power for 1st PHY. */
				our |= PCI_Y2_PHY1_COMA;
				if (sc->msk_num_port > 1)
					our |= PCI_Y2_PHY2_COMA;
			}
		}
		if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
		    sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
			val &= (PCI_FORCE_ASPM_REQUEST |
			    PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
			    PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
			val &= PCI_CTL_TIM_VMAIN_AV_MSK;
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
			CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
			/*
			 * Disable status race, workaround for
			 * Yukon EC Ultra & Yukon EX.
			 */
			val = CSR_READ_4(sc, B2_GP_IO);
			val |= GLB_GPIO_STAT_RACE_DIS;
			CSR_WRITE_4(sc, B2_GP_IO, val);
			CSR_READ_4(sc, B2_GP_IO);
		}
		/* Release PHY from PowerDown/COMA mode. */
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);

		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}
static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i, initram;

	/* Disable ASF. */
	if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
	    sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
			status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
			/* Clear AHB bridge & microcontroller reset. */
			status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
			    Y2_ASF_HCU_CCSR_CPU_RST_MODE);
			/* Clear ASF microcontroller state. */
			status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
			status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
			CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
		} else
			CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
		/*
		 * Since we disabled ASF, S/W reset is required for
		 * Power Management.
		 */
		CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
		CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	}

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2(8bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
			val |= PCI_CLS_OPT;
			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		}
		break;
	}
	/* Set PHY power state. */
	msk_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
			CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
			    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
			    GMC_BYP_RETR_ON);
	}

	if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
	    sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
	if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
		/* Disable PCIe PHY powerdown(reg 0x80, bit7). */
		CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	initram = 0;
	if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id == CHIP_ID_YUKON_EC ||
	    sc->msk_hw_id == CHIP_ID_YUKON_FE)
		initram++;

	/* Configure timeout values. */
	for (i = 0; initram > 0 && i < sc->msk_num_port; i++) {
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
		    MSK_RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual port PCI-X card, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;

		pcix_cmd = pci_read_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, 2);
		/* Clear Max Outstanding Split Transactions. */
		pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_expcap != 0) {
		/* Change Max. Read Request Size to 2048 bytes. */
		if (pci_get_max_read_req(sc->msk_dev) == 512)
			pci_set_max_read_req(sc->msk_dev, 2048);
	}

	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
	sc->msk_stat_cons = 0;
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}
static int
msk_probe(device_t dev)
{
	struct msk_softc *sc;
	char desc[64];

	sc = device_get_softc(device_get_parent(dev));
	/*
	 * Not much to do here. We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	snprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (BUS_PROBE_DEFAULT);
}
static int
msk_attach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct ifnet *ifp;
	struct msk_mii_data *mmd;
	int i, port, error;
	uint8_t eaddr[6];

	if (dev == NULL)
		return (EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	mmd = device_get_ivars(dev);
	port = mmd->port;

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_flags = sc->msk_pflags;
	sc->msk_if[port] = sc_if;
	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
	msk_sysctl_node(sc_if);

	if ((error = msk_txrx_dma_alloc(sc_if) != 0))
		goto fail;
	msk_rx_dma_jalloc(sc_if);

	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
	/*
	 * Enable Rx checksum offloading if controller supports
	 * new descriptor format and controller is not Yukon XL.
	 */
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    sc->msk_hw_id != CHIP_ID_YUKON_XL)
		ifp->if_capabilities |= IFCAP_RXCSUM;
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
	    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
		ifp->if_capabilities |= IFCAP_RXCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_init = msk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);
	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	MSK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
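	/*
	 * The station addresses live at fixed offsets in the chip's control
	 * space: port A's six bytes start at B2_MAC_1 and port B's follow
	 * eight bytes later, hence the (port * 8) stride above.
	 */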
	/*
	 * Call MI attach routine. Can't hold locks when calling into ether_*.
	 */
	MSK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	MSK_IF_LOCK(sc_if);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
		/*
		 * Due to Tx checksum offload hardware bugs, msk(4) manually
		 * computes checksum for short frames. For VLAN tagged frames
		 * this workaround does not work so disable checksum offload
		 * for VLAN interface.
		 */
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
		/*
		 * Enable Rx checksum offloading for VLAN tagged frames
		 * if controller supports new descriptor format.
		 */
		if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
		    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Do miibus setup.
	 */
	MSK_IF_UNLOCK(sc_if);
	error = mii_attach(dev, &sc_if->msk_miibus, ifp, msk_mediachange,
	    msk_mediastatus, BMSR_DEFCAPMASK, PHY_ADDR_MARV, MII_OFFSET_ANY,
	    mmd->mii_flags);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev, "attaching PHYs failed\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

fail:
	if (error != 0) {
		/* Access should be ok even though lock has been dropped */
		sc->msk_if[port] = NULL;
		msk_detach(dev);
	}

	return (error);
}
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_mii_data *mmd;
	int error, msic, msir, reg;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate I/O resource */
#ifdef MSK_USEIOSPACE
	sc->msk_res_spec = msk_res_spec_io;
#else
	sc->msk_res_spec = msk_res_spec_mem;
#endif
	sc->msk_irq_spec = msk_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
	if (error) {
		if (sc->msk_res_spec == msk_res_spec_mem)
			sc->msk_res_spec = msk_res_spec_io;
		else
			sc->msk_res_spec = msk_res_spec_mem;
		error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
		if (error) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_spec == msk_res_spec_mem ? "memory" :
			    "I/O");
			mtx_destroy(&sc->msk_mtx);
			return (ENXIO);
		}
	}
	/* Enable all clocks before accessing any registers. */
	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
	    sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		mtx_destroy(&sc->msk_mtx);
		return (ENXIO);
	}
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
	    "max number of Rx events to process");

	sc->msk_process_limit = MSK_PROC_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "process_limit", &sc->msk_process_limit);
	if (error == 0) {
		if (sc->msk_process_limit < MSK_PROC_MIN ||
		    sc->msk_process_limit > MSK_PROC_MAX) {
			device_printf(dev, "process_limit value out of range; "
			    "using default: %d\n", MSK_PROC_DEFAULT);
			sc->msk_process_limit = MSK_PROC_DEFAULT;
		}
	}

	sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "int_holdoff", CTLFLAG_RW, &sc->msk_int_holdoff, 0,
	    "Maximum amount of time to delay interrupts");
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "int_holdoff", &sc->msk_int_holdoff);
	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_find_cap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0) {
		sc->msk_bustype = MSK_PEX_BUS;
		sc->msk_expcap = reg;
	} else if (pci_find_cap(sc->msk_dev, PCIY_PCIX, &reg) == 0) {
		sc->msk_bustype = MSK_PCIX_BUS;
		sc->msk_pcixcap = reg;
	} else
		sc->msk_bustype = MSK_PCI_BUS;
	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
		break;
	case CHIP_ID_YUKON_EX:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
		    MSK_FLAG_AUTOTX_CSUM;
		/*
		 * Yukon Extreme seems to have a silicon bug in its
		 * automatic Tx checksum calculation capability.
		 */
		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
			sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
		/*
		 * Yukon Extreme A0 cannot use store-and-forward
		 * for jumbo frames, so disable Tx checksum
		 * offloading for jumbo frames.
		 */
		if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
			sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		break;
	case CHIP_ID_YUKON_FE_P:
		sc->msk_clock = 50;	/* 50 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
		    MSK_FLAG_AUTOTX_CSUM;
		if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
			/*
			 * XXX
			 * FE+ A0 has status LE writeback bug so msk(4)
			 * does not rely on status word of received frame
			 * in msk_rxeof() which in turn disables all
			 * hardware assistance bits reported by the status
			 * word as well as validity of the received frame.
			 * Just pass received frames to upper stack with
			 * minimal test and let upper stack handle them.
			 */
			sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
			    MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
		}
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_SUPR:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
		    MSK_FLAG_AUTOTX_CSUM;
		break;
	case CHIP_ID_YUKON_UL_2:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_OPT:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2;
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}
	/* Allocate IRQ resources. */
	msic = pci_msi_count(dev);
	if (bootverbose)
		device_printf(dev, "MSI count : %d\n", msic);
	if (legacy_intr != 0)
		msi_disable = 1;
	if (msi_disable == 0 && msic > 0) {
		msir = 1;
		if (pci_alloc_msi(dev, &msir) == 0) {
			if (msir == 1) {
				sc->msk_pflags |= MSK_FLAG_MSI;
				sc->msk_irq_spec = msk_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
	if (error) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		goto fail;
	}
	if ((error = msk_status_dma_alloc(sc)) != 0)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	if ((error = mskc_setup_rambuffer(sc)) != 0)
		goto fail;

	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
	if (mmd == NULL) {
		device_printf(dev, "failed to allocate memory for "
		    "ivars of PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	mmd->port = MSK_PORT_A;
	mmd->pmd = sc->msk_pmd;
	mmd->mii_flags |= MIIF_DOPAUSE;
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		mmd->mii_flags |= MIIF_HAVEFIBER;
	if (sc->msk_pmd == 'P')
		mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK |
		    M_ZERO);
		if (mmd == NULL) {
			device_printf(dev, "failed to allocate memory for "
			    "ivars of PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		mmd->port = MSK_PORT_B;
		mmd->pmd = sc->msk_pmd;
		if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
			mmd->mii_flags |= MIIF_HAVEFIBER;
		if (sc->msk_pmd == 'P')
			mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
	}

	error = bus_generic_attach(dev);
	if (error) {
		device_printf(dev, "failed to attach port(s)\n");
		goto fail;
	}

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
	    INTR_MPSAFE, NULL, msk_intr, sc, &sc->msk_intrhand);
	if (error != 0) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		goto fail;
	}

fail:
	if (error != 0)
		mskc_detach(dev);

	return (error);
}
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
msk_detach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct ifnet *ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
	    ("msk mutex not initialized in msk_detach"));
	MSK_IF_LOCK(sc_if);

	ifp = sc_if->msk_ifp;
	if (device_is_attached(dev)) {
		/* XXX */
		sc_if->msk_flags |= MSK_FLAG_DETACH;
		msk_stop(sc_if);
		/* Can't hold locks while calling detach. */
		MSK_IF_UNLOCK(sc_if);
		callout_drain(&sc_if->msk_tick_ch);
		if (ifp != NULL)
			ether_ifdetach(ifp);
		MSK_IF_LOCK(sc_if);
	}

	/*
	 * We're generally called from mskc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 *
	 * if (sc_if->msk_miibus != NULL) {
	 *	device_delete_child(dev, sc_if->msk_miibus);
	 *	sc_if->msk_miibus = NULL;
	 * }
	 */

	msk_rx_dma_jfree(sc_if);
	msk_txrx_dma_free(sc_if);
	bus_generic_detach(dev);

	if (ifp)
		if_free(ifp);
	sc = sc_if->msk_softc;
	sc->msk_if[sc_if->msk_port] = NULL;
	MSK_IF_UNLOCK(sc_if);

	return (0);
}
static int
mskc_detach(device_t dev)
{
	struct msk_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));

	if (device_is_alive(dev)) {
		if (sc->msk_devs[MSK_PORT_A] != NULL) {
			free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
			    M_DEVBUF);
			device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
		}
		if (sc->msk_devs[MSK_PORT_B] != NULL) {
			free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
			    M_DEVBUF);
			device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
		}
		bus_generic_detach(dev);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	/* LED off. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);

	/* Put the hardware into reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	msk_status_dma_free(sc);

	if (sc->msk_intrhand) {
		bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand);
		sc->msk_intrhand = NULL;
	}
	bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
	if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
	mtx_destroy(&sc->msk_mtx);

	return (0);
}
struct msk_dmamap_arg {
	bus_addr_t	msk_busaddr;
};

static void
msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct msk_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->msk_busaddr = segs[0].ds_addr;
}
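
/*
 * Illustrative sketch (not part of the driver): the callback pattern
 * above is consumed like this. bus_dmamap_load() on wired kernel
 * memory completes synchronously, so the bus address the callback
 * stored into the stack-allocated context is valid as soon as the
 * call returns. The tag/map/vaddr/size names are placeholders.
 */
#if 0
	struct msk_dmamap_arg ctx;
	bus_addr_t paddr;
	int error;

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(tag, map, vaddr, size, msk_dmamap_cb, &ctx, 0);
	if (error == 0)
		paddr = ctx.msk_busaddr;	/* single-segment bus address */
#endif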
/* Create status DMA region. */
static int
msk_status_dma_alloc(struct msk_softc *sc)
{
	struct msk_dmamap_arg ctx;
	int error;

	error = bus_dma_tag_create(
		    bus_get_dma_tag(sc->msk_dev),	/* parent */
		    MSK_STAT_ALIGN, 0,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MSK_STAT_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    MSK_STAT_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->msk_stat_tag);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to create status DMA tag\n");
		return (error);
	}

	/* Allocate DMA'able memory and load the DMA map for status ring. */
	error = bus_dmamem_alloc(sc->msk_stat_tag,
	    (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
	    BUS_DMA_ZERO, &sc->msk_stat_map);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to allocate DMA'able memory for status ring\n");
		return (error);
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc->msk_stat_tag,
	    sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
	    msk_dmamap_cb, &ctx, 0);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to load DMA'able memory for status ring\n");
		return (error);
	}
	sc->msk_stat_ring_paddr = ctx.msk_busaddr;

	return (0);
}
static void
msk_status_dma_free(struct msk_softc *sc)
{

	/* Destroy status block. */
	if (sc->msk_stat_tag) {
		if (sc->msk_stat_map) {
			bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
			if (sc->msk_stat_ring) {
				bus_dmamem_free(sc->msk_stat_tag,
				    sc->msk_stat_ring, sc->msk_stat_map);
				sc->msk_stat_ring = NULL;
			}
			sc->msk_stat_map = NULL;
		}
		bus_dma_tag_destroy(sc->msk_stat_tag);
		sc->msk_stat_tag = NULL;
	}
}
static int
msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
{
	struct msk_dmamap_arg ctx;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	bus_size_t rxalign;
	int error, i;

	/* Create parent DMA tag. */
	/*
	 * The Yukon II appears to support full 64-bit DMA, but a 64-bit
	 * transfer needs two descriptors (list elements). Since we do not
	 * know in advance whether a given mbuf will map to a 32-bit or a
	 * 64-bit address, we restrict the DMA space to the 32-bit address
	 * range. Otherwise we would have to check each mapped address and
	 * chain an extra descriptor for the 64-bit case, which also makes
	 * the descriptor ring size variable. Limiting DMA addresses to
	 * 32 bits greatly simplifies descriptor handling and may even
	 * improve performance slightly through more efficient descriptor
	 * processing. Besides complicating the checksum offload logic,
	 * spending a separate descriptor on every 64-bit transfer just to
	 * save a little descriptor memory seems like a bad trade, and I
	 * have never seen such an exotic scheme on other Ethernet
	 * interface hardware.
	 */
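
/*
 * For illustration only (compiled out): the rejected alternative
 * described above would chain an extra list element carrying the high
 * 32 address bits whenever they change, roughly as sketched below.
 * OP_ADDR64 is the opcode this hardware family uses for such an LE;
 * the surrounding variable names are hypothetical.
 */
#if 0
	if (MSK_ADDR_HI(seg_addr) != last_addr_hi) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(MSK_ADDR_HI(seg_addr));
		tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
		last_addr_hi = MSK_ADDR_HI(seg_addr);
		MSK_INC(prod, MSK_TX_RING_CNT);
	}
#endif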
	error = bus_dma_tag_create(
		    bus_get_dma_tag(sc_if->msk_if_dev),	/* parent */
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
		    0,				/* nsegments */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_parent_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MSK_TX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    MSK_TX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_tx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MSK_RX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    MSK_RX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MSK_TSO_MAXSIZE,		/* maxsize */
		    MSK_MAXTXSEGS,		/* nsegments */
		    MSK_TSO_MAXSGSIZE,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_tx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Tx DMA tag\n");
		goto fail;
	}

	rxalign = 1;
	/*
	 * Work around a hardware hang that seems to occur when the Rx
	 * buffer is not aligned on a multiple of the FIFO word size
	 * (8 bytes).
	 */
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
		rxalign = MSK_RX_BUF_ALIGN;
	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
		    rxalign, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES,			/* maxsize */
		    1,				/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_rx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Rx DMA tag\n");
		goto fail;
	}
	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
	    (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
	    MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
	    (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
	    MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
	    &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static int
msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
{
	struct msk_dmamap_arg ctx;
	struct msk_rxdesc *jrxd;
	bus_size_t rxalign;
	int error, i;

	if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
		sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
		device_printf(sc_if->msk_if_dev,
		    "disabling jumbo frame support\n");
		return (0);
	}
	/* Create tag for jumbo Rx ring. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
		    1,				/* nsegments */
		    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create jumbo Rx ring DMA tag\n");
		goto jumbo_fail;
	}

	rxalign = 1;
	/*
	 * Work around a hardware hang that seems to occur when the Rx
	 * buffer is not aligned on a multiple of the FIFO word size
	 * (8 bytes).
	 */
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
		rxalign = MSK_RX_BUF_ALIGN;
	/* Create tag for jumbo Rx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
		    rxalign, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MJUM9BYTES,			/* maxsize */
		    1,				/* nsegments */
		    MJUM9BYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_jumbo_rx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create jumbo Rx DMA tag\n");
		goto jumbo_fail;
	}

	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
		goto jumbo_fail;
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
	    msk_dmamap_cb, &ctx, 0);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for jumbo Rx ring\n");
		goto jumbo_fail;
	}
	sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;

	/* Create DMA maps for jumbo Rx buffers. */
	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
	    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create spare jumbo Rx dmamap\n");
		goto jumbo_fail;
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		jrxd->rx_m = NULL;
		jrxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
		    &jrxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create jumbo Rx dmamap\n");
			goto jumbo_fail;
		}
	}

	return (0);

jumbo_fail:
	msk_rx_dma_jfree(sc_if);
	device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
	    "due to resource shortage\n");
	sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
	return (error);
}
static void
msk_txrx_dma_free(struct msk_if_softc *sc_if)
{
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	int i;

	/* Tx ring. */
	if (sc_if->msk_cdata.msk_tx_ring_tag) {
		if (sc_if->msk_cdata.msk_tx_ring_map)
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
			    sc_if->msk_cdata.msk_tx_ring_map);
		if (sc_if->msk_cdata.msk_tx_ring_map &&
		    sc_if->msk_rdata.msk_tx_ring)
			bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
			    sc_if->msk_rdata.msk_tx_ring,
			    sc_if->msk_cdata.msk_tx_ring_map);
		sc_if->msk_rdata.msk_tx_ring = NULL;
		sc_if->msk_cdata.msk_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
		sc_if->msk_cdata.msk_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc_if->msk_cdata.msk_rx_ring_tag) {
		if (sc_if->msk_cdata.msk_rx_ring_map)
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
			    sc_if->msk_cdata.msk_rx_ring_map);
		if (sc_if->msk_cdata.msk_rx_ring_map &&
		    sc_if->msk_rdata.msk_rx_ring)
			bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
			    sc_if->msk_rdata.msk_rx_ring,
			    sc_if->msk_cdata.msk_rx_ring_map);
		sc_if->msk_rdata.msk_rx_ring = NULL;
		sc_if->msk_cdata.msk_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
		sc_if->msk_cdata.msk_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc_if->msk_cdata.msk_tx_tag) {
		for (i = 0; i < MSK_TX_RING_CNT; i++) {
			txd = &sc_if->msk_cdata.msk_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
		sc_if->msk_cdata.msk_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc_if->msk_cdata.msk_rx_tag) {
		for (i = 0; i < MSK_RX_RING_CNT; i++) {
			rxd = &sc_if->msk_cdata.msk_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc_if->msk_cdata.msk_rx_sparemap) {
			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
			    sc_if->msk_cdata.msk_rx_sparemap);
			sc_if->msk_cdata.msk_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
		sc_if->msk_cdata.msk_rx_tag = NULL;
	}
	if (sc_if->msk_cdata.msk_parent_tag) {
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
		sc_if->msk_cdata.msk_parent_tag = NULL;
	}
}
static void
msk_rx_dma_jfree(struct msk_if_softc *sc_if)
{
	struct msk_rxdesc *jrxd;
	int i;

	/* Jumbo Rx ring. */
	if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
		    sc_if->msk_rdata.msk_jumbo_rx_ring)
			bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
			    sc_if->msk_rdata.msk_jumbo_rx_ring,
			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
		sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
		sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
		sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
	}
	/* Jumbo Rx buffers. */
	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
			if (jrxd->rx_dmamap) {
				bus_dmamap_destroy(
				    sc_if->msk_cdata.msk_jumbo_rx_tag,
				    jrxd->rx_dmamap);
				jrxd->rx_dmamap = NULL;
			}
		}
		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
			sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
	}
}
static int
msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
{
	struct msk_txdesc *txd, *txd_last;
	struct msk_tx_desc *tx_le;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
	uint32_t control, csum, prod, si;
	uint16_t offset, tcp_offset, tso_mtu;
	int error, i, nseg, tso;

	MSK_IF_LOCK_ASSERT(sc_if);

	tcp_offset = offset = 0;
	m = *m_head;
	if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
	    (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
	    ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
		/*
		 * Since the mbuf carries no protocol-specific structure
		 * information, we have to inspect the packet headers here
		 * to set up TSO and checksum offload. It is unclear why
		 * Marvell designed the chip this way, as other GigE
		 * hardware normally takes care of these chores itself;
		 * however, TSO performance on the Yukon II is good enough
		 * to make the effort worthwhile.
		 */
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;

		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}

		offset = sizeof(struct ether_header);
		m = m_pullup(m, offset);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check if hardware VLAN insertion is off. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			offset = sizeof(struct ether_vlan_header);
			m = m_pullup(m, offset);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + offset);
		offset += (ip->ip_hl << 2);
		tcp_offset = offset;
		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
			m = m_pullup(m, offset + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			tcp = (struct tcphdr *)(mtod(m, char *) + offset);
			offset += (tcp->th_off << 2);
		} else if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
		    (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
			/*
			 * The Yukon II seems to have a Tx checksum offload
			 * bug for small TCP packets of less than 60 bytes
			 * (e.g. TCP window probes and pure ACKs). The usual
			 * workaround of zero-padding the frame up to the
			 * minimum Ethernet frame size did not work at all,
			 * so instead of disabling checksum offload entirely
			 * we fall back to a software checksum when we
			 * encounter such a short TCP frame. Short UDP
			 * packets appear to be handled correctly by the
			 * Yukon II, and this bug presumably does not occur
			 * on controllers that use the newer descriptor
			 * format or automatic Tx checksum calculation.
			 */
			m = m_pullup(m, offset + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*(uint16_t *)(m->m_data + offset +
			    m->m_pkthdr.csum_data) = in_cksum_skip(m,
			    m->m_pkthdr.len, offset);
			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
		}
		*m_head = m;
	}
	prod = sc_if->msk_cdata.msk_tx_prod;
	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;
	error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
	    *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
		    map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	/* Check number of available descriptors. */
	if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
		return (ENOBUFS);
	}
	m = *m_head;
	control = 0;
	tso = 0;
	tx_le = NULL;

	/* Check TSO support. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
			tso_mtu = m->m_pkthdr.tso_segsz;
		else
			tso_mtu = offset + m->m_pkthdr.tso_segsz;
		if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
			tx_le->msk_addr = htole32(tso_mtu);
			if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
				tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
			else
				tx_le->msk_control =
				    htole32(OP_LRGLEN | HW_OWNER);
			sc_if->msk_cdata.msk_tx_cnt++;
			MSK_INC(prod, MSK_TX_RING_CNT);
			sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
		}
		tso++;
	}
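
	/*
	 * Worked example (illustrative): with a 1500-byte MTU the TCP
	 * stack typically advertises an MSS of 1460, so tso_segsz is
	 * 1460. DESCV2 parts take that value directly in an OP_MSS LE;
	 * older parts want the full segment length in an OP_LRGLEN LE,
	 * i.e. the 54-byte Ethernet/IP/TCP header length computed in
	 * 'offset' plus 1460 = 1514. Either LE is only re-emitted when
	 * the value changes, saving one LE per packet on a steady TSO
	 * stream.
	 */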
	/* Check if we have a VLAN tag to insert. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		if (tx_le == NULL) {
			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
			tx_le->msk_addr = htole32(0);
			tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
			    htons(m->m_pkthdr.ether_vtag));
			sc_if->msk_cdata.msk_tx_cnt++;
			MSK_INC(prod, MSK_TX_RING_CNT);
		} else {
			tx_le->msk_control |= htole32(OP_VLAN |
			    htons(m->m_pkthdr.ether_vtag));
		}
		control |= INS_VLAN;
	}
	/* Check if we have to handle checksum offload. */
	if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
		if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
			control |= CALSUM;
		else {
			control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
			if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
				control |= UDPTCP;
			/* Checksum write position. */
			csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff;
			/* Checksum start position. */
			csum |= (uint32_t)tcp_offset << 16;
			if (csum != sc_if->msk_cdata.msk_last_csum) {
				tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
				tx_le->msk_addr = htole32(csum);
				tx_le->msk_control = htole32(1 << 16 |
				    (OP_TCPLISW | HW_OWNER));
				sc_if->msk_cdata.msk_tx_cnt++;
				MSK_INC(prod, MSK_TX_RING_CNT);
				sc_if->msk_cdata.msk_last_csum = csum;
			}
		}
	}
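
	/*
	 * Worked example (illustrative): for a plain TCP/IPv4 frame,
	 * tcp_offset is 14 (Ethernet) + 20 (IP) = 34, and csum_data is
	 * the offset of the TCP checksum field within the TCP header,
	 * 16. The LE address word thus packs the start position 34 into
	 * the high half and the write position 34 + 16 = 50 into the
	 * low half: csum = (34 << 16) | 50. As with the MSS LE above,
	 * it is only re-emitted when the packed value changes.
	 */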
	si = prod;
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
	if (tso == 0)
		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
		    OP_PACKET);
	else
		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
		    OP_LARGESEND);
	sc_if->msk_cdata.msk_tx_cnt++;
	MSK_INC(prod, MSK_TX_RING_CNT);

	for (i = 1; i < nseg; i++) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
		    OP_BUFFER | HW_OWNER);
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}
	/* Update producer index. */
	sc_if->msk_cdata.msk_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_control |= htole32(EOP);

	/* Turn over ownership of the first descriptor to the hardware. */
	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
	tx_le->msk_control |= htole32(HW_OWNER);

	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
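
/*
 * Ring-index arithmetic (illustrative, compiled out): MSK_INC is a
 * modulo increment, so the "previous descriptor" computation above
 * adds the ring size before taking the remainder to avoid a negative
 * index when prod has just wrapped to 0.
 */
#if 0
	prod = 0;					/* just wrapped */
	prev = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
	/* prev == MSK_TX_RING_CNT - 1, the last slot, as intended */
#endif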
static void
msk_start(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if;

	sc_if = ifp->if_softc;
	MSK_IF_LOCK(sc_if);
	msk_start_locked(ifp);
	MSK_IF_UNLOCK(sc_if);
}

static void
msk_start_locked(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if;
	struct mbuf *m_head;
	int enq;

	sc_if = ifp->if_softc;
	MSK_IF_LOCK_ASSERT(sc_if);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc_if->msk_cdata.msk_tx_cnt <
	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (msk_encap(sc_if, &m_head) != 0) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to it.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_2(sc_if->msk_softc,
		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
		    sc_if->msk_cdata.msk_tx_prod);

		/* Set a timeout in case the chip goes out to lunch. */
		sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
	}
}
static void
msk_watchdog(struct msk_if_softc *sc_if)
{
	struct ifnet *ifp;

	MSK_IF_LOCK_ASSERT(sc_if);

	if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
		return;
	ifp = sc_if->msk_ifp;
	if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
		if (bootverbose)
			if_printf(sc_if->msk_ifp, "watchdog timeout "
			    "(missed link)\n");
		ifp->if_oerrors++;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		msk_init_locked(sc_if);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	msk_init_locked(sc_if);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		msk_start_locked(ifp);
}
static int
mskc_shutdown(device_t dev)
{
	struct msk_softc *sc;
	int i;

	sc = device_get_softc(dev);
	MSK_LOCK(sc);
	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
		    ((sc->msk_if[i]->msk_ifp->if_drv_flags &
		    IFF_DRV_RUNNING) != 0))
			msk_stop(sc->msk_if[i]);
	}

	/* Put the hardware into reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	MSK_UNLOCK(sc);
	return (0);
}

static int
mskc_suspend(device_t dev)
{
	struct msk_softc *sc;
	int i;

	sc = device_get_softc(dev);

	MSK_LOCK(sc);

	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
		    ((sc->msk_if[i]->msk_ifp->if_drv_flags &
		    IFF_DRV_RUNNING) != 0))
			msk_stop(sc->msk_if[i]);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	msk_phy_power(sc, MSK_PHY_POWERDOWN);

	/* Put the hardware into reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	sc->msk_pflags |= MSK_FLAG_SUSPEND;

	MSK_UNLOCK(sc);

	return (0);
}

static int
mskc_resume(device_t dev)
{
	struct msk_softc *sc;
	int i;

	sc = device_get_softc(dev);

	MSK_LOCK(sc);

	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
	mskc_reset(sc);
	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
			sc->msk_if[i]->msk_ifp->if_drv_flags &=
			    ~IFF_DRV_RUNNING;
			msk_init_locked(sc->msk_if[i]);
		}
	}
	sc->msk_pflags &= ~MSK_FLAG_SUSPEND;

	MSK_UNLOCK(sc);

	return (0);
}
#ifndef __NO_STRICT_ALIGNMENT
static __inline void
msk_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - (MSK_RX_BUF_ALIGN - ETHER_ALIGN) / sizeof(uint16_t);

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;
	m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
}
#endif
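
/*
 * Worked example (illustrative): with MSK_RX_BUF_ALIGN == 8 and
 * ETHER_ALIGN == 2, the chip DMAs the frame to an 8-byte aligned
 * address. Sliding the payload back by 8 - 2 = 6 bytes leaves m_data
 * at offset 2 within the original 8-byte unit, so the IP header that
 * follows the 14-byte Ethernet header lands on a 4-byte boundary,
 * which is what strict-alignment architectures require.
 */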
static __inline void
msk_rxcsum(struct msk_if_softc *sc_if, uint32_t control, struct mbuf *m)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen, temp32;
	uint16_t csum, *opts;

	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) {
		if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((control & CSS_IPV4_CSUM_OK) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
			    (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		return;
	}
	/*
	 * Marvell Yukon controllers that support OP_RXCHKS are known to
	 * have various Rx checksum offloading bugs. These controllers
	 * can be configured to compute a simple checksum at two
	 * different positions, so we could compute the IP and TCP/UDP
	 * checksums at the same time. Instead, we intentionally have the
	 * controller compute the TCP/UDP checksum twice, by specifying
	 * the same checksum start position for both, and compare the
	 * results; a mismatch indicates that the hardware logic was
	 * wrong.
	 */
	if ((sc_if->msk_csum & 0xFFFF) != (sc_if->msk_csum >> 16)) {
		if (bootverbose)
			device_printf(sc_if->msk_if_dev,
			    "Rx checksum value mismatch!\n");
		return;
	}
	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet. */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return;		/* no checksum */
		break;
	default:
		return;
	}

	csum = bswap16(sc_if->msk_csum & 0xFFFF);
	/* Checksum fixup for IP options. */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = csum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			csum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = csum;
}
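
/*
 * Worked example (illustrative): the loop above subtracts each 16-bit
 * IP option word from the hardware checksum in one's-complement
 * arithmetic. With csum = 0x1234 and *opts = 0x2000, temp32 becomes
 * 0x1234 - 0x2000 = -0xdcc; folding the borrow back in yields
 * csum = 0xf233, which equals 0x1234 + ~0x2000 truncated to 16 bits,
 * the correct one's-complement difference.
 */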
static void
msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
    int len)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct msk_rxdesc *rxd;
	int cons, rxlen;

	ifp = sc_if->msk_ifp;

	MSK_IF_LOCK_ASSERT(sc_if);

	cons = sc_if->msk_cdata.msk_rx_cons;
	do {
		rxlen = status >> 16;
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
			rxlen -= ETHER_VLAN_ENCAP_LEN;
		if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
			/*
			 * For controllers that return a bogus status code,
			 * do only a minimal check and let the upper stack
			 * handle the frame.
			 */
			if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
				ifp->if_ierrors++;
				msk_discard_rxbuf(sc_if, cons);
				break;
			}
		} else if (len > sc_if->msk_framesize ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packets as errors. */
			if ((status & GMR_FS_GOOD_FC) == 0)
				ifp->if_ierrors++;
			msk_discard_rxbuf(sc_if, cons);
			break;
		}
		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
		m = rxd->rx_m;
		if (msk_newbuf(sc_if, cons) != 0) {
			ifp->if_iqdrops++;
			/* Reuse old buffer. */
			msk_discard_rxbuf(sc_if, cons);
			break;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
#ifndef __NO_STRICT_ALIGNMENT
		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
			msk_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			msk_rxcsum(sc_if, control, m);
		/* Check for VLAN tagged packets. */
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
			m->m_flags |= M_VLANTAG;
		}
		MSK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		MSK_IF_LOCK(sc_if);
	} while (0);

	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
}
static void
msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
    int len)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct msk_rxdesc *jrxd;
	int cons, rxlen;

	ifp = sc_if->msk_ifp;

	MSK_IF_LOCK_ASSERT(sc_if);

	cons = sc_if->msk_cdata.msk_rx_cons;
	do {
		rxlen = status >> 16;
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
			rxlen -= ETHER_VLAN_ENCAP_LEN;
		if (len > sc_if->msk_framesize ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packets as errors. */
			if ((status & GMR_FS_GOOD_FC) == 0)
				ifp->if_ierrors++;
			msk_discard_jumbo_rxbuf(sc_if, cons);
			break;
		}
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
		m = jrxd->rx_m;
		if (msk_jumbo_newbuf(sc_if, cons) != 0) {
			ifp->if_iqdrops++;
			/* Reuse old buffer. */
			msk_discard_jumbo_rxbuf(sc_if, cons);
			break;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
#ifndef __NO_STRICT_ALIGNMENT
		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
			msk_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			msk_rxcsum(sc_if, control, m);
		/* Check for VLAN tagged packets. */
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
			m->m_flags |= M_VLANTAG;
		}
		MSK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		MSK_IF_LOCK(sc_if);
	} while (0);

	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
}
static void
msk_txeof(struct msk_if_softc *sc_if, int idx)
{
	struct msk_txdesc *txd;
	struct msk_tx_desc *cur_tx;
	struct ifnet *ifp;
	uint32_t control;
	int cons;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;

	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * Go through our Tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	cons = sc_if->msk_cdata.msk_tx_cons;
	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
			break;
		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
		control = le32toh(cur_tx->msk_control);
		sc_if->msk_cdata.msk_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if ((control & EOP) == 0)
			continue;
		txd = &sc_if->msk_cdata.msk_txdesc[cons];
		bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);

		ifp->if_opackets++;
		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	sc_if->msk_cdata.msk_tx_cons = cons;
	if (sc_if->msk_cdata.msk_tx_cnt == 0)
		sc_if->msk_watchdog_timer = 0;
	/* No need to sync LEs here; we did not modify them. */
}
static void
msk_tick(void *xsc_if)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = xsc_if;

	MSK_IF_LOCK_ASSERT(sc_if);

	mii = device_get_softc(sc_if->msk_miibus);
	mii_tick(mii);
	if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0)
		msk_miibus_statchg(sc_if->msk_if_dev);
	msk_handle_events(sc_if->msk_softc);
	msk_watchdog(sc_if);
	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
}

static void
msk_intr_phy(struct msk_if_softc *sc_if)
{
	uint16_t status;

	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
	/* Handle FIFO Underrun/Overflow? */
	if ((status & PHY_M_IS_FIFO_ERROR))
		device_printf(sc_if->msk_if_dev,
		    "PHY FIFO underrun/overflow.\n");
}
static void
msk_intr_gmac(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint8_t status;

	sc = sc_if->msk_softc;
	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

	/* GMAC Rx FIFO overrun. */
	if ((status & GM_IS_RX_FF_OR) != 0)
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    GMF_CLI_RX_FO);
	/* GMAC Tx FIFO underrun. */
	if ((status & GM_IS_TX_FF_UR) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_FU);
		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
		/*
		 * In case of a Tx underrun we may need to flush or reset
		 * the Tx MAC, but that would also require resynchronizing
		 * with the status LEs. Reinitializing the status LEs would
		 * affect the other port in a dual-MAC configuration, so it
		 * should be avoided as much as possible. Due to the lack
		 * of documentation this is all guesswork; it needs more
		 * investigation.
		 */
	}
}
static void
msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((status & Y2_IS_PAR_RD1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer read parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_RD_PERR);
	}
	if ((status & Y2_IS_PAR_WR1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer write parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_WR_PERR);
	}
	if ((status & Y2_IS_PAR_MAC1) != 0) {
		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_PE);
	}
	if ((status & Y2_IS_PAR_RX1) != 0) {
		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
	}
	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
	}
}
static void
msk_intr_hwerr(struct msk_softc *sc)
{
	uint32_t status;
	uint32_t tlphead[4];

	status = CSR_READ_4(sc, B0_HWE_ISRC);
	/* Time Stamp timer overflow. */
	if ((status & Y2_IS_TIST_OV) != 0)
		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	if ((status & Y2_IS_PCI_NEXP) != 0) {
		/*
		 * A PCI Express error occurred that is not described in
		 * the PEX spec. This error is also mapped to either the
		 * Master Abort (Y2_IS_MST_ERR) or Target Abort
		 * (Y2_IS_IRQ_STAT) bit and can only be cleared there.
		 */
		device_printf(sc->msk_dev,
		    "PCI Express protocol violation error\n");
	}

	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_MST_ERR) != 0)
			device_printf(sc->msk_dev,
			    "unexpected IRQ Master error\n");
		else
			device_printf(sc->msk_dev,
			    "unexpected IRQ Status error\n");
		/* Reset all bits in the PCI status register. */
		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
		    PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Check for PCI Express Uncorrectable Error. */
	if ((status & Y2_IS_PCI_EXP) != 0) {
		uint32_t v32;

		/*
		 * On the PCI Express bus, bridges are called root
		 * complexes (RC). PCI Express errors are also recognized
		 * by the root complex, which asks the system to handle
		 * the problem. After such an error it may be that no
		 * further access to the adapter is possible.
		 */
		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((v32 & PEX_UNSUP_REQ) != 0) {
			/* Ignore unsupported request error. */
			device_printf(sc->msk_dev,
			    "Uncorrectable PCI Express error\n");
		}
		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			int i;

			/* Get TLP header from Log Registers. */
			for (i = 0; i < 4; i++)
				tlphead[i] = CSR_PCI_READ_4(sc,
				    PEX_HEADER_LOG + i * 4);
			/* Check for vendor defined broadcast message. */
			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
				CSR_WRITE_4(sc, B0_HWE_IMSK,
				    sc->msk_intrhwemask);
				CSR_READ_4(sc, B0_HWE_IMSK);
			}
		}
		/* Clear the interrupt. */
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
}
static __inline void
msk_rxput(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
		bus_dmamap_sync(
		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(
		    sc_if->msk_cdata.msk_rx_ring_tag,
		    sc_if->msk_cdata.msk_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
}
static int
msk_handle_events(struct msk_softc *sc)
{
	struct msk_if_softc *sc_if;
	struct msk_stat_desc *sd;
	uint32_t control, status;
	int cons, len, port, rxprog;
	int rxput[2];

	if (sc->msk_stat_cons == CSR_READ_2(sc, STAT_PUT_IDX))
		return (0);

	/* Sync status LEs. */
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
	rxprog = 0;
	cons = sc->msk_stat_cons;
	for (;;) {
		sd = &sc->msk_stat_ring[cons];
		control = le32toh(sd->msk_control);
		if ((control & HW_OWNER) == 0)
			break;
		control &= ~HW_OWNER;
		sd->msk_control = htole32(control);
		status = le32toh(sd->msk_status);
		len = control & STLE_LEN_MASK;
		port = (control >> 16) & 0x01;
		sc_if = sc->msk_if[port];
		if (sc_if == NULL) {
			device_printf(sc->msk_dev, "invalid port opcode "
			    "0x%08x\n", control & STLE_OP_MASK);
			continue;
		}

		switch (control & STLE_OP_MASK) {
		case OP_RXVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXCHKSVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXCHKS:
			sc_if->msk_csum = status;
			break;
		case OP_RXSTAT:
			if (!(sc_if->msk_ifp->if_drv_flags & IFF_DRV_RUNNING))
				break;
			if (sc_if->msk_framesize >
			    (MCLBYTES - MSK_RX_BUF_ALIGN))
				msk_jumbo_rxeof(sc_if, status, control, len);
			else
				msk_rxeof(sc_if, status, control, len);
			rxprog++;
			/*
			 * Because there is no way to sync a single Rx LE,
			 * defer the DMA sync until the end of event
			 * processing.
			 */
			rxput[port]++;
			/* Update prefetch unit if we've passed water mark. */
			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
				msk_rxput(sc_if);
				rxput[port] = 0;
			}
			break;
		case OP_TXINDEXLE:
			if (sc->msk_if[MSK_PORT_A] != NULL)
				msk_txeof(sc->msk_if[MSK_PORT_A],
				    status & STLE_TXA1_MSKL);
			if (sc->msk_if[MSK_PORT_B] != NULL)
				msk_txeof(sc->msk_if[MSK_PORT_B],
				    ((status & STLE_TXA2_MSKL) >>
				    STLE_TXA2_SHIFTL) |
				    ((len & STLE_TXA2_MSKH) <<
				    STLE_TXA2_SHIFTH));
			break;
		default:
			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
			    control & STLE_OP_MASK);
			break;
		}
		MSK_INC(cons, MSK_STAT_RING_CNT);
		if (rxprog > sc->msk_process_limit)
			break;
	}

	sc->msk_stat_cons = cons;
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (rxput[MSK_PORT_A] > 0)
		msk_rxput(sc->msk_if[MSK_PORT_A]);
	if (rxput[MSK_PORT_B] > 0)
		msk_rxput(sc->msk_if[MSK_PORT_B]);

	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
}
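
/*
 * Status list element layout assumed by the dispatch loop above
 * (a sketch reconstructed from the field extraction in this file,
 * not taken from vendor documentation):
 *
 *	msk_control: HW_OWNER | opcode (STLE_OP_MASK) | port (bit 16) |
 *	             length (STLE_LEN_MASK, low bits)
 *	msk_status:  opcode-specific data -- Rx frame status, computed
 *	             checksums, or packed Tx consumer indices for
 *	             OP_TXINDEXLE
 *
 * The chip sets HW_OWNER when it writes an LE; the driver clears the
 * bit after consuming the entry so a stale entry is never processed
 * twice.
 */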
static void
msk_intr(void *xsc)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if0, *sc_if1;
	struct ifnet *ifp0, *ifp1;
	uint32_t status;
	int domore;

	sc = xsc;
	MSK_LOCK(sc);

	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
	if (status == 0 || status == 0xffffffff ||
	    (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
	    (status & sc->msk_intrmask) == 0) {
		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
		MSK_UNLOCK(sc);
		return;
	}

	sc_if0 = sc->msk_if[MSK_PORT_A];
	sc_if1 = sc->msk_if[MSK_PORT_B];
	ifp0 = ifp1 = NULL;
	if (sc_if0 != NULL)
		ifp0 = sc_if0->msk_ifp;
	if (sc_if1 != NULL)
		ifp1 = sc_if1->msk_ifp;

	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
		msk_intr_phy(sc_if0);
	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
		msk_intr_phy(sc_if1);
	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
		msk_intr_gmac(sc_if0);
	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
		msk_intr_gmac(sc_if1);
	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
		device_printf(sc->msk_dev, "Rx descriptor error\n");
		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
		CSR_READ_4(sc, B0_IMSK);
	}
	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
		device_printf(sc->msk_dev, "Tx descriptor error\n");
		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
		CSR_READ_4(sc, B0_IMSK);
	}
	if ((status & Y2_IS_HW_ERR) != 0)
		msk_intr_hwerr(sc);

	domore = msk_handle_events(sc);
	if ((status & Y2_IS_STAT_BMU) != 0 && domore == 0)
		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);

	if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
		msk_start_locked(ifp0);
	if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
		msk_start_locked(ifp1);

	MSK_UNLOCK(sc);
}
static void
msk_set_tx_stfwd(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;

	ifp = sc_if->msk_ifp;
	sc = sc_if->msk_softc;
	if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
	    sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
	    sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_STFW_ENA);
	} else {
		if (ifp->if_mtu > ETHERMTU) {
			/* Set Tx GMAC FIFO Almost Empty Threshold. */
			CSR_WRITE_4(sc,
			    MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
			/* Disable Store & Forward mode for Tx. */
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
			    TX_STFW_DIS);
		} else
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
			    TX_STFW_ENA);
	}
}

static void
msk_init(void *xsc)
{
	struct msk_if_softc *sc_if = xsc;

	MSK_IF_LOCK(sc_if);
	msk_init_locked(sc_if);
	MSK_IF_UNLOCK(sc_if);
}
static void
msk_init_locked(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct mii_data	*mii;
	uint8_t *eaddr;
	uint16_t gmac;
	uint32_t reg;
	int error;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;
	sc = sc_if->msk_softc;
	mii = device_get_softc(sc_if->msk_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/* Cancel pending I/O and free all Rx/Tx buffers. */
	msk_stop(sc_if);

	if (ifp->if_mtu < ETHERMTU)
		sc_if->msk_framesize = ETHERMTU;
	else
		sc_if->msk_framesize = ifp->if_mtu;
	sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	if (ifp->if_mtu > ETHERMTU &&
	    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
		ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
		ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
	}

	/* GMAC Control reset. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
		    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
		    GMC_BYP_RETR_ON);

	/*
	 * Initialize GMAC first such that speed/duplex/flow-control
	 * parameters are renegotiated when the interface is brought up.
	 */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);

	/* Dummy read the Interrupt Source Register. */
	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

	/* Clear MIB stats. */
	msk_stats_clear(sc_if);

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);

	/* Setup Transmit Control Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* Setup Transmit Flow Control Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);

	/* Setup Transmit Parameter Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (ifp->if_mtu > ETHERMTU)
		gmac |= GM_SMOD_JUMBO_ENA;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3786 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3788 /* Set station address. */
3789 eaddr = IF_LLADDR(ifp);
3790 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L,
3791 eaddr[0] | (eaddr[1] << 8));
3792 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M,
3793 eaddr[2] | (eaddr[3] << 8));
3794 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H,
3795 eaddr[4] | (eaddr[5] << 8));
3796 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L,
3797 eaddr[0] | (eaddr[1] << 8));
3798 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M,
3799 eaddr[2] | (eaddr[3] << 8));
3800 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H,
3801 eaddr[4] | (eaddr[5] << 8));
3803 /* Disable interrupts for counter overflows. */
3804 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3805 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3806 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3808 /* Configure Rx MAC FIFO. */
3809 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3810 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3811 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
3812 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
3813 sc->msk_hw_id == CHIP_ID_YUKON_EX)
3814 reg |= GMF_RX_OVER_ON;
3815 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
3817 /* Set receive filter. */
3818 msk_rxfilter(sc_if);
3820 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
3821 /* Clear flush mask - HW bug. */
3822 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
3824 /* Flush Rx MAC FIFO on any flow control or error. */
3825 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3830 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3831 * due to hardware hang on receipt of pause frames.
3833 reg = RX_GMF_FL_THR_DEF + 1;
3834 /* Another magic for Yukon FE+ - From Linux. */
3835 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3836 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
3838 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
3840 /* Configure Tx MAC FIFO. */
3841 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3842 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3843 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3845 /* Configure hardware VLAN tag insertion/stripping. */
3846 msk_setvlan(sc_if, ifp);
3848 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
3849 /* Set Rx Pause threshold. */
3850 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3852 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3854 /* Configure store-and-forward for Tx. */
3855 msk_set_tx_stfwd(sc_if);
3858 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3859 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
3860 /* Disable dynamic watermark - from Linux. */
3861 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
3863 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
	/*
	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
	 * arbiter, as we don't use the Sync Tx queue.
	 */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
	/* Enable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);

	/* Setup RAM buffer. */
	msk_set_rambuffer(sc_if);

	/* Disable Tx sync Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);

	/* Setup Tx Queue Bus Memory Interface. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC_U:
		if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
			/* Fix for Yukon-EC Ultra: set BMU FIFO level */
			CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
			    MSK_ECU_TXFF_LEV);
		}
		break;
	case CHIP_ID_YUKON_EX:
		/*
		 * The Yukon Extreme appears to have a silicon bug in its
		 * automatic Tx checksum calculation capability.
		 */
		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
			    F_TX_CHK_AUTO_OFF);
		break;
	}

	/* Setup Rx Queue Bus Memory Interface. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
		/* MAC Rx RAM Read is controlled by hardware. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
	}

	msk_set_prefetch(sc, sc_if->msk_txq,
	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
	msk_init_tx_ring(sc_if);

	/* Disable Rx checksum offload and RSS hash. */
	reg = BMU_DIS_RX_RSS_HASH;
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (ifp->if_capenable & IFCAP_RXCSUM) != 0)
		reg |= BMU_ENA_RX_CHKSUM;
	else
		reg |= BMU_DIS_RX_CHKSUM;
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), reg);
	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
		    MSK_JUMBO_RX_RING_CNT - 1);
		error = msk_init_jumbo_rx_ring(sc_if);
	} else {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_rx_ring_paddr,
		    MSK_RX_RING_CNT - 1);
		error = msk_init_rx_ring(sc_if);
	}
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "initialization failed: no memory for Rx buffers\n");
		msk_stop(sc_if);
		return;
	}
	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
		/* Disable flushing of non-ASF packets. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    GMF_RX_MACSEC_FLUSH_OFF);
	}

	/* Configure interrupt handling. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask |= Y2_IS_PORT_A;
		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask |= Y2_IS_PORT_B;
		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
	}
	/* Configure IRQ moderation mask. */
	CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
	if (sc->msk_int_holdoff > 0) {
		/* Configure initial IRQ moderation timer value. */
		CSR_WRITE_4(sc, B2_IRQM_INI,
		    MSK_USECS(sc, sc->msk_int_holdoff));
		CSR_WRITE_4(sc, B2_IRQM_VAL,
		    MSK_USECS(sc, sc->msk_int_holdoff));
		/* Start IRQ moderation. */
		CSR_WRITE_1(sc, B2_IRQM_CTRL, TIM_START);
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	sc_if->msk_flags &= ~MSK_FLAG_LINK;
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
}
static void
msk_set_rambuffer(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	int ltpp, utpp;

	sc = sc_if->msk_softc;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		return;

	/* Setup Rx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
	    sc->msk_rxqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);

	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
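
	/*
	 * Worked example (illustrative, not taken from the manual): all
	 * RAM buffer registers are programmed in 8-byte FIFO words,
	 * hence the divisions by 8. The upper pause threshold (utpp) is
	 * placed MSK_RB_ULPP bytes below the top of the queue and the
	 * lower threshold (ltpp) MSK_RB_LLPP_B below it; for small RAM
	 * buffers (msk_rxqsize < MSK_MIN_RXQ_SIZE) the smaller
	 * MSK_RB_LLPP_S margin is used instead, so less of the queue is
	 * reserved below the lower threshold.
	 */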
	/* Setup Tx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
	    sc->msk_txqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	/* Enable Store & Forward for Tx side. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
}
static void
msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
    uint32_t count)
{

	/* Reset the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set LE base address. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    MSK_ADDR_HI(addr));
	/* Set the list last index. */
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    count);
	/* Turn on prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure write. */
	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}
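
/*
 * Usage sketch (illustrative): both queues are programmed through this
 * helper from msk_init_locked(), e.g.
 *
 *	msk_set_prefetch(sc, sc_if->msk_txq,
 *	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
 *
 * after which the driver only writes PREF_UNIT_PUT_IDX_REG to hand new
 * list elements to the chip; the prefetch unit then fetches LEs from
 * host memory on its own.
 */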
4061 msk_stop(struct msk_if_softc *sc_if)
4063 struct msk_softc *sc;
4064 struct msk_txdesc *txd;
4065 struct msk_rxdesc *rxd;
4066 struct msk_rxdesc *jrxd;
4071 MSK_IF_LOCK_ASSERT(sc_if);
4072 sc = sc_if->msk_softc;
4073 ifp = sc_if->msk_ifp;
	callout_stop(&sc_if->msk_tick_ch);
	sc_if->msk_watchdog_timer = 0;

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);
	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read back to ensure the write posted. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	/* Update stats and clear counters. */
	msk_stats_update(sc_if);
	/* Stop Tx BMU. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);
	/* Disable all GMAC interrupts. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable PHY interrupt. */
	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
	/*
	 * The Rx Stop command does not work on Yukon-2 unless the BMU has
	 * reached the end of a packet, and we cannot guarantee that no
	 * data is incoming.  The BMU must therefore be reset only while
	 * no DMA transfer is in progress.  Since the Rx path may still be
	 * active, the Rx RAM buffer is stopped first so that incoming
	 * data cannot trigger a new DMA.  Once the RAM buffer is stopped,
	 * the BMU is polled until any DMA in progress has ended, and only
	 * then is it reset.
	 */
	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
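	/*
	 * The poll above compares the RAM buffer's read and write levels;
	 * once they match, the buffer has drained and no Rx DMA can be in
	 * flight, which makes it safe to reset the BMU below.
	 */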
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc_if->msk_flags &= ~MSK_FLAG_LINK;
}
/*
 * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the lower
 * 16 bits of a counter clears its high 16 bits, so the low half must be
 * the last thing read.
 */
#define	MSK_READ_MIB32(x, y)					\
	(((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +	\
	(uint32_t)GMAC_READ_2(sc, x, y)
#define	MSK_READ_MIB64(x, y)					\
	(((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +	\
	(uint64_t)MSK_READ_MIB32(x, y)
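/*
 * Walk-through of the required ordering (illustrative): for a 32-bit
 * counter at GMAC offset y, MSK_READ_MIB32 fetches the high word at
 * y + 4 and only then the low word at y.  With GM_PAR_MIB_CLR set, it
 * is the 16-bit read of the low word that clears the counter, so
 * reading in the opposite order would zero the high half before it had
 * been sampled.  MSK_READ_MIB64 applies the same rule to the 64-bit
 * octet counters, reading the upper 32 bits at y + 8 first.
 */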
static void
msk_stats_clear(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint32_t reg;
	uint16_t gmac;
	int i;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->msk_softc;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
	/* Read all MIB counters with clear mode set; values are discarded. */
	for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t))
		reg = MSK_READ_MIB32(sc_if->msk_port, i);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}
static void
msk_stats_update(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct msk_hw_stats *stats;
	uint16_t gmac;
	uint32_t reg;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;
	sc = sc_if->msk_softc;
	stats = &sc_if->msk_stats;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
	/* Rx stats. */
	stats->rx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
	stats->rx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
	stats->rx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
	stats->rx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
	stats->rx_crc_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
	/* Spare counters are read only to advance the clear-on-read sweep. */
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
	stats->rx_good_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
	stats->rx_bad_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
	stats->rx_runt +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
	stats->rx_runt_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
	stats->rx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
	stats->rx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
	stats->rx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
	stats->rx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
	stats->rx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
	stats->rx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
	stats->rx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
	stats->rx_pkts_too_long +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
	stats->rx_pkts_jabbers +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
	stats->rx_fifo_oflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);
	/* Tx stats. */
	stats->tx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
	stats->tx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
	stats->tx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
	stats->tx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
	stats->tx_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
	stats->tx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
	stats->tx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
	stats->tx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
	stats->tx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
	stats->tx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
	stats->tx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
	stats->tx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
	stats->tx_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
	stats->tx_late_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
	stats->tx_excess_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
	stats->tx_multi_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
	stats->tx_single_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
	stats->tx_underflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}
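/*
 * Because the MIB counters are swept with GM_PAR_MIB_CLR set, each
 * msk_stats_update() pass leaves the hardware counters at zero; the
 * running totals live in the software msk_hw_stats structure, which the
 * sysctl handlers below add to the current hardware values.
 */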
static int
msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint32_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);

	MSK_IF_LOCK(sc_if);
	result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);

	return (sysctl_handle_int(oidp, &result, 0, req));
}
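/*
 * Usage note: the value reported is the live hardware counter plus the
 * software-accumulated total, so readings stay monotonic across the
 * clear-on-read sweeps done by msk_stats_update().  From userland the
 * node reads like any other sysctl, e.g. (unit number for illustration):
 *
 *	sysctl dev.msk.0.stats.rx.ucast_frames
 */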
static int
msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint64_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);

	MSK_IF_LOCK(sc_if);
	result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);

	return (sysctl_handle_64(oidp, &result, 0, req));
}
#undef MSK_READ_MIB32
#undef MSK_READ_MIB64

#define	MSK_SYSCTL_STAT32(sc, c, o, p, n, d)				\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD,	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,	\
	    "IU", d)
#define	MSK_SYSCTL_STAT64(sc, c, o, p, n, d)				\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_U64 | CTLFLAG_RD,	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,	\
	    "QU", d)
static void
msk_sysctl_node(struct msk_if_softc *sc_if)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *schild;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "MSK Statistics");
	schild = child = SYSCTL_CHILDREN(tree);
	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "MSK RX Statistics");
	child = SYSCTL_CHILDREN(tree);
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, rx_ucast_frames, "Good unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, rx_bcast_frames, "Good broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, rx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, rx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
	    child, rx_crc_errs, "CRC errors");
	MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
	    child, rx_good_octets, "Good octets");
	MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
	    child, rx_bad_octets, "Bad octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, rx_pkts_64, "64 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, rx_pkts_65_127, "65 to 127 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, rx_pkts_128_255, "128 to 255 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, rx_pkts_256_511, "256 to 511 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, rx_pkts_512_1023, "512 to 1023 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, rx_pkts_1024_1518, "1024 to 1518 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, rx_pkts_1519_max, "1519 byte to maximum size frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
	    child, rx_pkts_too_long, "Frames too long");
	MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
	    child, rx_pkts_jabbers, "Jabber errors");
	MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
	    child, rx_fifo_oflows, "FIFO overflows");
	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "MSK TX Statistics");
	child = SYSCTL_CHILDREN(tree);
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, tx_ucast_frames, "Unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, tx_bcast_frames, "Broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, tx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, tx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
	    child, tx_octets, "Octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, tx_pkts_64, "64 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, tx_pkts_65_127, "65 to 127 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, tx_pkts_128_255, "128 to 255 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, tx_pkts_256_511, "256 to 511 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, tx_pkts_512_1023, "512 to 1023 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, tx_pkts_1024_1518, "1024 to 1518 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, tx_pkts_1519_max, "1519 byte to maximum size frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
	    child, tx_colls, "Collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
	    child, tx_late_colls, "Late collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
	    child, tx_excess_colls, "Excessive collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
	    child, tx_multi_colls, "Multiple collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
	    child, tx_single_colls, "Single collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
	    child, tx_underflows, "FIFO underflows");
}
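/*
 * The resulting per-port sysctl layout (illustrative):
 *
 *	dev.msk.<unit>.stats.rx.{ucast_frames,bcast_frames,crc_errs,...}
 *	dev.msk.<unit>.stats.tx.{ucast_frames,colls,underflows,...}
 *
 * Every leaf is served by msk_sysctl_stat32()/msk_sysctl_stat64() above.
 */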
#undef MSK_SYSCTL_STAT32
#undef MSK_SYSCTL_STAT64
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}
static int
sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
	    MSK_PROC_MAX));
}
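/*
 * Registration sketch (illustrative; the node name and softc field
 * follow the driver's tunable conventions but are not verbatim from
 * this file).  The attach path is expected to wire the handler to a
 * writable node along the lines of:
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->msk_dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->msk_dev)),
 *	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 *	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
 *	    "max number of Rx events to process");
 *
 * Writes outside [MSK_PROC_MIN, MSK_PROC_MAX] are then rejected with
 * EINVAL by sysctl_int_range() above.
 */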